/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */
//#include "vg_profile.c"

/* Define to debug the mem audit system. */
/* #define VG_DEBUG_MEMORY */

/* Define to collect detailed performance info. */
/* #define VG_PROFILE_MEMORY */

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

Bool  SK_(clo_partial_loads_ok)       = True;
Int   SK_(clo_freelist_vol)           = 1000000;
Bool  SK_(clo_leak_check)             = False;
VgRes SK_(clo_leak_resolution)        = Vg_LowRes;
Bool  SK_(clo_show_reachable)         = False;
Bool  SK_(clo_workaround_gcc296_bugs) = False;
Bool  SK_(clo_check_addrVs)           = True;
Bool  SK_(clo_cleanup)                = True;
Bool  SK_(clo_avoid_strlen_errors)    = True;


/*------------------------------------------------------------*/
/*--- Profiling events                                     ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      VgpCheckMem = VgpFini+1,
      VgpSetMem
   }
   VgpSkinCC;

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by nine bits, one indicating
   accessibility, the other eight validity.  So each second-level map
   contains 73728 bytes.  This two-level arrangement conveniently
   divides the 4G address space into 64k lumps, each size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible and
   not valid' for all bytes.  Entries in the primary map for
   which the entire 64k is not in use at all point at this
   distinguished map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

   f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
      = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
      = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address.
*/


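/* Illustrative sketch of the indexing scheme described above.  Added
   for exposition only: it is not compiled or used by the skin, and the
   helper name is hypothetical.  The real fast-path code below (see
   SK_(helperc_LOADV4)) achieves the same effect with rotateRight16. */
#if 0
static UInt example_primary_index_for_4byte_access ( Addr a )
{
   /* Rotate the low 16 bits of a to the top: bits 1:0 of a (the
      alignment bits) become bits 17:16 of the rotated value. */
   UInt rotated = (a >> 16) | (a << 16);
   UInt index   = rotated & 0x3FFFF;     /* 18-bit index, 0 .. 262143 */
   /* If a is 4-aligned, index < 65536 and selects an ordinary entry.
      If a is misaligned, index >= 65536 and selects the distinguished
      secondary map, so the access fails over to the slow path, which
      then distinguishes misalignment from a genuine addressing error. */
   return index;
}
#endif
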
/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++)
      event_ctr[i] = 0;
}

static void done_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#define PROF_EVENT(ev) /* */

#endif

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   10   alloc_secondary_map

   20   get_abit
   21   get_vbyte
   22   set_abit
   23   set_vbyte
   24   get_abits4_ALIGNED
   25   get_vbytes4_ALIGNED

   30   set_address_range_perms
   31   set_address_range_perms(lower byte loop)
   32   set_address_range_perms(quadword loop)
   33   set_address_range_perms(upper byte loop)

   35   make_noaccess
   36   make_writable
   37   make_readable

   40   copy_address_range_state
   41   copy_address_range_state(byte loop)
   42   check_writable
   43   check_writable(byte loop)
   44   check_readable
   45   check_readable(byte loop)
   46   check_readable_asciiz
   47   check_readable_asciiz(byte loop)

   50   make_aligned_word_NOACCESS
   51   make_aligned_word_WRITABLE

   60   helperc_LOADV4
   61   helperc_STOREV4
   62   helperc_LOADV2
   63   helperc_STOREV2
   64   helperc_LOADV1
   65   helperc_STOREV1

   70   rim_rd_V4_SLOWLY
   71   rim_wr_V4_SLOWLY
   72   rim_rd_V2_SLOWLY
   73   rim_wr_V2_SLOWLY
   74   rim_rd_V1_SLOWLY
   75   rim_wr_V1_SLOWLY

   80   fpu_read
   81   fpu_read aligned 4
   82   fpu_read aligned 8
   83   fpu_read 2
   84   fpu_read 10

   85   fpu_write
   86   fpu_write aligned 4
   87   fpu_write aligned 8
   88   fpu_write 2
   89   fpu_write 10

   90   fpu_read_check_SLOWLY
   91   fpu_read_check_SLOWLY(byte loop)
   92   fpu_write_check_SLOWLY
   93   fpu_write_check_SLOWLY(byte loop)

   100  is_plausible_stack_addr
   101  handle_esp_assignment
   102  handle_esp_assignment(-4)
   103  handle_esp_assignment(+4)
   104  handle_esp_assignment(-12)
   105  handle_esp_assignment(-8)
   106  handle_esp_assignment(+16)
   107  handle_esp_assignment(+12)
   108  handle_esp_assignment(0)
   109  handle_esp_assignment(+8)
   110  handle_esp_assignment(-16)
   111  handle_esp_assignment(+20)
   112  handle_esp_assignment(-20)
   113  handle_esp_assignment(+24)
   114  handle_esp_assignment(-24)

   120  vg_handle_esp_assignment_SLOWLY
   121  vg_handle_esp_assignment_SLOWLY(normal; move down)
   122  vg_handle_esp_assignment_SLOWLY(normal; move up)
   123  vg_handle_esp_assignment_SLOWLY(normal)
   124  vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
*/

/*------------------------------------------------------------*/
/*--- Function declarations.                               ---*/
/*------------------------------------------------------------*/

static UInt vgmext_rd_V4_SLOWLY ( Addr a );
static UInt vgmext_rd_V2_SLOWLY ( Addr a );
static UInt vgmext_rd_V1_SLOWLY ( Addr a );
static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes );
static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes );
static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes );
static void fpu_read_check_SLOWLY ( Addr addr, Int size );
static void fpu_write_check_SLOWLY ( Addr addr, Int size );

/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

static SecMap* primary_map[ /*65536*/ 262144 ];
static SecMap  distinguished_secondary_map;

#define IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                              \
   do {                                                           \
      if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {       \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
         /* VG_(printf)("new 2map because of %p\n", addr); */     \
      }                                                           \
   } while(0)

#define BITARR_SET(aaa_p,iii_p)                       \
   do {                                               \
      UInt   iii = (UInt)iii_p;                       \
      UChar* aaa = (UChar*)aaa_p;                     \
      aaa[iii >> 3] |= (1 << (iii & 7));              \
   } while (0)

#define BITARR_CLEAR(aaa_p,iii_p)                     \
   do {                                               \
      UInt   iii = (UInt)iii_p;                       \
      UChar* aaa = (UChar*)aaa_p;                     \
      aaa[iii >> 3] &= ~(1 << (iii & 7));             \
   } while (0)

#define BITARR_TEST(aaa_p,iii_p)                      \
   (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ]       \
          & (1 << (((UInt)iii_p) & 7))))              \


#define VGM_BIT_VALID      0
#define VGM_BIT_INVALID    1

#define VGM_NIBBLE_VALID   0
#define VGM_NIBBLE_INVALID 0xF

#define VGM_BYTE_VALID     0
#define VGM_BYTE_INVALID   0xFF

#define VGM_WORD_VALID     0
#define VGM_WORD_INVALID   0xFFFFFFFF

#define VGM_EFLAGS_VALID   0xFFFFFFFE
#define VGM_EFLAGS_INVALID 0xFFFFFFFF   /* not used */


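/* Worked example (added for exposition; not compiled or used by the
   skin): how the encodings above combine for one aligned 32-bit word.
   Each byte has one A bit (0 == addressible) and one V byte (0x00 ==
   valid), so an aligned word is described by a nibble of A bits plus a
   word of V bytes.  The helper name is hypothetical; the real
   accessors are get_abits4_ALIGNED and get_vbytes4_ALIGNED below. */
#if 0
static Bool example_word_fully_valid ( UChar abits4, UInt vbytes4 )
{
   return abits4 == VGM_NIBBLE_VALID && vbytes4 == VGM_WORD_VALID;
}
#endif
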
static void init_shadow_memory ( void )
{
   Int i;

   for (i = 0; i < 8192; i++)             /* Invalid address */
      distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
   for (i = 0; i < 65536; i++)            /* Invalid Value */
      distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = 65536; i < 262144; i++)
      primary_map[i] = &distinguished_secondary_map;
}

void SK_(post_clo_init) ( void )
{
}

void SK_(fini) ( void )
{
   VG_(print_malloc_stats)();

   if (VG_(clo_verbosity) == 1) {
      if (!SK_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
             "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();

   done_prof_mem();

   if (0) {
      VG_(message)(Vg_DebugMsg,
        "------ Valgrind's client block stats follow ---------------" );
      SK_(show_client_block_stats)();
   }
}

/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static SecMap* alloc_secondary_map ( __attribute__ ((unused))
                                     Char* caller )
{
   SecMap* map;
   UInt    i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access and invalid value. */

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. */
   sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID;   /* Invalid address */
   for (i = 0; i < 65536; i++)
      map->vbyte[i] = VGM_BYTE_INVALID;   /* Invalid Value */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
      if (IS_DISTINGUISHED_SM(sm))
         VG_(message)(Vg_DebugMsg,
                      "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static __inline__ UChar get_vbyte ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = a & 0xFFFF;
   PROF_EVENT(21);
#  if 0
      if (IS_DISTINGUISHED_SM(sm))
         VG_(message)(Vg_DebugMsg,
                      "accessed distinguished 2ndary (V)map! 0x%x\n", a);
#  endif
   return sm->vbyte[sm_off];
}

static __inline__ void set_abit ( Addr a, UChar abit )
{
   SecMap* sm;
   UInt    sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}

static __inline__ void set_vbyte ( Addr a, UChar vbyte )
{
   SecMap* sm;
   UInt    sm_off;
   PROF_EVENT(23);
   ENSURE_MAPPABLE(a, "set_vbyte");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->vbyte[sm_off] = vbyte;
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   SecMap* sm;
   UInt    sm_off;
   UChar   abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}

static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = a & 0xFFFF;
   PROF_EVENT(25);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   return ((UInt*)(sm->vbyte))[sm_off >> 2];
}


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d, v %d",
                   len, example_a_bit, example_v_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity bits to write. */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}

/* Set permissions for address ranges ... */

void SK_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

void SK_(make_writable) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("SK_(make_writable)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

void SK_(make_readable) ( Addr a, UInt len )
{
   PROF_EVENT(37);
   DEBUG("SK_(make_readable)(%p, 0x%x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}

/* Block-copy permissions (needed for implementing realloc()). */

static void copy_address_range_state ( Addr src, Addr dst, UInt len )
{
   UInt i;

   DEBUG("copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit  = get_abit ( src+i );
      UChar vbyte = get_vbyte ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
      set_vbyte ( dst+i, vbyte );
   }
}


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   UChar vbyte;

   PROF_EVENT(44);
   DEBUG("SK_(check_readable)\n");
   for (i = 0; i < len; i++) {
      abit  = get_abit(a);
      vbyte = get_vbyte(a);
      PROF_EVENT(45);
      if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
{
   UChar abit;
   UChar vbyte;
   PROF_EVENT(46);
   DEBUG("SK_(check_readable_asciiz)\n");
   while (True) {
      PROF_EVENT(47);
      abit  = get_abit(a);
      vbyte = get_vbyte(a);
      if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return True;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

/* Setting permissions for aligned words.  This supports fast stack
   operations. */

static void make_noaccess_aligned ( Addr a, UInt len )
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;
   Addr    a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(50);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_noaccess_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         invalid (1s). */
      sm->abits[sm_off >> 3] |= mask;
   }
   VGP_POPCC(VgpSetMem);
}

static void make_writable_aligned ( Addr a, UInt len )
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;
   Addr    a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(51);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_writable_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         valid (0s). */
      sm->abits[sm_off >> 3] &= ~mask;
   }
   VGP_POPCC(VgpSetMem);
}


static
void check_is_writable ( CorePart part, ThreadState* tst,
                         Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_writable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
         break;

      default:
         VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable ( CorePart part, ThreadState* tst,
                         Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_readable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
         break;

      case Vg_CorePThread:
         SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         SK_(record_jump_error)( tst, bad_addr );
         break;

      default:
         VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
                                Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
   if (!ok) {
      SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}


static
void memcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   SK_(make_readable)(a, len);
}

static
void memcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   if (is_inited) {
      SK_(make_readable)(a, len);
   } else {
      SK_(make_writable)(a, len);
   }
}

static
void memcheck_set_perms (Addr a, UInt len,
                         Bool rr, Bool ww, Bool xx)
{
   DEBUG("memcheck_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
         a, len, rr, ww, xx);
   if      (rr) SK_(make_readable)(a, len);
   else if (ww) SK_(make_writable)(a, len);
   else         SK_(make_noaccess)(a, len);
}


/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.       ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}


static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}


/* Read/write 1/2/4 sized V bytes, and emit an address error if
   needed. */

/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
   Under all other circumstances, they defer to the relevant _SLOWLY
   function, which can handle all situations.
*/
__attribute__ ((regparm(1)))
UInt SK_(helperc_LOADV4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_rd_V4_SLOWLY(a);
#  else
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   UChar   abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
   } else {
      /* Slow but general case. */
      return vgmext_rd_V4_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(2)))
void SK_(helperc_STOREV4) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   vgmext_wr_V4_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   UChar   abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(61);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
   } else {
      /* Slow but general case. */
      vgmext_wr_V4_SLOWLY(a, vbytes);
   }
#  endif
}

__attribute__ ((regparm(1)))
UInt SK_(helperc_LOADV2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_rd_V2_SLOWLY(a);
#  else
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(62);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      return 0xFFFF0000
             |
             (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
   } else {
      /* Slow but general case. */
      return vgmext_rd_V2_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(2)))
void SK_(helperc_STOREV2) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   vgmext_wr_V2_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(63);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
   } else {
      /* Slow but general case. */
      vgmext_wr_V2_SLOWLY(a, vbytes);
   }
#  endif
}

__attribute__ ((regparm(1)))
UInt SK_(helperc_LOADV1) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_rd_V1_SLOWLY(a);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(64);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      return 0xFFFFFF00
             |
             (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
   } else {
      /* Slow but general case. */
      return vgmext_rd_V1_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(2)))
void SK_(helperc_STOREV1) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   vgmext_wr_V1_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(65);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
   } else {
      /* Slow but general case. */
      vgmext_wr_V1_SLOWLY(a, vbytes);
   }
#  endif
}


/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage.           ---*/
/*------------------------------------------------------------*/

static UInt vgmext_rd_V4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;
   UInt vb0, vb1, vb2, vb3;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which obscures the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!SK_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      SK_(record_address_error)( a, 4, False );
      return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
             | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(SK_(clo_partial_loads_ok));
   {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}

static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(71);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;

   /* Store the V bytes, remembering to do it little-endian-ly. */
   set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+3, vbytes & 0x000000FF );

   /* If an address error has happened, report it. */
   if (aerr)
      SK_(record_address_error)( a, 4, True );
}

static UInt vgmext_rd_V2_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   UInt vw   = VGM_WORD_INVALID;
   Bool aerr = False;
   PROF_EVENT(72);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* Fetch the V bytes, remembering to do it little-endian-ly. */
   vw <<= 8; vw |= (UInt)get_vbyte(a+1);
   vw <<= 8; vw |= (UInt)get_vbyte(a+0);

   /* If an address error has happened, report it. */
   if (aerr) {
      SK_(record_address_error)( a, 2, False );
      vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
           | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
   }
   return vw;
}

static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(73);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* Store the V bytes, remembering to do it little-endian-ly. */
   set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+1, vbytes & 0x000000FF );

   /* If an address error has happened, report it. */
   if (aerr)
      SK_(record_address_error)( a, 2, True );
}

static UInt vgmext_rd_V1_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   UInt vw   = VGM_WORD_INVALID;
   Bool aerr = False;
   PROF_EVENT(74);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* Fetch the V byte. */
   vw <<= 8; vw |= (UInt)get_vbyte(a+0);

   /* If an address error has happened, report it. */
   if (aerr) {
      SK_(record_address_error)( a, 1, False );
      vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
           | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
   }
   return vw;
}

static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(75);
   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* Store the V bytes, remembering to do it little-endian-ly. */
   set_vbyte( a+0, vbytes & 0x000000FF );

   /* If an address error has happened, report it. */
   if (aerr)
      SK_(record_address_error)( a, 1, True );
}


/* ---------------------------------------------------------------------
   Called from generated code, or from the assembly helpers.
   Handlers for value check failures.
   ------------------------------------------------------------------ */

void SK_(helperc_value_check0_fail) ( void )
{
   SK_(record_value_error) ( 0 );
}

void SK_(helperc_value_check1_fail) ( void )
{
   SK_(record_value_error) ( 1 );
}

void SK_(helperc_value_check2_fail) ( void )
{
   SK_(record_value_error) ( 2 );
}

void SK_(helperc_value_check4_fail) ( void )
{
   SK_(record_value_error) ( 4 );
}


/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */

__attribute__ ((regparm(2)))
void SK_(fpu_read_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to fpu_read_check_SLOWLY for everything else.  */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   fpu_read_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 10) {
      PROF_EVENT(84);
      fpu_read_check_SLOWLY ( addr, 10 );
      return;
   }

   if (size == 28 || size == 108) {
      PROF_EVENT(84); /* XXX assign correct event number */
      fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("vgmext_fpu_read_check: unhandled size");
#  endif
}


__attribute__ ((regparm(2)))
void SK_(fpu_write_check) ( Addr addr, Int size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, make it valid, otherwise invalid.
   */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   fpu_write_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 10) {
      PROF_EVENT(89);
      fpu_write_check_SLOWLY ( addr, 10 );
      return;
   }

   if (size == 28 || size == 108) {
      PROF_EVENT(89); /* XXX assign correct event number */
      fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("vgmext_fpu_write_check: unhandled size");
#  endif
}


/* ---------------------------------------------------------------------
   Slow, general cases for FPU load and store checks.
   ------------------------------------------------------------------ */

/* Generic version.  Test for both addr and value errors, but if
   there's an addr error, don't report a value error even if it
   exists. */

void fpu_read_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Bool aerr = False;
   Bool verr = False;
   PROF_EVENT(90);
   for (i = 0; i < size; i++) {
      PROF_EVENT(91);
      if (get_abit(addr+i) != VGM_BIT_VALID)
         aerr = True;
      if (get_vbyte(addr+i) != VGM_BYTE_VALID)
         verr = True;
   }

   if (aerr) {
      SK_(record_address_error)( addr, size, False );
   } else {
      if (verr)
         SK_(record_value_error)( size );
   }
}


/* Generic version.  Test for addr errors.  Valid addresses are
   given valid values, and invalid addresses invalid values. */

void fpu_write_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Addr a_here;
   Bool a_ok;
   Bool aerr = False;
   PROF_EVENT(92);
   for (i = 0; i < size; i++) {
      PROF_EVENT(93);
      a_here = addr+i;
      a_ok = get_abit(a_here) == VGM_BIT_VALID;
      if (a_ok) {
         set_vbyte(a_here, VGM_BYTE_VALID);
      } else {
         set_vbyte(a_here, VGM_BYTE_INVALID);
         aerr = True;
      }
   }
   if (aerr) {
      SK_(record_address_error)( addr, size, True );
   }
}

/*------------------------------------------------------------*/
/*--- Shadow chunks info                                   ---*/
/*------------------------------------------------------------*/

static __inline__
void set_where( ShadowChunk* sc, ExeContext* ec )
{
   sc->skin_extra[0] = (UInt)ec;
}

static __inline__
ExeContext *get_where( ShadowChunk* sc )
{
   return (ExeContext*)sc->skin_extra[0];
}

void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   set_where( sc, VG_(get_ExeContext) ( tst ) );
}

/*------------------------------------------------------------*/
/*--- Postponing free()ing                                 ---*/
/*------------------------------------------------------------*/

/* Holds blocks after freeing. */
static ShadowChunk* vg_freed_list_start  = NULL;
static ShadowChunk* vg_freed_list_end    = NULL;
static Int          vg_freed_list_volume = 0;

static __attribute__ ((unused))
       Int count_freelist ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n++;
   return n;
}

static __attribute__ ((unused))
       void freelist_sanity ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   /* VG_(printf)("freelist sanity\n"); */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n += sc->size;
   sk_assert(n == vg_freed_list_volume);
}

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (vg_freed_list_end == NULL) {
      sk_assert(vg_freed_list_start == NULL);
      vg_freed_list_end = vg_freed_list_start = sc;
      vg_freed_list_volume = sc->size;
   } else {
      sk_assert(vg_freed_list_end->next == NULL);
      vg_freed_list_end->next = sc;
      vg_freed_list_end = sc;
      vg_freed_list_volume += sc->size;
   }
   sc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(vg_freed_list_start != NULL);
      sk_assert(vg_freed_list_end != NULL);

      sc1 = vg_freed_list_start;
      vg_freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
      sk_assert(vg_freed_list_volume >= 0);

      if (vg_freed_list_start == vg_freed_list_end) {
         vg_freed_list_start = vg_freed_list_end = NULL;
      } else {
         vg_freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
ShadowChunk* SK_(any_matching_freed_ShadowChunks)
                        ( Bool (*p) ( ShadowChunk* ))
{
   ShadowChunk* sc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      if (p(sc))
         return sc;

   return NULL;
}

void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
{
   /* Record where freed */
   set_where( sc, VG_(get_ExeContext) ( tst ) );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
}


/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* For the memory leak detector, say whether an entire 64k chunk of
   address space is possibly in use, or not.  If in doubt return
   True.
*/
static
Bool mc_is_valid_64k_chunk ( UInt chunk_number )
{
   sk_assert(chunk_number >= 0 && chunk_number < 65536);
   if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
      /* Definitely not in use. */
      return False;
   } else {
      return True;
   }
}


/* For the memory leak detector, say whether or not a given word
   address is to be regarded as valid. */
static
Bool mc_is_valid_address ( Addr a )
{
   UInt  vbytes;
   UChar abits;
   sk_assert(IS_ALIGNED4_ADDR(a));
   abits  = get_abits4_ALIGNED(a);
   vbytes = get_vbytes4_ALIGNED(a);
   if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
      return True;
   } else {
      return False;
   }
}


/* Leak detector for this skin.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   skin. */
void SK_(detect_memory_leaks) ( void )
{
   VG_(generic_detect_memory_leaks) (
      mc_is_valid_64k_chunk,
      mc_is_valid_address,
      get_where,
      SK_(clo_leak_resolution),
      SK_(clo_show_reachable)
   );
}


/* ---------------------------------------------------------------------
   Sanity check machinery (permanently engaged).
   ------------------------------------------------------------------ */

/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failure of
   the following does not per se indicate an internal consistency
   problem, but it is so likely to that we really want to know
   about it if so. */

Bool SK_(cheap_sanity_check) ( void )
{
   if (IS_DISTINGUISHED_SM(primary_map[0]) &&
       IS_DISTINGUISHED_SM(primary_map[65535]))
      return True;
   else
      return False;
}

Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   for (i = 0; i < 65536; i++)
      if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map hasn't
      been messed with. */
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != &distinguished_secondary_map)
         return False;

   return True;
}

/* ---------------------------------------------------------------------
   Debugging machinery (turn on to debug).  Something of a mess.
   ------------------------------------------------------------------ */

#if 0
/* Print the value tags on the 8 integer registers & flag reg. */

static void uint_to_bits ( UInt x, Char* str )
{
   Int i;
   Int w = 0;
   /* str must point to a space of at least 36 bytes. */
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   str[w++] = 0;
   sk_assert(w == 36);
}

/* Caution!  Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressible words.  Useful for establishing where Valgrind's idea of
   addressibility has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif




#if 0
static Int zzz = 0;

void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   vg_show_reg_tags();
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */

1819/*------------------------------------------------------------*/
1820/*--- Syscall wrappers ---*/
1821/*------------------------------------------------------------*/
1822
1823void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
1824{
1825 Int sane = SK_(cheap_sanity_check)();
1826 return (void*)sane;
1827}
1828
1829void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
1830 void* pre_result, Int res, Bool isBlocking )
1831{
1832 Int sane_before_call = (Int)pre_result;
1833 Bool sane_after_call = SK_(cheap_sanity_check)();
1834
1835 if ((Int)sane_before_call && (!sane_after_call)) {
1836 VG_(message)(Vg_DebugMsg, "post-syscall: ");
1837 VG_(message)(Vg_DebugMsg,
1838 "probable sanity check failure for syscall number %d\n",
1839 syscallno );
njne427a662002-10-02 11:08:25 +00001840 VG_(skin_panic)("aborting due to the above ... bye!");
njn25e49d8e72002-09-23 09:36:25 +00001841 }
1842}
1843
1844
1845/*------------------------------------------------------------*/
1846/*--- Command line args ---*/
1847/*------------------------------------------------------------*/
1848
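/* Shadow values the core should install for registers it writes on
   the skin's behalf: general registers and %eflags start out fully
   valid (VGM_WORD_VALID / VGM_EFLAGS_VALID). */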
1849void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
1850{
1851 *gen_reg_value = VGM_WORD_VALID;
1852 *eflags_value = VGM_EFLAGS_VALID;
1853}
1854
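/* Handle one Memcheck-specific command line option.  Returning False
   tells the core the option was not recognised here.  For example, a
   run might add (illustrative values only):
      --leak-check=yes --leak-resolution=high --freelist-vol=500000
   Any of the options listed in SK_(usage) below can be combined. */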
1855Bool SK_(process_cmd_line_option)(Char* arg)
1856{
1857# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
1858# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
1859
1860 if (STREQ(arg, "--partial-loads-ok=yes"))
1861 SK_(clo_partial_loads_ok) = True;
1862 else if (STREQ(arg, "--partial-loads-ok=no"))
1863 SK_(clo_partial_loads_ok) = False;
1864
1865 else if (STREQN(15, arg, "--freelist-vol=")) {
1866 SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
1867 if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
1868 }
1869
1870 else if (STREQ(arg, "--leak-check=yes"))
1871 SK_(clo_leak_check) = True;
1872 else if (STREQ(arg, "--leak-check=no"))
1873 SK_(clo_leak_check) = False;
1874
1875 else if (STREQ(arg, "--leak-resolution=low"))
1876 SK_(clo_leak_resolution) = Vg_LowRes;
1877 else if (STREQ(arg, "--leak-resolution=med"))
1878 SK_(clo_leak_resolution) = Vg_MedRes;
1879 else if (STREQ(arg, "--leak-resolution=high"))
1880 SK_(clo_leak_resolution) = Vg_HighRes;
1881
1882 else if (STREQ(arg, "--show-reachable=yes"))
1883 SK_(clo_show_reachable) = True;
1884 else if (STREQ(arg, "--show-reachable=no"))
1885 SK_(clo_show_reachable) = False;
1886
1887 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
1888 SK_(clo_workaround_gcc296_bugs) = True;
1889 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
1890 SK_(clo_workaround_gcc296_bugs) = False;
1891
1892 else if (STREQ(arg, "--check-addrVs=yes"))
1893 SK_(clo_check_addrVs) = True;
1894 else if (STREQ(arg, "--check-addrVs=no"))
1895 SK_(clo_check_addrVs) = False;
1896
1897 else if (STREQ(arg, "--cleanup=yes"))
1898 SK_(clo_cleanup) = True;
1899 else if (STREQ(arg, "--cleanup=no"))
1900 SK_(clo_cleanup) = False;
1901
sewardj8ec2cfc2002-10-13 00:57:26 +00001902 else if (STREQ(arg, "--avoid-strlen-errors=yes"))
1903 SK_(clo_avoid_strlen_errors) = True;
1904 else if (STREQ(arg, "--avoid-strlen-errors=no"))
1905 SK_(clo_avoid_strlen_errors) = False;
1906
njn25e49d8e72002-09-23 09:36:25 +00001907 else
1908 return False;
1909
1910 return True;
1911
1912#undef STREQ
1913#undef STREQN
1914}
1915
1916Char* SK_(usage)(void)
1917{
1918 return
1919" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
1920" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
1921" --leak-check=no|yes search for memory leaks at exit? [no]\n"
1922" --leak-resolution=low|med|high\n"
1923" amount of bt merging in leak check [low]\n"
1924" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
1925" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
1926" --check-addrVs=no|yes experimental lighterweight checking? [yes]\n"
1927" yes == Valgrind's original behaviour\n"
1928"\n"
sewardj8ec2cfc2002-10-13 00:57:26 +00001929" --cleanup=no|yes clean up code after instrumentation? [yes]\n"
1930" --avoid-strlen-errors=no|yes suppress errors from inlined strlen [yes]\n";
njn25e49d8e72002-09-23 09:36:25 +00001931}
1932
1933
1934/*------------------------------------------------------------*/
1935/*--- Setup ---*/
1936/*------------------------------------------------------------*/
1937
njnd04b7c62002-10-03 14:05:52 +00001938void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
njn25e49d8e72002-09-23 09:36:25 +00001939{
sewardj34eccb12002-10-05 16:49:09 +00001940 details->name = "Memcheck";
njnd04b7c62002-10-03 14:05:52 +00001941 details->version = NULL;
sewardj34eccb12002-10-05 16:49:09 +00001942 details->description = "a.k.a. Valgrind, a memory error detector";
njnd04b7c62002-10-03 14:05:52 +00001943 details->copyright_author =
1944 "Copyright (C) 2000-2002, and GNU GPL'd, by Julian Seward.";
1945 details->bug_reports_to = "jseward@acm.org";
njn25e49d8e72002-09-23 09:36:25 +00001946
njnd04b7c62002-10-03 14:05:52 +00001947 needs->core_errors = True;
1948 needs->skin_errors = True;
1949 needs->libc_freeres = True;
1950 needs->sizeof_shadow_block = 1;
1951 needs->basic_block_discards = False;
1952 needs->shadow_regs = True;
1953 needs->command_line_options = True;
1954 needs->client_requests = True;
1955 needs->extended_UCode = True;
1956 needs->syscall_wrapper = True;
1957 needs->alternative_free = True;
1958 needs->sanity_checks = True;
njn25e49d8e72002-09-23 09:36:25 +00001959
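   /* Events to track: memory coming into existence is made writable
      (addressable, V bits undefined) or fully readable where its
      contents are known to be initialised; memory going out of
      existence or banned from use is made inaccessible; and reads,
      writes and asciiz reads done on the client's behalf are checked
      before the access, with written ranges marked readable after. */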
njn25e49d8e72002-09-23 09:36:25 +00001960 track->new_mem_startup = & memcheck_new_mem_startup;
1961 track->new_mem_heap = & memcheck_new_mem_heap;
1962 track->new_mem_stack = & SK_(make_writable);
1963 track->new_mem_stack_aligned = & make_writable_aligned;
1964 track->new_mem_stack_signal = & SK_(make_writable);
1965 track->new_mem_brk = & SK_(make_writable);
1966 track->new_mem_mmap = & memcheck_set_perms;
1967
1968 track->copy_mem_heap = & copy_address_range_state;
1969 track->copy_mem_remap = & copy_address_range_state;
1970 track->change_mem_mprotect = & memcheck_set_perms;
1971
1972 track->ban_mem_heap = & SK_(make_noaccess);
1973 track->ban_mem_stack = & SK_(make_noaccess);
1974
1975 track->die_mem_heap = & SK_(make_noaccess);
1976 track->die_mem_stack = & SK_(make_noaccess);
1977 track->die_mem_stack_aligned = & make_noaccess_aligned;
1978 track->die_mem_stack_signal = & SK_(make_noaccess);
1979 track->die_mem_brk = & SK_(make_noaccess);
1980 track->die_mem_munmap = & SK_(make_noaccess);
1981
1982 track->bad_free = & SK_(record_free_error);
1983 track->mismatched_free = & SK_(record_freemismatch_error);
1984
1985 track->pre_mem_read = & check_is_readable;
1986 track->pre_mem_read_asciiz = & check_is_readable_asciiz;
1987 track->pre_mem_write = & check_is_writable;
1988 track->post_mem_write = & SK_(make_readable);
1989
njnd04b7c62002-10-03 14:05:52 +00001990 VG_(register_compact_helper)((Addr) & SK_(helper_value_check4_fail));
1991 VG_(register_compact_helper)((Addr) & SK_(helper_value_check0_fail));
1992 VG_(register_compact_helper)((Addr) & SK_(helper_value_check2_fail));
1993 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV4));
1994 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV1));
1995 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV4));
1996 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV1));
njn25e49d8e72002-09-23 09:36:25 +00001997
njnd04b7c62002-10-03 14:05:52 +00001998 /* These two made non-compact because 2-byte transactions are rare. */
1999 VG_(register_noncompact_helper)((Addr) & SK_(helperc_STOREV2));
2000 VG_(register_noncompact_helper)((Addr) & SK_(helperc_LOADV2));
2001 VG_(register_noncompact_helper)((Addr) & SK_(fpu_write_check));
2002 VG_(register_noncompact_helper)((Addr) & SK_(fpu_read_check));
2003 VG_(register_noncompact_helper)((Addr) & SK_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00002004
2005 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2006 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njnd04b7c62002-10-03 14:05:52 +00002007
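   /* Finally, set up the A/V bitmaps and the (optional, compile-time
      enabled) memory profiling counters. */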
2008 init_shadow_memory();
2009 init_prof_mem();
njn25e49d8e72002-09-23 09:36:25 +00002010}
2011
2012/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002013/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002014/*--------------------------------------------------------------------*/