/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                   mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */
//#include "vg_profile.c"

VG_DETERMINE_INTERFACE_VERSION

/* Define to debug the mem audit system. */
/* #define VG_DEBUG_MEMORY */

/* Define to collect detailed performance info. */
/* #define VG_PROFILE_MEMORY */

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

Bool  SK_(clo_partial_loads_ok)       = True;
Int   SK_(clo_freelist_vol)           = 1000000;
Bool  SK_(clo_leak_check)             = False;
VgRes SK_(clo_leak_resolution)        = Vg_LowRes;
Bool  SK_(clo_show_reachable)         = False;
Bool  SK_(clo_workaround_gcc296_bugs) = False;
Bool  SK_(clo_check_addrVs)           = True;
Bool  SK_(clo_cleanup)                = True;
Bool  SK_(clo_avoid_strlen_errors)    = True;

/*------------------------------------------------------------*/
/*--- Profiling events                                     ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      VgpCheckMem = VgpFini+1,
      VgpSetMem
   }
   VgpSkinCC;

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by nine bits, one indicating
   accessibility, the other eight validity.  So each second-level map
   contains 73728 bytes.  This two-level arrangement conveniently
   divides the 4G address space into 64k lumps, each of size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible and
   not valid' for all bytes.  Entries in the primary map for which
   the entire 64k is not in use at all point at this distinguished
   map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

    f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
        = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
        = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits
   (which of the two orderings results does not matter).  If either
   of these two bits is nonzero, the address is misaligned; this will
   select a secondary map from the upper 3/4 of the primary map.
   Because this is always the distinguished secondary map, a (bogus)
   address check failure will result.  The failure handling code can
   then figure out whether this is a genuine addr check failure or
   whether it is a possibly-legitimate access at a misaligned
   address.
*/

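/* Illustrative sketch (not part of the original skin): how a plain
   byte-granularity lookup against the two-level map works, and how
   f(addr) is realised for the 4-byte fast path.  The function names
   below are invented for illustration; the real code uses the
   primary_map[]/SecMap machinery, the BITARR_* macros and
   rotateRight16(), all defined further down in this file. */
#if 0
static UChar example_get_abit_sketch ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];   /* top 16 bits pick the secondary map */
   UInt    sm_off = a & 0xFFFF;             /* low 16 bits index within it        */
   /* One A bit per byte, packed eight to a UChar in sm->abits. */
   return BITARR_TEST(sm->abits, sm_off) ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static UInt example_f_addr_sketch ( Addr a )
{
   /* f(addr): rotate the low 2 bits of the address above its high 16
      bits.  For a 4-aligned address the result is < 65536 and indexes
      the lower quarter of the primary map; for a misaligned address
      it lands in the upper 3/4, which always holds the distinguished
      secondary map, so the fast-path helpers fall through to the
      _SLOWLY routines. */
   return ((a >> 16) | (a << 16)) & 0x3FFFF;
}
#endif
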
123/*------------------------------------------------------------*/
124/*--- Crude profiling machinery. ---*/
125/*------------------------------------------------------------*/
126
127#ifdef VG_PROFILE_MEMORY
128
129#define N_PROF_EVENTS 150
130
131static UInt event_ctr[N_PROF_EVENTS];
132
133static void init_prof_mem ( void )
134{
135 Int i;
136 for (i = 0; i < N_PROF_EVENTS; i++)
137 event_ctr[i] = 0;
138}
139
140static void done_prof_mem ( void )
141{
142 Int i;
143 for (i = 0; i < N_PROF_EVENTS; i++) {
144 if ((i % 10) == 0)
145 VG_(printf)("\n");
146 if (event_ctr[i] > 0)
147 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
148 }
149 VG_(printf)("\n");
150}
151
#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False)
156
157#else
158
159static void init_prof_mem ( void ) { }
160static void done_prof_mem ( void ) { }
161
162#define PROF_EVENT(ev) /* */
163
164#endif
165
166/* Event index. If just the name of the fn is given, this means the
167 number of calls to the fn. Otherwise it is the specified event.
168
169 10 alloc_secondary_map
170
171 20 get_abit
172 21 get_vbyte
173 22 set_abit
174 23 set_vbyte
175 24 get_abits4_ALIGNED
176 25 get_vbytes4_ALIGNED
177
178 30 set_address_range_perms
179 31 set_address_range_perms(lower byte loop)
180 32 set_address_range_perms(quadword loop)
181 33 set_address_range_perms(upper byte loop)
182
183 35 make_noaccess
184 36 make_writable
185 37 make_readable
186
187 40 copy_address_range_state
188 41 copy_address_range_state(byte loop)
189 42 check_writable
190 43 check_writable(byte loop)
191 44 check_readable
192 45 check_readable(byte loop)
193 46 check_readable_asciiz
194 47 check_readable_asciiz(byte loop)
195
196 50 make_aligned_word_NOACCESS
197 51 make_aligned_word_WRITABLE
198
199 60 helperc_LOADV4
200 61 helperc_STOREV4
201 62 helperc_LOADV2
202 63 helperc_STOREV2
203 64 helperc_LOADV1
204 65 helperc_STOREV1
205
206 70 rim_rd_V4_SLOWLY
207 71 rim_wr_V4_SLOWLY
208 72 rim_rd_V2_SLOWLY
209 73 rim_wr_V2_SLOWLY
210 74 rim_rd_V1_SLOWLY
211 75 rim_wr_V1_SLOWLY
212
213 80 fpu_read
214 81 fpu_read aligned 4
215 82 fpu_read aligned 8
216 83 fpu_read 2
217 84 fpu_read 10
218
219 85 fpu_write
220 86 fpu_write aligned 4
221 87 fpu_write aligned 8
222 88 fpu_write 2
223 89 fpu_write 10
224
225 90 fpu_read_check_SLOWLY
226 91 fpu_read_check_SLOWLY(byte loop)
227 92 fpu_write_check_SLOWLY
228 93 fpu_write_check_SLOWLY(byte loop)
229
230 100 is_plausible_stack_addr
231 101 handle_esp_assignment
232 102 handle_esp_assignment(-4)
233 103 handle_esp_assignment(+4)
234 104 handle_esp_assignment(-12)
235 105 handle_esp_assignment(-8)
236 106 handle_esp_assignment(+16)
237 107 handle_esp_assignment(+12)
238 108 handle_esp_assignment(0)
239 109 handle_esp_assignment(+8)
240 110 handle_esp_assignment(-16)
241 111 handle_esp_assignment(+20)
242 112 handle_esp_assignment(-20)
243 113 handle_esp_assignment(+24)
244 114 handle_esp_assignment(-24)
245
246 120 vg_handle_esp_assignment_SLOWLY
247 121 vg_handle_esp_assignment_SLOWLY(normal; move down)
248 122 vg_handle_esp_assignment_SLOWLY(normal; move up)
249 123 vg_handle_esp_assignment_SLOWLY(normal)
250 124 vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
251*/
252
253/*------------------------------------------------------------*/
254/*--- Function declarations. ---*/
255/*------------------------------------------------------------*/
256
257static UInt vgmext_rd_V4_SLOWLY ( Addr a );
258static UInt vgmext_rd_V2_SLOWLY ( Addr a );
259static UInt vgmext_rd_V1_SLOWLY ( Addr a );
260static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes );
261static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes );
262static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes );
263static void fpu_read_check_SLOWLY ( Addr addr, Int size );
264static void fpu_write_check_SLOWLY ( Addr addr, Int size );
265
266/*------------------------------------------------------------*/
267/*--- Data defns. ---*/
268/*------------------------------------------------------------*/
269
270typedef
271 struct {
272 UChar abits[8192];
273 UChar vbyte[65536];
274 }
275 SecMap;
276
277static SecMap* primary_map[ /*65536*/ 262144 ];
278static SecMap distinguished_secondary_map;
279
280#define IS_DISTINGUISHED_SM(smap) \
281 ((smap) == &distinguished_secondary_map)
282
283#define ENSURE_MAPPABLE(addr,caller) \
284 do { \
285 if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
286 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
287 /* VG_(printf)("new 2map because of %p\n", addr); */ \
288 } \
289 } while(0)
290
291#define BITARR_SET(aaa_p,iii_p) \
292 do { \
293 UInt iii = (UInt)iii_p; \
294 UChar* aaa = (UChar*)aaa_p; \
295 aaa[iii >> 3] |= (1 << (iii & 7)); \
296 } while (0)
297
298#define BITARR_CLEAR(aaa_p,iii_p) \
299 do { \
300 UInt iii = (UInt)iii_p; \
301 UChar* aaa = (UChar*)aaa_p; \
302 aaa[iii >> 3] &= ~(1 << (iii & 7)); \
303 } while (0)
304
#define BITARR_TEST(aaa_p,iii_p)                        \
   (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ]         \
          & (1 << (((UInt)iii_p) & 7))))


#define VGM_BIT_VALID      0
#define VGM_BIT_INVALID    1

#define VGM_NIBBLE_VALID   0
#define VGM_NIBBLE_INVALID 0xF

#define VGM_BYTE_VALID     0
#define VGM_BYTE_INVALID   0xFF

#define VGM_WORD_VALID     0
#define VGM_WORD_INVALID   0xFFFFFFFF

#define VGM_EFLAGS_VALID   0xFFFFFFFE
#define VGM_EFLAGS_INVALID 0xFFFFFFFF   /* not used */

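/* Worked example (illustrative, not from the original sources): a byte
   that is addressible but uninitialised -- e.g. freshly malloc'd --
   carries A bit VGM_BIT_VALID (0) together with V byte
   VGM_BYTE_INVALID (0xFF); making it readable flips the V byte to
   VGM_BYTE_VALID (0).  The sketch below shows how such a byte would be
   recorded; the function name is invented, and real code would first
   use ENSURE_MAPPABLE so the secondary map is not the distinguished
   one. */
#if 0
static void example_mark_writable_byte_sketch ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = a & 0xFFFF;
   BITARR_CLEAR(sm->abits, sm_off);         /* A bit -> 0: addressible         */
   sm->vbyte[sm_off] = VGM_BYTE_INVALID;    /* V byte -> 0xFF: value undefined */
}
#endif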
325
326static void init_shadow_memory ( void )
327{
328 Int i;
329
330 for (i = 0; i < 8192; i++) /* Invalid address */
331 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
332 for (i = 0; i < 65536; i++) /* Invalid Value */
333 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
334
335 /* These entries gradually get overwritten as the used address
336 space expands. */
337 for (i = 0; i < 65536; i++)
338 primary_map[i] = &distinguished_secondary_map;
339
340 /* These ones should never change; it's a bug in Valgrind if they do. */
341 for (i = 65536; i < 262144; i++)
342 primary_map[i] = &distinguished_secondary_map;
343}
344
345void SK_(post_clo_init) ( void )
346{
347}
348
349void SK_(fini) ( void )
350{
351 VG_(print_malloc_stats)();
352
353 if (VG_(clo_verbosity) == 1) {
354 if (!SK_(clo_leak_check))
355 VG_(message)(Vg_UserMsg,
356 "For a detailed leak analysis, rerun with: --leak-check=yes");
357
358 VG_(message)(Vg_UserMsg,
359 "For counts of detected errors, rerun with: -v");
360 }
361 if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();
362
363 done_prof_mem();
364
365 if (0) {
366 VG_(message)(Vg_DebugMsg,
367 "------ Valgrind's client block stats follow ---------------" );
368 SK_(show_client_block_stats)();
369 }
370}
371
372/*------------------------------------------------------------*/
373/*--- Basic bitmap management, reading and writing. ---*/
374/*------------------------------------------------------------*/
375
376/* Allocate and initialise a secondary map. */
377
378static SecMap* alloc_secondary_map ( __attribute__ ((unused))
379 Char* caller )
380{
381 SecMap* map;
382 UInt i;
383 PROF_EVENT(10);
384
385 /* Mark all bytes as invalid access and invalid value. */
386
387 /* It just happens that a SecMap occupies exactly 18 pages --
388 although this isn't important, so the following assert is
389 spurious. */
njne427a662002-10-02 11:08:25 +0000390 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000391 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
392
393 for (i = 0; i < 8192; i++)
394 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
395 for (i = 0; i < 65536; i++)
396 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
397
398 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
399 return map;
400}
401
402
403/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
404
405static __inline__ UChar get_abit ( Addr a )
406{
407 SecMap* sm = primary_map[a >> 16];
408 UInt sm_off = a & 0xFFFF;
409 PROF_EVENT(20);
410# if 0
411 if (IS_DISTINGUISHED_SM(sm))
412 VG_(message)(Vg_DebugMsg,
413 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
414# endif
415 return BITARR_TEST(sm->abits, sm_off)
416 ? VGM_BIT_INVALID : VGM_BIT_VALID;
417}
418
419static __inline__ UChar get_vbyte ( Addr a )
420{
421 SecMap* sm = primary_map[a >> 16];
422 UInt sm_off = a & 0xFFFF;
423 PROF_EVENT(21);
424# if 0
425 if (IS_DISTINGUISHED_SM(sm))
426 VG_(message)(Vg_DebugMsg,
427 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
428# endif
429 return sm->vbyte[sm_off];
430}
431
432static __inline__ void set_abit ( Addr a, UChar abit )
433{
434 SecMap* sm;
435 UInt sm_off;
436 PROF_EVENT(22);
437 ENSURE_MAPPABLE(a, "set_abit");
438 sm = primary_map[a >> 16];
439 sm_off = a & 0xFFFF;
440 if (abit)
441 BITARR_SET(sm->abits, sm_off);
442 else
443 BITARR_CLEAR(sm->abits, sm_off);
444}
445
446static __inline__ void set_vbyte ( Addr a, UChar vbyte )
447{
448 SecMap* sm;
449 UInt sm_off;
450 PROF_EVENT(23);
451 ENSURE_MAPPABLE(a, "set_vbyte");
452 sm = primary_map[a >> 16];
453 sm_off = a & 0xFFFF;
454 sm->vbyte[sm_off] = vbyte;
455}
456
457
458/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
459
460static __inline__ UChar get_abits4_ALIGNED ( Addr a )
461{
462 SecMap* sm;
463 UInt sm_off;
464 UChar abits8;
465 PROF_EVENT(24);
466# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000467 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000468# endif
469 sm = primary_map[a >> 16];
470 sm_off = a & 0xFFFF;
471 abits8 = sm->abits[sm_off >> 3];
472 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
473 abits8 &= 0x0F;
474 return abits8;
475}
476
477static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
478{
479 SecMap* sm = primary_map[a >> 16];
480 UInt sm_off = a & 0xFFFF;
481 PROF_EVENT(25);
482# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000483 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000484# endif
485 return ((UInt*)(sm->vbyte))[sm_off >> 2];
486}
487
488
489/*------------------------------------------------------------*/
490/*--- Setting permissions over address ranges. ---*/
491/*------------------------------------------------------------*/
492
493static void set_address_range_perms ( Addr a, UInt len,
494 UInt example_a_bit,
495 UInt example_v_bit )
496{
497 UChar vbyte, abyte8;
498 UInt vword4, sm_off;
499 SecMap* sm;
500
501 PROF_EVENT(30);
502
503 if (len == 0)
504 return;
505
506 if (len > 100 * 1000 * 1000) {
507 VG_(message)(Vg_UserMsg,
508 "Warning: set address range perms: "
509 "large range %u, a %d, v %d",
510 len, example_a_bit, example_v_bit );
511 }
512
513 VGP_PUSHCC(VgpSetMem);
514
515 /* Requests to change permissions of huge address ranges may
516 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
517 far all legitimate requests have fallen beneath that size. */
518 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000519 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000520
521 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000522 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000523 || example_a_bit == VGM_BIT_INVALID);
njne427a662002-10-02 11:08:25 +0000524 sk_assert(example_v_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000525 || example_v_bit == VGM_BIT_INVALID);
526 if (example_a_bit == VGM_BIT_INVALID)
njne427a662002-10-02 11:08:25 +0000527 sk_assert(example_v_bit == VGM_BIT_INVALID);
njn25e49d8e72002-09-23 09:36:25 +0000528
529 /* The validity bits to write. */
530 vbyte = example_v_bit==VGM_BIT_VALID
531 ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
532
533 /* In order that we can charge through the address space at 8
534 bytes/main-loop iteration, make up some perms. */
535 abyte8 = (example_a_bit << 7)
536 | (example_a_bit << 6)
537 | (example_a_bit << 5)
538 | (example_a_bit << 4)
539 | (example_a_bit << 3)
540 | (example_a_bit << 2)
541 | (example_a_bit << 1)
542 | (example_a_bit << 0);
543 vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
544
545# ifdef VG_DEBUG_MEMORY
546 /* Do it ... */
547 while (True) {
548 PROF_EVENT(31);
549 if (len == 0) break;
550 set_abit ( a, example_a_bit );
551 set_vbyte ( a, vbyte );
552 a++;
553 len--;
554 }
555
556# else
557 /* Slowly do parts preceding 8-byte alignment. */
558 while (True) {
559 PROF_EVENT(31);
560 if (len == 0) break;
561 if ((a % 8) == 0) break;
562 set_abit ( a, example_a_bit );
563 set_vbyte ( a, vbyte );
564 a++;
565 len--;
566 }
567
568 if (len == 0) {
569 VGP_POPCC(VgpSetMem);
570 return;
571 }
njne427a662002-10-02 11:08:25 +0000572 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000573
574 /* Once aligned, go fast. */
575 while (True) {
576 PROF_EVENT(32);
577 if (len < 8) break;
578 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
579 sm = primary_map[a >> 16];
580 sm_off = a & 0xFFFF;
581 sm->abits[sm_off >> 3] = abyte8;
582 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
583 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
584 a += 8;
585 len -= 8;
586 }
587
588 if (len == 0) {
589 VGP_POPCC(VgpSetMem);
590 return;
591 }
njne427a662002-10-02 11:08:25 +0000592 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000593
594 /* Finish the upper fragment. */
595 while (True) {
596 PROF_EVENT(33);
597 if (len == 0) break;
598 set_abit ( a, example_a_bit );
599 set_vbyte ( a, vbyte );
600 a++;
601 len--;
602 }
603# endif
604
605 /* Check that zero page and highest page have not been written to
606 -- this could happen with buggy syscall wrappers. Today
607 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000608 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000609 VGP_POPCC(VgpSetMem);
610}
611
612/* Set permissions for address ranges ... */
613
614void SK_(make_noaccess) ( Addr a, UInt len )
615{
616 PROF_EVENT(35);
617 DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
618 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
619}
620
621void SK_(make_writable) ( Addr a, UInt len )
622{
623 PROF_EVENT(36);
624 DEBUG("SK_(make_writable)(%p, %x)\n", a, len);
625 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
626}
627
628void SK_(make_readable) ( Addr a, UInt len )
629{
630 PROF_EVENT(37);
631 DEBUG("SK_(make_readable)(%p, 0x%x)\n", a, len);
632 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
633}
634
635/* Block-copy permissions (needed for implementing realloc()). */
636
637static void copy_address_range_state ( Addr src, Addr dst, UInt len )
638{
639 UInt i;
640
641 DEBUG("copy_address_range_state\n");
642
643 PROF_EVENT(40);
644 for (i = 0; i < len; i++) {
645 UChar abit = get_abit ( src+i );
646 UChar vbyte = get_vbyte ( src+i );
647 PROF_EVENT(41);
648 set_abit ( dst+i, abit );
649 set_vbyte ( dst+i, vbyte );
650 }
651}
652
653
654/* Check permissions for address range. If inadequate permissions
655 exist, *bad_addr is set to the offending address, so the caller can
656 know what it is. */
657
658Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
659{
660 UInt i;
661 UChar abit;
662 PROF_EVENT(42);
663 for (i = 0; i < len; i++) {
664 PROF_EVENT(43);
665 abit = get_abit(a);
666 if (abit == VGM_BIT_INVALID) {
667 if (bad_addr != NULL) *bad_addr = a;
668 return False;
669 }
670 a++;
671 }
672 return True;
673}
674
675Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
676{
677 UInt i;
678 UChar abit;
679 UChar vbyte;
680
681 PROF_EVENT(44);
682 DEBUG("SK_(check_readable)\n");
683 for (i = 0; i < len; i++) {
684 abit = get_abit(a);
685 vbyte = get_vbyte(a);
686 PROF_EVENT(45);
687 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
688 if (bad_addr != NULL) *bad_addr = a;
689 return False;
690 }
691 a++;
692 }
693 return True;
694}
695
696
697/* Check a zero-terminated ascii string. Tricky -- don't want to
698 examine the actual bytes, to find the end, until we're sure it is
699 safe to do so. */
700
701Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
702{
703 UChar abit;
704 UChar vbyte;
705 PROF_EVENT(46);
706 DEBUG("SK_(check_readable_asciiz)\n");
707 while (True) {
708 PROF_EVENT(47);
709 abit = get_abit(a);
710 vbyte = get_vbyte(a);
711 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
712 if (bad_addr != NULL) *bad_addr = a;
713 return False;
714 }
715 /* Ok, a is safe to read. */
716 if (* ((UChar*)a) == 0) return True;
717 a++;
718 }
719}
720
721
722/*------------------------------------------------------------*/
723/*--- Memory event handlers ---*/
724/*------------------------------------------------------------*/
725
726/* Setting permissions for aligned words. This supports fast stack
727 operations. */
728
729static void make_noaccess_aligned ( Addr a, UInt len )
730{
731 SecMap* sm;
732 UInt sm_off;
733 UChar mask;
734 Addr a_past_end = a + len;
735
736 VGP_PUSHCC(VgpSetMem);
737
738 PROF_EVENT(50);
739# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000740 sk_assert(IS_ALIGNED4_ADDR(a));
741 sk_assert(IS_ALIGNED4_ADDR(len));
njn25e49d8e72002-09-23 09:36:25 +0000742# endif
743
744 for ( ; a < a_past_end; a += 4) {
745 ENSURE_MAPPABLE(a, "make_noaccess_aligned");
746 sm = primary_map[a >> 16];
747 sm_off = a & 0xFFFF;
748 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
749 mask = 0x0F;
750 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
751 /* mask now contains 1s where we wish to make address bits
752 invalid (1s). */
753 sm->abits[sm_off >> 3] |= mask;
754 }
755 VGP_POPCC(VgpSetMem);
756}
757
758static void make_writable_aligned ( Addr a, UInt len )
759{
760 SecMap* sm;
761 UInt sm_off;
762 UChar mask;
763 Addr a_past_end = a + len;
764
765 VGP_PUSHCC(VgpSetMem);
766
767 PROF_EVENT(51);
768# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000769 sk_assert(IS_ALIGNED4_ADDR(a));
770 sk_assert(IS_ALIGNED4_ADDR(len));
njn25e49d8e72002-09-23 09:36:25 +0000771# endif
772
773 for ( ; a < a_past_end; a += 4) {
774 ENSURE_MAPPABLE(a, "make_writable_aligned");
775 sm = primary_map[a >> 16];
776 sm_off = a & 0xFFFF;
777 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
778 mask = 0x0F;
779 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         valid (0s). */
782 sm->abits[sm_off >> 3] &= ~mask;
783 }
784 VGP_POPCC(VgpSetMem);
785}
786
787
788static
789void check_is_writable ( CorePart part, ThreadState* tst,
njn4a540d02002-10-23 12:54:11 +0000790 Char* s, Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000791{
792 Bool ok;
793 Addr bad_addr;
794
795 VGP_PUSHCC(VgpCheckMem);
796
797 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
798 base,base+size-1); */
799 ok = SK_(check_writable) ( base, size, &bad_addr );
800 if (!ok) {
801 switch (part) {
802 case Vg_CoreSysCall:
803 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
804 break;
805
806 case Vg_CorePThread:
807 case Vg_CoreSignal:
808 SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
809 break;
810
811 default:
njne427a662002-10-02 11:08:25 +0000812 VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000813 }
814 }
815
816 VGP_POPCC(VgpCheckMem);
817}
818
819static
820void check_is_readable ( CorePart part, ThreadState* tst,
njn4a540d02002-10-23 12:54:11 +0000821 Char* s, Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000822{
823 Bool ok;
824 Addr bad_addr;
825
826 VGP_PUSHCC(VgpCheckMem);
827
828 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
829 base,base+size-1); */
830 ok = SK_(check_readable) ( base, size, &bad_addr );
831 if (!ok) {
832 switch (part) {
833 case Vg_CoreSysCall:
834 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
835 break;
836
837 case Vg_CorePThread:
838 SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
839 break;
840
841 /* If we're being asked to jump to a silly address, record an error
842 message before potentially crashing the entire system. */
843 case Vg_CoreTranslate:
844 SK_(record_jump_error)( tst, bad_addr );
845 break;
846
847 default:
njne427a662002-10-02 11:08:25 +0000848 VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000849 }
850 }
851 VGP_POPCC(VgpCheckMem);
852}
853
854static
855void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
njn4a540d02002-10-23 12:54:11 +0000856 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000857{
858 Bool ok = True;
859 Addr bad_addr;
860 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
861
862 VGP_PUSHCC(VgpCheckMem);
863
njne427a662002-10-02 11:08:25 +0000864 sk_assert(part == Vg_CoreSysCall);
njn25e49d8e72002-09-23 09:36:25 +0000865 ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
866 if (!ok) {
867 SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
868 }
869
870 VGP_POPCC(VgpCheckMem);
871}
872
873
874static
875void memcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
876{
njn1f3a9092002-10-04 09:22:30 +0000877 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +0000878 DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
879 SK_(make_readable)(a, len);
880}
881
882static
883void memcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
884{
885 if (is_inited) {
886 SK_(make_readable)(a, len);
887 } else {
888 SK_(make_writable)(a, len);
889 }
890}
891
892static
void memcheck_set_perms (Addr a, UInt len,
                         Bool rr, Bool ww, Bool xx)
{
   DEBUG("memcheck_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
         a, len, rr, ww, xx);
   if      (rr) SK_(make_readable)(a, len);
899 else if (ww) SK_(make_writable)(a, len);
900 else SK_(make_noaccess)(a, len);
901}
902
903
904/*------------------------------------------------------------*/
905/*--- Functions called directly from generated code. ---*/
906/*------------------------------------------------------------*/
907
908static __inline__ UInt rotateRight16 ( UInt x )
909{
910 /* Amazingly, gcc turns this into a single rotate insn. */
911 return (x >> 16) | (x << 16);
912}
913
914
915static __inline__ UInt shiftRight16 ( UInt x )
916{
917 return x >> 16;
918}
919
920
921/* Read/write 1/2/4 sized V bytes, and emit an address error if
922 needed. */
923
/* SK_(helperc_{LOAD,STORE}V{1,2,4}) handle the common case fast.
   Under all other circumstances, they defer to the relevant _SLOWLY
   function, which can handle all situations.
*/
928__attribute__ ((regparm(1)))
929UInt SK_(helperc_LOADV4) ( Addr a )
930{
931# ifdef VG_DEBUG_MEMORY
932 return vgmext_rd_V4_SLOWLY(a);
933# else
934 UInt sec_no = rotateRight16(a) & 0x3FFFF;
935 SecMap* sm = primary_map[sec_no];
936 UInt a_off = (a & 0xFFFF) >> 3;
937 UChar abits = sm->abits[a_off];
938 abits >>= (a & 4);
939 abits &= 15;
940 PROF_EVENT(60);
941 if (abits == VGM_NIBBLE_VALID) {
942 /* Handle common case quickly: a is suitably aligned, is mapped,
943 and is addressible. */
944 UInt v_off = a & 0xFFFF;
945 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
946 } else {
947 /* Slow but general case. */
948 return vgmext_rd_V4_SLOWLY(a);
949 }
950# endif
951}
952
953__attribute__ ((regparm(2)))
954void SK_(helperc_STOREV4) ( Addr a, UInt vbytes )
955{
956# ifdef VG_DEBUG_MEMORY
957 vgmext_wr_V4_SLOWLY(a, vbytes);
958# else
959 UInt sec_no = rotateRight16(a) & 0x3FFFF;
960 SecMap* sm = primary_map[sec_no];
961 UInt a_off = (a & 0xFFFF) >> 3;
962 UChar abits = sm->abits[a_off];
963 abits >>= (a & 4);
964 abits &= 15;
965 PROF_EVENT(61);
966 if (abits == VGM_NIBBLE_VALID) {
967 /* Handle common case quickly: a is suitably aligned, is mapped,
968 and is addressible. */
969 UInt v_off = a & 0xFFFF;
970 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
971 } else {
972 /* Slow but general case. */
973 vgmext_wr_V4_SLOWLY(a, vbytes);
974 }
975# endif
976}
977
978__attribute__ ((regparm(1)))
979UInt SK_(helperc_LOADV2) ( Addr a )
980{
981# ifdef VG_DEBUG_MEMORY
982 return vgmext_rd_V2_SLOWLY(a);
983# else
984 UInt sec_no = rotateRight16(a) & 0x1FFFF;
985 SecMap* sm = primary_map[sec_no];
986 UInt a_off = (a & 0xFFFF) >> 3;
987 PROF_EVENT(62);
988 if (sm->abits[a_off] == VGM_BYTE_VALID) {
989 /* Handle common case quickly. */
990 UInt v_off = a & 0xFFFF;
991 return 0xFFFF0000
992 |
993 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
994 } else {
995 /* Slow but general case. */
996 return vgmext_rd_V2_SLOWLY(a);
997 }
998# endif
999}
1000
1001__attribute__ ((regparm(2)))
1002void SK_(helperc_STOREV2) ( Addr a, UInt vbytes )
1003{
1004# ifdef VG_DEBUG_MEMORY
1005 vgmext_wr_V2_SLOWLY(a, vbytes);
1006# else
1007 UInt sec_no = rotateRight16(a) & 0x1FFFF;
1008 SecMap* sm = primary_map[sec_no];
1009 UInt a_off = (a & 0xFFFF) >> 3;
1010 PROF_EVENT(63);
1011 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1012 /* Handle common case quickly. */
1013 UInt v_off = a & 0xFFFF;
1014 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
1015 } else {
1016 /* Slow but general case. */
1017 vgmext_wr_V2_SLOWLY(a, vbytes);
1018 }
1019# endif
1020}
1021
1022__attribute__ ((regparm(1)))
1023UInt SK_(helperc_LOADV1) ( Addr a )
1024{
1025# ifdef VG_DEBUG_MEMORY
1026 return vgmext_rd_V1_SLOWLY(a);
1027# else
1028 UInt sec_no = shiftRight16(a);
1029 SecMap* sm = primary_map[sec_no];
1030 UInt a_off = (a & 0xFFFF) >> 3;
1031 PROF_EVENT(64);
1032 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1033 /* Handle common case quickly. */
1034 UInt v_off = a & 0xFFFF;
1035 return 0xFFFFFF00
1036 |
1037 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
1038 } else {
1039 /* Slow but general case. */
1040 return vgmext_rd_V1_SLOWLY(a);
1041 }
1042# endif
1043}
1044
1045__attribute__ ((regparm(2)))
1046void SK_(helperc_STOREV1) ( Addr a, UInt vbytes )
1047{
1048# ifdef VG_DEBUG_MEMORY
1049 vgmext_wr_V1_SLOWLY(a, vbytes);
1050# else
1051 UInt sec_no = shiftRight16(a);
1052 SecMap* sm = primary_map[sec_no];
1053 UInt a_off = (a & 0xFFFF) >> 3;
1054 PROF_EVENT(65);
1055 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1056 /* Handle common case quickly. */
1057 UInt v_off = a & 0xFFFF;
1058 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
1059 } else {
1060 /* Slow but general case. */
1061 vgmext_wr_V1_SLOWLY(a, vbytes);
1062 }
1063# endif
1064}
1065
1066
1067/*------------------------------------------------------------*/
1068/*--- Fallback functions to handle cases that the above ---*/
1069/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
1070/*------------------------------------------------------------*/
1071
1072static UInt vgmext_rd_V4_SLOWLY ( Addr a )
1073{
1074 Bool a0ok, a1ok, a2ok, a3ok;
1075 UInt vb0, vb1, vb2, vb3;
1076
1077 PROF_EVENT(70);
1078
1079 /* First establish independently the addressibility of the 4 bytes
1080 involved. */
1081 a0ok = get_abit(a+0) == VGM_BIT_VALID;
1082 a1ok = get_abit(a+1) == VGM_BIT_VALID;
1083 a2ok = get_abit(a+2) == VGM_BIT_VALID;
1084 a3ok = get_abit(a+3) == VGM_BIT_VALID;
1085
1086 /* Also get the validity bytes for the address. */
1087 vb0 = (UInt)get_vbyte(a+0);
1088 vb1 = (UInt)get_vbyte(a+1);
1089 vb2 = (UInt)get_vbyte(a+2);
1090 vb3 = (UInt)get_vbyte(a+3);
1091
1092 /* Now distinguish 3 cases */
1093
1094 /* Case 1: the address is completely valid, so:
1095 - no addressing error
1096 - return V bytes as read from memory
1097 */
1098 if (a0ok && a1ok && a2ok && a3ok) {
1099 UInt vw = VGM_WORD_INVALID;
1100 vw <<= 8; vw |= vb3;
1101 vw <<= 8; vw |= vb2;
1102 vw <<= 8; vw |= vb1;
1103 vw <<= 8; vw |= vb0;
1104 return vw;
1105 }
1106
   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which obscures the fact that the
      error arose in the first place from an invalid address.
   */
1115 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
1116 if (!SK_(clo_partial_loads_ok)
1117 || ((a & 3) != 0)
1118 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
1119 SK_(record_address_error)( a, 4, False );
1120 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
1121 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
1122 }
1123
1124 /* Case 3: the address is partially valid.
1125 - no addressing error
1126 - returned V word is invalid where the address is invalid,
1127 and contains V bytes from memory otherwise.
1128 Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
1129 (which is the default), and the address is 4-aligned.
1130 If not, Case 2 will have applied.
1131 */
njne427a662002-10-02 11:08:25 +00001132 sk_assert(SK_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +00001133 {
1134 UInt vw = VGM_WORD_INVALID;
1135 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
1136 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
1137 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
1138 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
1139 return vw;
1140 }
1141}
1142
1143static void vgmext_wr_V4_SLOWLY ( Addr a, UInt vbytes )
1144{
1145 /* Check the address for validity. */
1146 Bool aerr = False;
1147 PROF_EVENT(71);
1148
1149 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1150 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1151 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1152 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1153
1154 /* Store the V bytes, remembering to do it little-endian-ly. */
1155 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1156 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1157 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1158 set_vbyte( a+3, vbytes & 0x000000FF );
1159
1160 /* If an address error has happened, report it. */
1161 if (aerr)
1162 SK_(record_address_error)( a, 4, True );
1163}
1164
1165static UInt vgmext_rd_V2_SLOWLY ( Addr a )
1166{
1167 /* Check the address for validity. */
1168 UInt vw = VGM_WORD_INVALID;
1169 Bool aerr = False;
1170 PROF_EVENT(72);
1171
1172 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1173 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1174
1175 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1176 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1177 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1178
1179 /* If an address error has happened, report it. */
1180 if (aerr) {
1181 SK_(record_address_error)( a, 2, False );
1182 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1183 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1184 }
1185 return vw;
1186}
1187
1188static void vgmext_wr_V2_SLOWLY ( Addr a, UInt vbytes )
1189{
1190 /* Check the address for validity. */
1191 Bool aerr = False;
1192 PROF_EVENT(73);
1193
1194 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1195 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1196
1197 /* Store the V bytes, remembering to do it little-endian-ly. */
1198 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1199 set_vbyte( a+1, vbytes & 0x000000FF );
1200
1201 /* If an address error has happened, report it. */
1202 if (aerr)
1203 SK_(record_address_error)( a, 2, True );
1204}
1205
1206static UInt vgmext_rd_V1_SLOWLY ( Addr a )
1207{
1208 /* Check the address for validity. */
1209 UInt vw = VGM_WORD_INVALID;
1210 Bool aerr = False;
1211 PROF_EVENT(74);
1212
1213 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1214
1215 /* Fetch the V byte. */
1216 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1217
1218 /* If an address error has happened, report it. */
1219 if (aerr) {
1220 SK_(record_address_error)( a, 1, False );
1221 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1222 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1223 }
1224 return vw;
1225}
1226
1227static void vgmext_wr_V1_SLOWLY ( Addr a, UInt vbytes )
1228{
1229 /* Check the address for validity. */
1230 Bool aerr = False;
1231 PROF_EVENT(75);
1232 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1233
1234 /* Store the V bytes, remembering to do it little-endian-ly. */
1235 set_vbyte( a+0, vbytes & 0x000000FF );
1236
1237 /* If an address error has happened, report it. */
1238 if (aerr)
1239 SK_(record_address_error)( a, 1, True );
1240}
1241
1242
1243/* ---------------------------------------------------------------------
1244 Called from generated code, or from the assembly helpers.
1245 Handlers for value check failures.
1246 ------------------------------------------------------------------ */
1247
1248void SK_(helperc_value_check0_fail) ( void )
1249{
1250 SK_(record_value_error) ( 0 );
1251}
1252
1253void SK_(helperc_value_check1_fail) ( void )
1254{
1255 SK_(record_value_error) ( 1 );
1256}
1257
1258void SK_(helperc_value_check2_fail) ( void )
1259{
1260 SK_(record_value_error) ( 2 );
1261}
1262
1263void SK_(helperc_value_check4_fail) ( void )
1264{
1265 SK_(record_value_error) ( 4 );
1266}
1267
1268
1269/* ---------------------------------------------------------------------
1270 FPU load and store checks, called from generated code.
1271 ------------------------------------------------------------------ */
1272
1273__attribute__ ((regparm(2)))
1274void SK_(fpu_read_check) ( Addr addr, Int size )
1275{
1276 /* Ensure the read area is both addressible and valid (ie,
1277 readable). If there's an address error, don't report a value
1278 error too; but if there isn't an address error, check for a
1279 value error.
1280
1281 Try to be reasonably fast on the common case; wimp out and defer
1282 to fpu_read_check_SLOWLY for everything else. */
1283
1284 SecMap* sm;
1285 UInt sm_off, v_off, a_off;
1286 Addr addr4;
1287
1288 PROF_EVENT(80);
1289
1290# ifdef VG_DEBUG_MEMORY
1291 fpu_read_check_SLOWLY ( addr, size );
1292# else
1293
1294 if (size == 4) {
1295 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1296 PROF_EVENT(81);
1297 /* Properly aligned. */
1298 sm = primary_map[addr >> 16];
1299 sm_off = addr & 0xFFFF;
1300 a_off = sm_off >> 3;
1301 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1302 /* Properly aligned and addressible. */
1303 v_off = addr & 0xFFFF;
1304 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1305 goto slow4;
1306 /* Properly aligned, addressible and with valid data. */
1307 return;
1308 slow4:
1309 fpu_read_check_SLOWLY ( addr, 4 );
1310 return;
1311 }
1312
1313 if (size == 8) {
1314 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1315 PROF_EVENT(82);
1316 /* Properly aligned. Do it in two halves. */
1317 addr4 = addr + 4;
1318 /* First half. */
1319 sm = primary_map[addr >> 16];
1320 sm_off = addr & 0xFFFF;
1321 a_off = sm_off >> 3;
1322 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1323 /* First half properly aligned and addressible. */
1324 v_off = addr & 0xFFFF;
1325 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1326 goto slow8;
1327 /* Second half. */
1328 sm = primary_map[addr4 >> 16];
1329 sm_off = addr4 & 0xFFFF;
1330 a_off = sm_off >> 3;
1331 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1332 /* Second half properly aligned and addressible. */
1333 v_off = addr4 & 0xFFFF;
1334 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1335 goto slow8;
1336 /* Both halves properly aligned, addressible and with valid
1337 data. */
1338 return;
1339 slow8:
1340 fpu_read_check_SLOWLY ( addr, 8 );
1341 return;
1342 }
1343
1344 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1345 cases go quickly. */
1346 if (size == 2) {
1347 PROF_EVENT(83);
1348 fpu_read_check_SLOWLY ( addr, 2 );
1349 return;
1350 }
1351
1352 if (size == 10) {
1353 PROF_EVENT(84);
1354 fpu_read_check_SLOWLY ( addr, 10 );
1355 return;
1356 }
1357
1358 if (size == 28 || size == 108) {
1359 PROF_EVENT(84); /* XXX assign correct event number */
sewardjb3243352002-09-27 01:11:36 +00001360 fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001361 return;
1362 }
1363
1364 VG_(printf)("size is %d\n", size);
njne427a662002-10-02 11:08:25 +00001365 VG_(skin_panic)("vgmext_fpu_read_check: unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001366# endif
1367}
1368
1369
1370__attribute__ ((regparm(2)))
1371void SK_(fpu_write_check) ( Addr addr, Int size )
1372{
1373 /* Ensure the written area is addressible, and moan if otherwise.
1374 If it is addressible, make it valid, otherwise invalid.
1375 */
1376
1377 SecMap* sm;
1378 UInt sm_off, v_off, a_off;
1379 Addr addr4;
1380
1381 PROF_EVENT(85);
1382
1383# ifdef VG_DEBUG_MEMORY
1384 fpu_write_check_SLOWLY ( addr, size );
1385# else
1386
1387 if (size == 4) {
1388 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1389 PROF_EVENT(86);
1390 /* Properly aligned. */
1391 sm = primary_map[addr >> 16];
1392 sm_off = addr & 0xFFFF;
1393 a_off = sm_off >> 3;
1394 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1395 /* Properly aligned and addressible. Make valid. */
1396 v_off = addr & 0xFFFF;
1397 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1398 return;
1399 slow4:
1400 fpu_write_check_SLOWLY ( addr, 4 );
1401 return;
1402 }
1403
1404 if (size == 8) {
1405 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1406 PROF_EVENT(87);
1407 /* Properly aligned. Do it in two halves. */
1408 addr4 = addr + 4;
1409 /* First half. */
1410 sm = primary_map[addr >> 16];
1411 sm_off = addr & 0xFFFF;
1412 a_off = sm_off >> 3;
1413 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1414 /* First half properly aligned and addressible. Make valid. */
1415 v_off = addr & 0xFFFF;
1416 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1417 /* Second half. */
1418 sm = primary_map[addr4 >> 16];
1419 sm_off = addr4 & 0xFFFF;
1420 a_off = sm_off >> 3;
1421 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1422 /* Second half properly aligned and addressible. */
1423 v_off = addr4 & 0xFFFF;
1424 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1425 /* Properly aligned, addressible and with valid data. */
1426 return;
1427 slow8:
1428 fpu_write_check_SLOWLY ( addr, 8 );
1429 return;
1430 }
1431
1432 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1433 cases go quickly. */
1434 if (size == 2) {
1435 PROF_EVENT(88);
1436 fpu_write_check_SLOWLY ( addr, 2 );
1437 return;
1438 }
1439
1440 if (size == 10) {
1441 PROF_EVENT(89);
1442 fpu_write_check_SLOWLY ( addr, 10 );
1443 return;
1444 }
1445
1446 if (size == 28 || size == 108) {
1447 PROF_EVENT(89); /* XXX assign correct event number */
sewardjb3243352002-09-27 01:11:36 +00001448 fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001449 return;
1450 }
1451
1452 VG_(printf)("size is %d\n", size);
njne427a662002-10-02 11:08:25 +00001453 VG_(skin_panic)("vgmext_fpu_write_check: unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001454# endif
1455}
1456
1457
1458/* ---------------------------------------------------------------------
1459 Slow, general cases for FPU load and store checks.
1460 ------------------------------------------------------------------ */
1461
1462/* Generic version. Test for both addr and value errors, but if
1463 there's an addr error, don't report a value error even if it
1464 exists. */
1465
1466void fpu_read_check_SLOWLY ( Addr addr, Int size )
1467{
1468 Int i;
1469 Bool aerr = False;
1470 Bool verr = False;
1471 PROF_EVENT(90);
1472 for (i = 0; i < size; i++) {
1473 PROF_EVENT(91);
1474 if (get_abit(addr+i) != VGM_BIT_VALID)
1475 aerr = True;
1476 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1477 verr = True;
1478 }
1479
1480 if (aerr) {
1481 SK_(record_address_error)( addr, size, False );
1482 } else {
1483 if (verr)
1484 SK_(record_value_error)( size );
1485 }
1486}
1487
1488
1489/* Generic version. Test for addr errors. Valid addresses are
1490 given valid values, and invalid addresses invalid values. */
1491
1492void fpu_write_check_SLOWLY ( Addr addr, Int size )
1493{
1494 Int i;
1495 Addr a_here;
1496 Bool a_ok;
1497 Bool aerr = False;
1498 PROF_EVENT(92);
1499 for (i = 0; i < size; i++) {
1500 PROF_EVENT(93);
1501 a_here = addr+i;
1502 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1503 if (a_ok) {
1504 set_vbyte(a_here, VGM_BYTE_VALID);
1505 } else {
1506 set_vbyte(a_here, VGM_BYTE_INVALID);
1507 aerr = True;
1508 }
1509 }
1510 if (aerr) {
1511 SK_(record_address_error)( addr, size, True );
1512 }
1513}
1514
1515/*------------------------------------------------------------*/
1516/*--- Shadow chunks info ---*/
1517/*------------------------------------------------------------*/
1518
1519static __inline__
1520void set_where( ShadowChunk* sc, ExeContext* ec )
1521{
1522 sc->skin_extra[0] = (UInt)ec;
1523}
1524
1525static __inline__
1526ExeContext *get_where( ShadowChunk* sc )
1527{
1528 return (ExeContext*)sc->skin_extra[0];
1529}
1530
1531void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
1532{
1533 set_where( sc, VG_(get_ExeContext) ( tst ) );
1534}
1535
1536/*------------------------------------------------------------*/
1537/*--- Postponing free()ing ---*/
1538/*------------------------------------------------------------*/
1539
1540/* Holds blocks after freeing. */
1541static ShadowChunk* vg_freed_list_start = NULL;
1542static ShadowChunk* vg_freed_list_end = NULL;
1543static Int vg_freed_list_volume = 0;
1544
1545static __attribute__ ((unused))
1546 Int count_freelist ( void )
1547{
1548 ShadowChunk* sc;
1549 Int n = 0;
1550 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1551 n++;
1552 return n;
1553}
1554
1555static __attribute__ ((unused))
1556 void freelist_sanity ( void )
1557{
1558 ShadowChunk* sc;
1559 Int n = 0;
1560 /* VG_(printf)("freelist sanity\n"); */
1561 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1562 n += sc->size;
njne427a662002-10-02 11:08:25 +00001563 sk_assert(n == vg_freed_list_volume);
njn25e49d8e72002-09-23 09:36:25 +00001564}
1565
1566/* Put a shadow chunk on the freed blocks queue, possibly freeing up
1567 some of the oldest blocks in the queue at the same time. */
1568static void add_to_freed_queue ( ShadowChunk* sc )
1569{
1570 ShadowChunk* sc1;
1571
1572 /* Put it at the end of the freed list */
1573 if (vg_freed_list_end == NULL) {
njne427a662002-10-02 11:08:25 +00001574 sk_assert(vg_freed_list_start == NULL);
njn25e49d8e72002-09-23 09:36:25 +00001575 vg_freed_list_end = vg_freed_list_start = sc;
1576 vg_freed_list_volume = sc->size;
1577 } else {
njne427a662002-10-02 11:08:25 +00001578 sk_assert(vg_freed_list_end->next == NULL);
njn25e49d8e72002-09-23 09:36:25 +00001579 vg_freed_list_end->next = sc;
1580 vg_freed_list_end = sc;
1581 vg_freed_list_volume += sc->size;
1582 }
1583 sc->next = NULL;
1584
1585 /* Release enough of the oldest blocks to bring the free queue
1586 volume below vg_clo_freelist_vol. */
1587
1588 while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
1589 /* freelist_sanity(); */
njne427a662002-10-02 11:08:25 +00001590 sk_assert(vg_freed_list_start != NULL);
1591 sk_assert(vg_freed_list_end != NULL);
njn25e49d8e72002-09-23 09:36:25 +00001592
1593 sc1 = vg_freed_list_start;
1594 vg_freed_list_volume -= sc1->size;
1595 /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
njne427a662002-10-02 11:08:25 +00001596 sk_assert(vg_freed_list_volume >= 0);
njn25e49d8e72002-09-23 09:36:25 +00001597
1598 if (vg_freed_list_start == vg_freed_list_end) {
1599 vg_freed_list_start = vg_freed_list_end = NULL;
1600 } else {
1601 vg_freed_list_start = sc1->next;
1602 }
1603 sc1->next = NULL; /* just paranoia */
njn4ba5a792002-09-30 10:23:54 +00001604 VG_(free_ShadowChunk) ( sc1 );
njn25e49d8e72002-09-23 09:36:25 +00001605 }
1606}
1607
1608/* Return the first shadow chunk satisfying the predicate p. */
1609ShadowChunk* SK_(any_matching_freed_ShadowChunks)
1610 ( Bool (*p) ( ShadowChunk* ))
1611{
1612 ShadowChunk* sc;
1613
1614 /* No point looking through freed blocks if we're not keeping
1615 them around for a while... */
1616 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1617 if (p(sc))
1618 return sc;
1619
1620 return NULL;
1621}
1622
1623void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
1624{
1625 /* Record where freed */
1626 set_where( sc, VG_(get_ExeContext) ( tst ) );
1627
1628 /* Put it out of harm's way for a while. */
1629 add_to_freed_queue ( sc );
1630}
1631
njn25e49d8e72002-09-23 09:36:25 +00001632
1633/*------------------------------------------------------------*/
1634/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1635/*------------------------------------------------------------*/
1636
sewardja4495682002-10-21 07:29:59 +00001637/* For the memory leak detector, say whether an entire 64k chunk of
1638 address space is possibly in use, or not. If in doubt return
1639 True.
njn25e49d8e72002-09-23 09:36:25 +00001640*/
sewardja4495682002-10-21 07:29:59 +00001641static
1642Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001643{
sewardja4495682002-10-21 07:29:59 +00001644 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1645 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1646 /* Definitely not in use. */
1647 return False;
1648 } else {
1649 return True;
njn25e49d8e72002-09-23 09:36:25 +00001650 }
1651}
1652
1653
sewardja4495682002-10-21 07:29:59 +00001654/* For the memory leak detector, say whether or not a given word
1655 address is to be regarded as valid. */
1656static
1657Bool mc_is_valid_address ( Addr a )
1658{
1659 UInt vbytes;
1660 UChar abits;
1661 sk_assert(IS_ALIGNED4_ADDR(a));
1662 abits = get_abits4_ALIGNED(a);
1663 vbytes = get_vbytes4_ALIGNED(a);
1664 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1665 return True;
1666 } else {
1667 return False;
1668 }
1669}
1670
1671
1672/* Leak detector for this skin. We don't actually do anything, merely
1673 run the generic leak detector with suitable parameters for this
1674 skin. */
njn25e49d8e72002-09-23 09:36:25 +00001675void SK_(detect_memory_leaks) ( void )
1676{
sewardja4495682002-10-21 07:29:59 +00001677 VG_(generic_detect_memory_leaks) (
1678 mc_is_valid_64k_chunk,
1679 mc_is_valid_address,
1680 get_where,
1681 SK_(clo_leak_resolution),
1682 SK_(clo_show_reachable)
1683 );
njn25e49d8e72002-09-23 09:36:25 +00001684}
1685
1686
1687/* ---------------------------------------------------------------------
1688 Sanity check machinery (permanently engaged).
1689 ------------------------------------------------------------------ */
1690
/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failure of
   the following check does not per se indicate an internal consistency
   problem, but it is so likely to that we really want to know about it
   if so. */
1696
1697Bool SK_(cheap_sanity_check) ( void )
1698{
1699 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
1700 IS_DISTINGUISHED_SM(primary_map[65535]))
1701 return True;
1702 else
1703 return False;
1704}
1705
1706Bool SK_(expensive_sanity_check) ( void )
1707{
1708 Int i;
1709
1710 /* Make sure nobody changed the distinguished secondary. */
1711 for (i = 0; i < 8192; i++)
1712 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1713 return False;
1714
1715 for (i = 0; i < 65536; i++)
1716 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1717 return False;
1718
1719 /* Make sure that the upper 3/4 of the primary map hasn't
1720 been messed with. */
1721 for (i = 65536; i < 262144; i++)
1722 if (primary_map[i] != & distinguished_secondary_map)
1723 return False;
1724
1725 return True;
1726}
1727
1728/* ---------------------------------------------------------------------
1729 Debugging machinery (turn on to debug). Something of a mess.
1730 ------------------------------------------------------------------ */
1731
1732#if 0
1733/* Print the value tags on the 8 integer registers & flag reg. */
1734
1735static void uint_to_bits ( UInt x, Char* str )
1736{
1737 Int i;
1738 Int w = 0;
1739 /* str must point to a space of at least 36 bytes. */
1740 for (i = 31; i >= 0; i--) {
1741 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
1742 if (i == 24 || i == 16 || i == 8)
1743 str[w++] = ' ';
1744 }
1745 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00001746 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00001747}
1748
1749/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
1750 state table. */
1751
1752static void vg_show_reg_tags ( void )
1753{
1754 Char buf1[36];
1755 Char buf2[36];
1756 UInt z_eax, z_ebx, z_ecx, z_edx,
1757 z_esi, z_edi, z_ebp, z_esp, z_eflags;
1758
1759 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
1760 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
1761 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
1762 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
1763 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
1764 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
1765 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
1766 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
1767 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
1768
1769 uint_to_bits(z_eflags, buf1);
njn9b6d34e2002-10-15 08:48:08 +00001770 VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
njn25e49d8e72002-09-23 09:36:25 +00001771
1772 uint_to_bits(z_eax, buf1);
1773 uint_to_bits(z_ebx, buf2);
1774 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
1775
1776 uint_to_bits(z_ecx, buf1);
1777 uint_to_bits(z_edx, buf2);
1778 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
1779
1780 uint_to_bits(z_esi, buf1);
1781 uint_to_bits(z_edi, buf2);
1782 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
1783
1784 uint_to_bits(z_ebp, buf1);
1785 uint_to_bits(z_esp, buf2);
1786 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
1787}
1788
1789
1790/* For debugging only. Scan the address space and touch all allegedly
1791 addressible words. Useful for establishing where Valgrind's idea of
1792 addressibility has diverged from what the kernel believes. */
1793
1794static
1795void zzzmemscan_notify_word ( Addr a, UInt w )
1796{
1797}
1798
1799void zzzmemscan ( void )
1800{
1801 Int n_notifies
1802 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
1803 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
1804}
1805#endif
1806
1807
1808
1809
1810#if 0
1811static Int zzz = 0;
1812
1813void show_bb ( Addr eip_next )
1814{
1815 VG_(printf)("[%4d] ", zzz);
1816 vg_show_reg_tags( &VG_(m_shadow );
1817 VG_(translate) ( eip_next, NULL, NULL, NULL );
1818}
1819#endif /* 0 */
1820
1821/*------------------------------------------------------------*/
1822/*--- Syscall wrappers ---*/
1823/*------------------------------------------------------------*/
1824
1825void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
1826{
1827 Int sane = SK_(cheap_sanity_check)();
1828 return (void*)sane;
1829}
1830
1831void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
1832 void* pre_result, Int res, Bool isBlocking )
1833{
1834 Int sane_before_call = (Int)pre_result;
1835 Bool sane_after_call = SK_(cheap_sanity_check)();
1836
1837 if ((Int)sane_before_call && (!sane_after_call)) {
1838 VG_(message)(Vg_DebugMsg, "post-syscall: ");
1839 VG_(message)(Vg_DebugMsg,
1840 "probable sanity check failure for syscall number %d\n",
1841 syscallno );
njne427a662002-10-02 11:08:25 +00001842 VG_(skin_panic)("aborting due to the above ... bye!");
njn25e49d8e72002-09-23 09:36:25 +00001843 }
1844}
1845
1846
1847/*------------------------------------------------------------*/
1848/*--- Setup ---*/
1849/*------------------------------------------------------------*/
1850
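/* Shadow values the core should install when it itself writes to the
   client's registers: both the general registers and eflags are marked
   as completely valid (defined). */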
1851void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
1852{
1853 *gen_reg_value = VGM_WORD_VALID;
1854 *eflags_value = VGM_EFLAGS_VALID;
1855}
1856
1857Bool SK_(process_cmd_line_option)(Char* arg)
1858{
1859# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
1860# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
1861
1862 if (STREQ(arg, "--partial-loads-ok=yes"))
1863 SK_(clo_partial_loads_ok) = True;
1864 else if (STREQ(arg, "--partial-loads-ok=no"))
1865 SK_(clo_partial_loads_ok) = False;
1866
1867 else if (STREQN(15, arg, "--freelist-vol=")) {
1868 SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
1869 if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
1870 }
1871
1872 else if (STREQ(arg, "--leak-check=yes"))
1873 SK_(clo_leak_check) = True;
1874 else if (STREQ(arg, "--leak-check=no"))
1875 SK_(clo_leak_check) = False;
1876
1877 else if (STREQ(arg, "--leak-resolution=low"))
1878 SK_(clo_leak_resolution) = Vg_LowRes;
1879 else if (STREQ(arg, "--leak-resolution=med"))
1880 SK_(clo_leak_resolution) = Vg_MedRes;
1881 else if (STREQ(arg, "--leak-resolution=high"))
1882 SK_(clo_leak_resolution) = Vg_HighRes;
1883
1884 else if (STREQ(arg, "--show-reachable=yes"))
1885 SK_(clo_show_reachable) = True;
1886 else if (STREQ(arg, "--show-reachable=no"))
1887 SK_(clo_show_reachable) = False;
1888
1889 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
1890 SK_(clo_workaround_gcc296_bugs) = True;
1891 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
1892 SK_(clo_workaround_gcc296_bugs) = False;
1893
1894 else if (STREQ(arg, "--check-addrVs=yes"))
1895 SK_(clo_check_addrVs) = True;
1896 else if (STREQ(arg, "--check-addrVs=no"))
1897 SK_(clo_check_addrVs) = False;
1898
1899 else if (STREQ(arg, "--cleanup=yes"))
1900 SK_(clo_cleanup) = True;
1901 else if (STREQ(arg, "--cleanup=no"))
1902 SK_(clo_cleanup) = False;
1903
sewardj8ec2cfc2002-10-13 00:57:26 +00001904 else if (STREQ(arg, "--avoid-strlen-errors=yes"))
1905 SK_(clo_avoid_strlen_errors) = True;
1906 else if (STREQ(arg, "--avoid-strlen-errors=no"))
1907 SK_(clo_avoid_strlen_errors) = False;
1908
njn25e49d8e72002-09-23 09:36:25 +00001909 else
1910 return False;
1911
1912 return True;
1913
1914#undef STREQ
1915#undef STREQN
1916}
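
/* Purely illustrative -- the core flags and program name below are
   hypothetical, and the skin-selection syntax depends on the Valgrind
   version in use -- an invocation exercising these options might be:

      valgrind --skin=memcheck --leak-check=yes --leak-resolution=high \
               --freelist-vol=500000 --show-reachable=yes ./myprog

   Anything not recognised above makes this function return False, letting
   the core complain about the unknown option. */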
1917
1918Char* SK_(usage)(void)
1919{
1920 return
1921" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
1922" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
1923" --leak-check=no|yes search for memory leaks at exit? [no]\n"
1924" --leak-resolution=low|med|high\n"
1925" amount of bt merging in leak check [low]\n"
1926" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
1927" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
1928" --check-addrVs=no|yes experimental lighterweight checking? [yes]\n"
1929" yes == Valgrind's original behaviour\n"
1930"\n"
sewardj8ec2cfc2002-10-13 00:57:26 +00001931" --cleanup=no|yes improve after instrumentation? [yes]\n"
1932" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n";
njn25e49d8e72002-09-23 09:36:25 +00001933}
1934
1935
1936/*------------------------------------------------------------*/
1937/*--- Setup ---*/
1938/*------------------------------------------------------------*/
1939
njnd04b7c62002-10-03 14:05:52 +00001940void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
njn25e49d8e72002-09-23 09:36:25 +00001941{
sewardj34eccb12002-10-05 16:49:09 +00001942 details->name = "Memcheck";
njnd04b7c62002-10-03 14:05:52 +00001943 details->version = NULL;
sewardj34eccb12002-10-05 16:49:09 +00001944 details->description = "a.k.a. Valgrind, a memory error detector";
njnd04b7c62002-10-03 14:05:52 +00001945 details->copyright_author =
1946 "Copyright (C) 2000-2002, and GNU GPL'd, by Julian Seward.";
1947 details->bug_reports_to = "jseward@acm.org";
njn25e49d8e72002-09-23 09:36:25 +00001948
njnd04b7c62002-10-03 14:05:52 +00001949 needs->core_errors = True;
1950 needs->skin_errors = True;
1951 needs->libc_freeres = True;
1952 needs->sizeof_shadow_block = 1;
1953 needs->basic_block_discards = False;
1954 needs->shadow_regs = True;
1955 needs->command_line_options = True;
1956 needs->client_requests = True;
1957 needs->extended_UCode = True;
1958 needs->syscall_wrapper = True;
1959 needs->alternative_free = True;
1960 needs->sanity_checks = True;
njn25e49d8e72002-09-23 09:36:25 +00001961
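   /* Memory-event callbacks: newly-arrived stack/brk memory becomes
      addressable but undefined (make_writable), memory known to hold
      meaningful data becomes defined (make_readable / set_perms), and
      memory that dies or is put off-limits becomes inaccessible
      (make_noaccess). */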
njn25e49d8e72002-09-23 09:36:25 +00001962 track->new_mem_startup = & memcheck_new_mem_startup;
1963 track->new_mem_heap = & memcheck_new_mem_heap;
1964 track->new_mem_stack = & SK_(make_writable);
1965 track->new_mem_stack_aligned = & make_writable_aligned;
1966 track->new_mem_stack_signal = & SK_(make_writable);
1967 track->new_mem_brk = & SK_(make_writable);
1968 track->new_mem_mmap = & memcheck_set_perms;
1969
1970 track->copy_mem_heap = & copy_address_range_state;
1971 track->copy_mem_remap = & copy_address_range_state;
1972 track->change_mem_mprotect = & memcheck_set_perms;
1973
1974 track->ban_mem_heap = & SK_(make_noaccess);
1975 track->ban_mem_stack = & SK_(make_noaccess);
1976
1977 track->die_mem_heap = & SK_(make_noaccess);
1978 track->die_mem_stack = & SK_(make_noaccess);
1979 track->die_mem_stack_aligned = & make_noaccess_aligned;
1980 track->die_mem_stack_signal = & SK_(make_noaccess);
1981 track->die_mem_brk = & SK_(make_noaccess);
1982 track->die_mem_munmap = & SK_(make_noaccess);
1983
1984 track->bad_free = & SK_(record_free_error);
1985 track->mismatched_free = & SK_(record_freemismatch_error);
1986
1987 track->pre_mem_read = & check_is_readable;
1988 track->pre_mem_read_asciiz = & check_is_readable_asciiz;
1989 track->pre_mem_write = & check_is_writable;
1990 track->post_mem_write = & SK_(make_readable);
1991
njnd04b7c62002-10-03 14:05:52 +00001992 VG_(register_compact_helper)((Addr) & SK_(helper_value_check4_fail));
1993 VG_(register_compact_helper)((Addr) & SK_(helper_value_check0_fail));
1994 VG_(register_compact_helper)((Addr) & SK_(helper_value_check2_fail));
1995 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV4));
1996 VG_(register_compact_helper)((Addr) & SK_(helperc_STOREV1));
1997 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV4));
1998 VG_(register_compact_helper)((Addr) & SK_(helperc_LOADV1));
njn25e49d8e72002-09-23 09:36:25 +00001999
njnd04b7c62002-10-03 14:05:52 +00002000 /* These two made non-compact because 2-byte transactions are rare. */
2001 VG_(register_noncompact_helper)((Addr) & SK_(helperc_STOREV2));
2002 VG_(register_noncompact_helper)((Addr) & SK_(helperc_LOADV2));
2003 VG_(register_noncompact_helper)((Addr) & SK_(fpu_write_check));
2004 VG_(register_noncompact_helper)((Addr) & SK_(fpu_read_check));
2005 VG_(register_noncompact_helper)((Addr) & SK_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00002006
2007 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2008 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njnd04b7c62002-10-03 14:05:52 +00002009
2010 init_shadow_memory();
2011 init_prof_mem();
njn25e49d8e72002-09-23 09:36:25 +00002012}
2013
2014/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002015/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002016/*--------------------------------------------------------------------*/