blob: a8369cfadf8a9d56804c1ee47699e3c30527b399 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
nethercotebb1c9912004-01-04 16:43:23 +000012 Copyright (C) 2000-2004 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
37/* Define to debug the mem audit system. */
38/* #define VG_DEBUG_MEMORY */
39
njn25e49d8e72002-09-23 09:36:25 +000040#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
41
42/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000043/*--- Low-level support for memory checking. ---*/
44/*------------------------------------------------------------*/
45
46/* All reads and writes are checked against a memory map, which
47 records the state of all memory in the process. The memory map is
48 organised like this:
49
50 The top 16 bits of an address are used to index into a top-level
51 map table, containing 65536 entries. Each entry is a pointer to a
52 second-level map, which records the accesibililty and validity
53 permissions for the 65536 bytes indexed by the lower 16 bits of the
54 address. Each byte is represented by nine bits, one indicating
55 accessibility, the other eight validity. So each second-level map
56 contains 73728 bytes. This two-level arrangement conveniently
57 divides the 4G address space into 64k lumps, each size 64k bytes.
58
59 All entries in the primary (top-level) map must point to a valid
60 secondary (second-level) map. Since most of the 4G of address
61 space will not be in use -- ie, not mapped at all -- there is a
62 distinguished secondary map, which indicates `not addressible and
63 not valid' writeable for all bytes. Entries in the primary map for
64 which the entire 64k is not in use at all point at this
65 distinguished map.
66
67 [...] lots of stuff deleted due to out of date-ness
68
69 As a final optimisation, the alignment and address checks for
70 4-byte loads and stores are combined in a neat way. The primary
71 map is extended to have 262144 entries (2^18), rather than 2^16.
72 The top 3/4 of these entries are permanently set to the
73 distinguished secondary map. For a 4-byte load/store, the
74 top-level map is indexed not with (addr >> 16) but instead f(addr),
75 where
76
77 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
78 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
79 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
80
81 ie the lowest two bits are placed above the 16 high address bits.
82 If either of these two bits are nonzero, the address is misaligned;
83 this will select a secondary map from the upper 3/4 of the primary
84 map. Because this is always the distinguished secondary map, a
85 (bogus) address check failure will result. The failure handling
86 code can then figure out whether this is a genuine addr check
87 failure or whether it is a possibly-legitimate access at a
88 misaligned address.
89*/
90
91
92/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000093/*--- Function declarations. ---*/
94/*------------------------------------------------------------*/
95
njn5c004e42002-11-18 11:04:50 +000096static UInt mc_rd_V4_SLOWLY ( Addr a );
97static UInt mc_rd_V2_SLOWLY ( Addr a );
98static UInt mc_rd_V1_SLOWLY ( Addr a );
99static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
100static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
101static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
102static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
103static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000104
105/*------------------------------------------------------------*/
106/*--- Data defns. ---*/
107/*------------------------------------------------------------*/
108
109typedef
110 struct {
111 UChar abits[8192];
112 UChar vbyte[65536];
113 }
114 SecMap;
115
116static SecMap* primary_map[ /*65536*/ 262144 ];
117static SecMap distinguished_secondary_map;
118
njn25e49d8e72002-09-23 09:36:25 +0000119static void init_shadow_memory ( void )
120{
121 Int i;
122
123 for (i = 0; i < 8192; i++) /* Invalid address */
124 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
125 for (i = 0; i < 65536; i++) /* Invalid Value */
126 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
127
128 /* These entries gradually get overwritten as the used address
129 space expands. */
130 for (i = 0; i < 65536; i++)
131 primary_map[i] = &distinguished_secondary_map;
132
133 /* These ones should never change; it's a bug in Valgrind if they do. */
134 for (i = 65536; i < 262144; i++)
135 primary_map[i] = &distinguished_secondary_map;
136}
137
njn25e49d8e72002-09-23 09:36:25 +0000138/*------------------------------------------------------------*/
139/*--- Basic bitmap management, reading and writing. ---*/
140/*------------------------------------------------------------*/
141
142/* Allocate and initialise a secondary map. */
143
144static SecMap* alloc_secondary_map ( __attribute__ ((unused))
145 Char* caller )
146{
147 SecMap* map;
148 UInt i;
149 PROF_EVENT(10);
150
151 /* Mark all bytes as invalid access and invalid value. */
fitzhardinge98abfc72003-12-16 02:05:15 +0000152 map = (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));
njn25e49d8e72002-09-23 09:36:25 +0000153
154 for (i = 0; i < 8192; i++)
155 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
156 for (i = 0; i < 65536; i++)
157 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
158
159 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
160 return map;
161}
162
163
164/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
165
166static __inline__ UChar get_abit ( Addr a )
167{
168 SecMap* sm = primary_map[a >> 16];
169 UInt sm_off = a & 0xFFFF;
170 PROF_EVENT(20);
171# if 0
172 if (IS_DISTINGUISHED_SM(sm))
173 VG_(message)(Vg_DebugMsg,
174 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
175# endif
176 return BITARR_TEST(sm->abits, sm_off)
177 ? VGM_BIT_INVALID : VGM_BIT_VALID;
178}
179
180static __inline__ UChar get_vbyte ( Addr a )
181{
182 SecMap* sm = primary_map[a >> 16];
183 UInt sm_off = a & 0xFFFF;
184 PROF_EVENT(21);
185# if 0
186 if (IS_DISTINGUISHED_SM(sm))
187 VG_(message)(Vg_DebugMsg,
188 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
189# endif
190 return sm->vbyte[sm_off];
191}
192
sewardj56867352003-10-12 10:27:06 +0000193static /* __inline__ */ void set_abit ( Addr a, UChar abit )
njn25e49d8e72002-09-23 09:36:25 +0000194{
195 SecMap* sm;
196 UInt sm_off;
197 PROF_EVENT(22);
198 ENSURE_MAPPABLE(a, "set_abit");
199 sm = primary_map[a >> 16];
200 sm_off = a & 0xFFFF;
201 if (abit)
202 BITARR_SET(sm->abits, sm_off);
203 else
204 BITARR_CLEAR(sm->abits, sm_off);
205}
206
207static __inline__ void set_vbyte ( Addr a, UChar vbyte )
208{
209 SecMap* sm;
210 UInt sm_off;
211 PROF_EVENT(23);
212 ENSURE_MAPPABLE(a, "set_vbyte");
213 sm = primary_map[a >> 16];
214 sm_off = a & 0xFFFF;
215 sm->vbyte[sm_off] = vbyte;
216}
217
218
219/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
220
221static __inline__ UChar get_abits4_ALIGNED ( Addr a )
222{
223 SecMap* sm;
224 UInt sm_off;
225 UChar abits8;
226 PROF_EVENT(24);
227# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000228 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000229# endif
230 sm = primary_map[a >> 16];
231 sm_off = a & 0xFFFF;
232 abits8 = sm->abits[sm_off >> 3];
233 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
234 abits8 &= 0x0F;
235 return abits8;
236}
237
238static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
239{
240 SecMap* sm = primary_map[a >> 16];
241 UInt sm_off = a & 0xFFFF;
242 PROF_EVENT(25);
243# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000244 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000245# endif
246 return ((UInt*)(sm->vbyte))[sm_off >> 2];
247}
248
249
sewardjee070842003-07-05 17:53:55 +0000250static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
251{
252 SecMap* sm;
253 UInt sm_off;
254 ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
255 sm = primary_map[a >> 16];
256 sm_off = a & 0xFFFF;
257 PROF_EVENT(23);
258# ifdef VG_DEBUG_MEMORY
259 sk_assert(IS_ALIGNED4_ADDR(a));
260# endif
261 ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
262}
263
264
njn25e49d8e72002-09-23 09:36:25 +0000265/*------------------------------------------------------------*/
266/*--- Setting permissions over address ranges. ---*/
267/*------------------------------------------------------------*/
268
269static void set_address_range_perms ( Addr a, UInt len,
270 UInt example_a_bit,
271 UInt example_v_bit )
272{
273 UChar vbyte, abyte8;
274 UInt vword4, sm_off;
275 SecMap* sm;
276
277 PROF_EVENT(30);
278
279 if (len == 0)
280 return;
281
nethercotea66033c2004-03-08 15:37:58 +0000282 if (VG_(clo_verbosity) > 0) {
283 if (len > 100 * 1000 * 1000) {
284 VG_(message)(Vg_UserMsg,
285 "Warning: set address range perms: "
286 "large range %u, a %d, v %d",
287 len, example_a_bit, example_v_bit );
288 }
njn25e49d8e72002-09-23 09:36:25 +0000289 }
290
291 VGP_PUSHCC(VgpSetMem);
292
293 /* Requests to change permissions of huge address ranges may
294 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
295 far all legitimate requests have fallen beneath that size. */
296 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000297 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000298
299 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000300 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000301 || example_a_bit == VGM_BIT_INVALID);
njne427a662002-10-02 11:08:25 +0000302 sk_assert(example_v_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000303 || example_v_bit == VGM_BIT_INVALID);
304 if (example_a_bit == VGM_BIT_INVALID)
njne427a662002-10-02 11:08:25 +0000305 sk_assert(example_v_bit == VGM_BIT_INVALID);
njn25e49d8e72002-09-23 09:36:25 +0000306
307 /* The validity bits to write. */
308 vbyte = example_v_bit==VGM_BIT_VALID
309 ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
310
311 /* In order that we can charge through the address space at 8
312 bytes/main-loop iteration, make up some perms. */
313 abyte8 = (example_a_bit << 7)
314 | (example_a_bit << 6)
315 | (example_a_bit << 5)
316 | (example_a_bit << 4)
317 | (example_a_bit << 3)
318 | (example_a_bit << 2)
319 | (example_a_bit << 1)
320 | (example_a_bit << 0);
321 vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
322
323# ifdef VG_DEBUG_MEMORY
324 /* Do it ... */
325 while (True) {
326 PROF_EVENT(31);
327 if (len == 0) break;
328 set_abit ( a, example_a_bit );
329 set_vbyte ( a, vbyte );
330 a++;
331 len--;
332 }
333
334# else
335 /* Slowly do parts preceding 8-byte alignment. */
336 while (True) {
337 PROF_EVENT(31);
338 if (len == 0) break;
339 if ((a % 8) == 0) break;
340 set_abit ( a, example_a_bit );
341 set_vbyte ( a, vbyte );
342 a++;
343 len--;
344 }
345
346 if (len == 0) {
347 VGP_POPCC(VgpSetMem);
348 return;
349 }
njne427a662002-10-02 11:08:25 +0000350 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000351
352 /* Once aligned, go fast. */
353 while (True) {
354 PROF_EVENT(32);
355 if (len < 8) break;
356 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
357 sm = primary_map[a >> 16];
358 sm_off = a & 0xFFFF;
359 sm->abits[sm_off >> 3] = abyte8;
360 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
361 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
362 a += 8;
363 len -= 8;
364 }
365
366 if (len == 0) {
367 VGP_POPCC(VgpSetMem);
368 return;
369 }
njne427a662002-10-02 11:08:25 +0000370 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000371
372 /* Finish the upper fragment. */
373 while (True) {
374 PROF_EVENT(33);
375 if (len == 0) break;
376 set_abit ( a, example_a_bit );
377 set_vbyte ( a, vbyte );
378 a++;
379 len--;
380 }
381# endif
382
383 /* Check that zero page and highest page have not been written to
384 -- this could happen with buggy syscall wrappers. Today
385 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000386 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000387 VGP_POPCC(VgpSetMem);
388}
389
390/* Set permissions for address ranges ... */
391
njn5c004e42002-11-18 11:04:50 +0000392void MC_(make_noaccess) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000393{
394 PROF_EVENT(35);
njn5c004e42002-11-18 11:04:50 +0000395 DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000396 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
397}
398
njn5c004e42002-11-18 11:04:50 +0000399void MC_(make_writable) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000400{
401 PROF_EVENT(36);
njn5c004e42002-11-18 11:04:50 +0000402 DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000403 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
404}
405
njn5c004e42002-11-18 11:04:50 +0000406void MC_(make_readable) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000407{
408 PROF_EVENT(37);
njn5c004e42002-11-18 11:04:50 +0000409 DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000410 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
411}
412
njn9b007f62003-04-07 14:40:25 +0000413static __inline__
414void make_aligned_word_writable(Addr a)
415{
416 SecMap* sm;
417 UInt sm_off;
418 UChar mask;
njn25e49d8e72002-09-23 09:36:25 +0000419
njn9b007f62003-04-07 14:40:25 +0000420 VGP_PUSHCC(VgpESPAdj);
421 ENSURE_MAPPABLE(a, "make_aligned_word_writable");
422 sm = primary_map[a >> 16];
423 sm_off = a & 0xFFFF;
424 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
425 mask = 0x0F;
426 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
427 /* mask now contains 1s where we wish to make address bits invalid (0s). */
428 sm->abits[sm_off >> 3] &= ~mask;
429 VGP_POPCC(VgpESPAdj);
430}
431
432static __inline__
433void make_aligned_word_noaccess(Addr a)
434{
435 SecMap* sm;
436 UInt sm_off;
437 UChar mask;
438
439 VGP_PUSHCC(VgpESPAdj);
440 ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
441 sm = primary_map[a >> 16];
442 sm_off = a & 0xFFFF;
443 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
444 mask = 0x0F;
445 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
446 /* mask now contains 1s where we wish to make address bits invalid (1s). */
447 sm->abits[sm_off >> 3] |= mask;
448 VGP_POPCC(VgpESPAdj);
449}
450
451/* Nb: by "aligned" here we mean 8-byte aligned */
452static __inline__
453void make_aligned_doubleword_writable(Addr a)
454{
455 SecMap* sm;
456 UInt sm_off;
457
458 VGP_PUSHCC(VgpESPAdj);
459 ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
460 sm = primary_map[a >> 16];
461 sm_off = a & 0xFFFF;
462 sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
463 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
464 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
465 VGP_POPCC(VgpESPAdj);
466}
467
468static __inline__
469void make_aligned_doubleword_noaccess(Addr a)
470{
471 SecMap* sm;
472 UInt sm_off;
473
474 VGP_PUSHCC(VgpESPAdj);
475 ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
476 sm = primary_map[a >> 16];
477 sm_off = a & 0xFFFF;
478 sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
479 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
480 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
481 VGP_POPCC(VgpESPAdj);
482}
483
484/* The %esp update handling functions */
485ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
486 make_aligned_word_noaccess,
487 make_aligned_doubleword_writable,
488 make_aligned_doubleword_noaccess,
489 MC_(make_writable),
490 MC_(make_noaccess)
491 );
492
493/* Block-copy permissions (needed for implementing realloc()). */
njn5c004e42002-11-18 11:04:50 +0000494static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000495{
496 UInt i;
497
njn5c004e42002-11-18 11:04:50 +0000498 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000499
500 PROF_EVENT(40);
501 for (i = 0; i < len; i++) {
502 UChar abit = get_abit ( src+i );
503 UChar vbyte = get_vbyte ( src+i );
504 PROF_EVENT(41);
505 set_abit ( dst+i, abit );
506 set_vbyte ( dst+i, vbyte );
507 }
508}
509
510
511/* Check permissions for address range. If inadequate permissions
512 exist, *bad_addr is set to the offending address, so the caller can
513 know what it is. */
514
sewardjecf8e102003-07-12 12:11:39 +0000515/* Returns True if [a .. a+len) is not addressible. Otherwise,
516 returns False, and if bad_addr is non-NULL, sets *bad_addr to
517 indicate the lowest failing address. Functions below are
518 similar. */
519Bool MC_(check_noaccess) ( Addr a, UInt len, Addr* bad_addr )
520{
521 UInt i;
522 UChar abit;
523 PROF_EVENT(42);
524 for (i = 0; i < len; i++) {
525 PROF_EVENT(43);
526 abit = get_abit(a);
527 if (abit == VGM_BIT_VALID) {
528 if (bad_addr != NULL) *bad_addr = a;
529 return False;
530 }
531 a++;
532 }
533 return True;
534}
535
njn5c004e42002-11-18 11:04:50 +0000536Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000537{
538 UInt i;
539 UChar abit;
540 PROF_EVENT(42);
541 for (i = 0; i < len; i++) {
542 PROF_EVENT(43);
543 abit = get_abit(a);
544 if (abit == VGM_BIT_INVALID) {
545 if (bad_addr != NULL) *bad_addr = a;
546 return False;
547 }
548 a++;
549 }
550 return True;
551}
552
njn5c004e42002-11-18 11:04:50 +0000553Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000554{
555 UInt i;
556 UChar abit;
557 UChar vbyte;
558
559 PROF_EVENT(44);
njn5c004e42002-11-18 11:04:50 +0000560 DEBUG("MC_(check_readable)\n");
njn25e49d8e72002-09-23 09:36:25 +0000561 for (i = 0; i < len; i++) {
562 abit = get_abit(a);
563 vbyte = get_vbyte(a);
564 PROF_EVENT(45);
565 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
566 if (bad_addr != NULL) *bad_addr = a;
567 return False;
568 }
569 a++;
570 }
571 return True;
572}
573
574
575/* Check a zero-terminated ascii string. Tricky -- don't want to
576 examine the actual bytes, to find the end, until we're sure it is
577 safe to do so. */
578
njn9b007f62003-04-07 14:40:25 +0000579static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000580{
581 UChar abit;
582 UChar vbyte;
583 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000584 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000585 while (True) {
586 PROF_EVENT(47);
587 abit = get_abit(a);
588 vbyte = get_vbyte(a);
589 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
590 if (bad_addr != NULL) *bad_addr = a;
591 return False;
592 }
593 /* Ok, a is safe to read. */
594 if (* ((UChar*)a) == 0) return True;
595 a++;
596 }
597}
598
599
600/*------------------------------------------------------------*/
601/*--- Memory event handlers ---*/
602/*------------------------------------------------------------*/
603
njn25e49d8e72002-09-23 09:36:25 +0000604static
njn72718642003-07-24 08:45:32 +0000605void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
606 Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000607{
608 Bool ok;
609 Addr bad_addr;
610
611 VGP_PUSHCC(VgpCheckMem);
612
613 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
614 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000615 ok = MC_(check_writable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000616 if (!ok) {
617 switch (part) {
618 case Vg_CoreSysCall:
njn72718642003-07-24 08:45:32 +0000619 MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000620 break;
621
622 case Vg_CorePThread:
623 case Vg_CoreSignal:
njn72718642003-07-24 08:45:32 +0000624 MAC_(record_core_mem_error)( tid, /*isWrite=*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000625 break;
626
627 default:
njn5c004e42002-11-18 11:04:50 +0000628 VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000629 }
630 }
631
632 VGP_POPCC(VgpCheckMem);
633}
634
635static
njn72718642003-07-24 08:45:32 +0000636void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
637 Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000638{
639 Bool ok;
640 Addr bad_addr;
641
642 VGP_PUSHCC(VgpCheckMem);
643
644 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
645 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000646 ok = MC_(check_readable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000647 if (!ok) {
648 switch (part) {
649 case Vg_CoreSysCall:
njn72718642003-07-24 08:45:32 +0000650 MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000651 break;
652
653 case Vg_CorePThread:
njn72718642003-07-24 08:45:32 +0000654 MAC_(record_core_mem_error)( tid, /*isWrite=*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000655 break;
656
657 /* If we're being asked to jump to a silly address, record an error
658 message before potentially crashing the entire system. */
659 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +0000660 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000661 break;
662
663 default:
njn5c004e42002-11-18 11:04:50 +0000664 VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000665 }
666 }
667 VGP_POPCC(VgpCheckMem);
668}
669
670static
njn72718642003-07-24 08:45:32 +0000671void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000672 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000673{
674 Bool ok = True;
675 Addr bad_addr;
676 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
677
678 VGP_PUSHCC(VgpCheckMem);
679
njne427a662002-10-02 11:08:25 +0000680 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000681 ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000682 if (!ok) {
njn72718642003-07-24 08:45:32 +0000683 MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000684 }
685
686 VGP_POPCC(VgpCheckMem);
687}
688
689
690static
njn5c004e42002-11-18 11:04:50 +0000691void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +0000692{
njn1f3a9092002-10-04 09:22:30 +0000693 /* Ignore the permissions, just make it readable. Seems to work... */
njn5c004e42002-11-18 11:04:50 +0000694 DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
695 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000696}
697
698static
njn5c004e42002-11-18 11:04:50 +0000699void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000700{
701 if (is_inited) {
njn5c004e42002-11-18 11:04:50 +0000702 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000703 } else {
njn5c004e42002-11-18 11:04:50 +0000704 MC_(make_writable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000705 }
706}
707
708static
njn5c004e42002-11-18 11:04:50 +0000709void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000710{
njn5c004e42002-11-18 11:04:50 +0000711 DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
712 if (rr) MC_(make_readable)(a, len);
713 else if (ww) MC_(make_writable)(a, len);
714 else MC_(make_noaccess)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000715}
716
717
718/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +0000719/*--- Register event handlers ---*/
720/*------------------------------------------------------------*/
721
722static void mc_post_regs_write_init ( void )
723{
724 UInt i;
nethercotec06e2132004-09-03 13:45:29 +0000725 for (i = FIRST_ARCH_REG; i <= LAST_ARCH_REG; i++)
njnd3040452003-05-19 15:04:06 +0000726 VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
727 VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
728}
729
730static void mc_post_reg_write(ThreadId tid, UInt reg)
731{
732 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
733}
734
735static void mc_post_reg_write_clientcall(ThreadId tid, UInt reg, Addr f )
736{
737 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
738}
739
740
741/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000742/*--- Functions called directly from generated code. ---*/
743/*------------------------------------------------------------*/
744
745static __inline__ UInt rotateRight16 ( UInt x )
746{
747 /* Amazingly, gcc turns this into a single rotate insn. */
748 return (x >> 16) | (x << 16);
749}
750
751
752static __inline__ UInt shiftRight16 ( UInt x )
753{
754 return x >> 16;
755}
756
757
758/* Read/write 1/2/4 sized V bytes, and emit an address error if
759 needed. */
760
761/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
762 Under all other circumstances, it defers to the relevant _SLOWLY
763 function, which can handle all situations.
764*/
nethercoteeec46302004-08-23 15:06:23 +0000765REGPARM(1)
njn5c004e42002-11-18 11:04:50 +0000766UInt MC_(helperc_LOADV4) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000767{
768# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000769 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000770# else
771 UInt sec_no = rotateRight16(a) & 0x3FFFF;
772 SecMap* sm = primary_map[sec_no];
773 UInt a_off = (a & 0xFFFF) >> 3;
774 UChar abits = sm->abits[a_off];
775 abits >>= (a & 4);
776 abits &= 15;
777 PROF_EVENT(60);
778 if (abits == VGM_NIBBLE_VALID) {
779 /* Handle common case quickly: a is suitably aligned, is mapped,
780 and is addressible. */
781 UInt v_off = a & 0xFFFF;
782 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
783 } else {
784 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000785 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000786 }
787# endif
788}
789
nethercoteeec46302004-08-23 15:06:23 +0000790REGPARM(2)
njn5c004e42002-11-18 11:04:50 +0000791void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000792{
793# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000794 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000795# else
796 UInt sec_no = rotateRight16(a) & 0x3FFFF;
797 SecMap* sm = primary_map[sec_no];
798 UInt a_off = (a & 0xFFFF) >> 3;
799 UChar abits = sm->abits[a_off];
800 abits >>= (a & 4);
801 abits &= 15;
802 PROF_EVENT(61);
803 if (abits == VGM_NIBBLE_VALID) {
804 /* Handle common case quickly: a is suitably aligned, is mapped,
805 and is addressible. */
806 UInt v_off = a & 0xFFFF;
807 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
808 } else {
809 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000810 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000811 }
812# endif
813}
814
nethercoteeec46302004-08-23 15:06:23 +0000815REGPARM(1)
njn5c004e42002-11-18 11:04:50 +0000816UInt MC_(helperc_LOADV2) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000817{
818# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000819 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000820# else
821 UInt sec_no = rotateRight16(a) & 0x1FFFF;
822 SecMap* sm = primary_map[sec_no];
823 UInt a_off = (a & 0xFFFF) >> 3;
824 PROF_EVENT(62);
825 if (sm->abits[a_off] == VGM_BYTE_VALID) {
826 /* Handle common case quickly. */
827 UInt v_off = a & 0xFFFF;
828 return 0xFFFF0000
829 |
830 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
831 } else {
832 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000833 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000834 }
835# endif
836}
837
nethercoteeec46302004-08-23 15:06:23 +0000838REGPARM(2)
njn5c004e42002-11-18 11:04:50 +0000839void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000840{
841# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000842 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000843# else
844 UInt sec_no = rotateRight16(a) & 0x1FFFF;
845 SecMap* sm = primary_map[sec_no];
846 UInt a_off = (a & 0xFFFF) >> 3;
847 PROF_EVENT(63);
848 if (sm->abits[a_off] == VGM_BYTE_VALID) {
849 /* Handle common case quickly. */
850 UInt v_off = a & 0xFFFF;
851 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
852 } else {
853 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000854 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000855 }
856# endif
857}
858
nethercoteeec46302004-08-23 15:06:23 +0000859REGPARM(1)
njn5c004e42002-11-18 11:04:50 +0000860UInt MC_(helperc_LOADV1) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000861{
862# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000863 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000864# else
865 UInt sec_no = shiftRight16(a);
866 SecMap* sm = primary_map[sec_no];
867 UInt a_off = (a & 0xFFFF) >> 3;
868 PROF_EVENT(64);
869 if (sm->abits[a_off] == VGM_BYTE_VALID) {
870 /* Handle common case quickly. */
871 UInt v_off = a & 0xFFFF;
872 return 0xFFFFFF00
873 |
874 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
875 } else {
876 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000877 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000878 }
879# endif
880}
881
nethercoteeec46302004-08-23 15:06:23 +0000882REGPARM(2)
njn5c004e42002-11-18 11:04:50 +0000883void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000884{
885# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000886 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000887# else
888 UInt sec_no = shiftRight16(a);
889 SecMap* sm = primary_map[sec_no];
890 UInt a_off = (a & 0xFFFF) >> 3;
891 PROF_EVENT(65);
892 if (sm->abits[a_off] == VGM_BYTE_VALID) {
893 /* Handle common case quickly. */
894 UInt v_off = a & 0xFFFF;
895 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
896 } else {
897 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000898 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000899 }
900# endif
901}
902
903
904/*------------------------------------------------------------*/
905/*--- Fallback functions to handle cases that the above ---*/
906/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
907/*------------------------------------------------------------*/
908
/* Generic slow-path 4-byte V-word read at address `a'.  Checks the
   addressibility of each byte independently and applies the
   partial-load policy; the fast-path LOADV4 helper defers here when
   its common-case conditions do not hold.  Result is assembled
   little-endian-ly: the V byte for a+0 ends up in bits 7:0. */
static UInt mc_rd_V4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;
   UInt vb0, vb1, vb2, vb3;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which confuses the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      /* isWrite==False: this is a read-side address error. */
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
      return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
             | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MAC_(clo_partial_loads_ok));
   {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}
979
njn5c004e42002-11-18 11:04:50 +0000980static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000981{
982 /* Check the address for validity. */
983 Bool aerr = False;
984 PROF_EVENT(71);
985
986 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
987 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
988 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
989 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
990
991 /* Store the V bytes, remembering to do it little-endian-ly. */
992 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
993 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
994 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
995 set_vbyte( a+3, vbytes & 0x000000FF );
996
997 /* If an address error has happened, report it. */
998 if (aerr)
njn72718642003-07-24 08:45:32 +0000999 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +00001000}
1001
njn5c004e42002-11-18 11:04:50 +00001002static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001003{
1004 /* Check the address for validity. */
1005 UInt vw = VGM_WORD_INVALID;
1006 Bool aerr = False;
1007 PROF_EVENT(72);
1008
1009 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1010 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1011
1012 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1013 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1014 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1015
1016 /* If an address error has happened, report it. */
1017 if (aerr) {
njn72718642003-07-24 08:45:32 +00001018 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +00001019 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1020 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1021 }
1022 return vw;
1023}
1024
njn5c004e42002-11-18 11:04:50 +00001025static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001026{
1027 /* Check the address for validity. */
1028 Bool aerr = False;
1029 PROF_EVENT(73);
1030
1031 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1032 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1033
1034 /* Store the V bytes, remembering to do it little-endian-ly. */
1035 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1036 set_vbyte( a+1, vbytes & 0x000000FF );
1037
1038 /* If an address error has happened, report it. */
1039 if (aerr)
njn72718642003-07-24 08:45:32 +00001040 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +00001041}
1042
njn5c004e42002-11-18 11:04:50 +00001043static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001044{
1045 /* Check the address for validity. */
1046 UInt vw = VGM_WORD_INVALID;
1047 Bool aerr = False;
1048 PROF_EVENT(74);
1049
1050 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1051
1052 /* Fetch the V byte. */
1053 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1054
1055 /* If an address error has happened, report it. */
1056 if (aerr) {
njn72718642003-07-24 08:45:32 +00001057 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001058 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1059 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1060 }
1061 return vw;
1062}
1063
njn5c004e42002-11-18 11:04:50 +00001064static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001065{
1066 /* Check the address for validity. */
1067 Bool aerr = False;
1068 PROF_EVENT(75);
1069 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1070
1071 /* Store the V bytes, remembering to do it little-endian-ly. */
1072 set_vbyte( a+0, vbytes & 0x000000FF );
1073
1074 /* If an address error has happened, report it. */
1075 if (aerr)
njn72718642003-07-24 08:45:32 +00001076 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001077}
1078
1079
1080/* ---------------------------------------------------------------------
1081 Called from generated code, or from the assembly helpers.
1082 Handlers for value check failures.
1083 ------------------------------------------------------------------ */
1084
/* Each helper reports a use of an undefined value; the integer
   argument is the operand size in bytes of the failed check.
   NOTE(review): 0 presumably means "size not applicable" -- confirm
   against MC_(record_value_error). */
void MC_(helperc_value_check0_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 0 );
}

void MC_(helperc_value_check1_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 1 );
}

void MC_(helperc_value_check2_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 2 );
}

void MC_(helperc_value_check4_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 4 );
}
1104
1105
1106/* ---------------------------------------------------------------------
1107 FPU load and store checks, called from generated code.
1108 ------------------------------------------------------------------ */
1109
/* Check that the `size'-byte area at `addr' may be read by an FPU
   insn.  Reports an address error if any byte is unaddressible;
   otherwise reports a value error if any byte's V bits are
   undefined.  Fast inline paths for aligned 4- and 8-byte accesses;
   everything else goes to the byte-at-a-time slow path. */
REGPARM(2)
void MC_(fpu_read_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable). If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to mc_fpu_read_check_SLOWLY for everything else. */

   SecMap* sm;
   UInt sm_off, v_off, a_off;
   Addr addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_read_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      /* All 32 V bits must say "defined". */
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      mc_fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned. Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half.  Re-fetch the secondary map: the two halves may
         straddle a 64k boundary. */
      sm = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      mc_fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      mc_fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   /* NOTE(review): 10/28/108/512 look like x87/FSAVE/FXSAVE state
      image sizes -- confirm against the instrumentation that calls
      this helper. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(84);
      mc_fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
#  endif
}
1200
1201
/* Check that the `size'-byte area at `addr' may be written by an FPU
   insn.  Reports an address error if any byte is unaddressible;
   addressible bytes have their V bits set to "defined" since they
   are being overwritten.  Fast inline paths for aligned 4- and
   8-byte accesses; everything else goes to the slow path. */
REGPARM(2)
void MC_(fpu_write_check) ( Addr addr, Int size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, make it valid, otherwise invalid.
   */

   SecMap* sm;
   UInt sm_off, v_off, a_off;
   Addr addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_write_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned. */
      sm = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      mc_fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned. Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half.  Re-fetch the secondary map: the two halves may
         straddle a 64k boundary. */
      sm = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      mc_fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      mc_fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   /* NOTE(review): 10/28/108/512 look like x87/FSAVE/FXSAVE state
      image sizes -- confirm against the instrumentation that calls
      this helper. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(89);
      mc_fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
#  endif
}
1283
1284
1285/* ---------------------------------------------------------------------
1286 Slow, general cases for FPU load and store checks.
1287 ------------------------------------------------------------------ */
1288
1289/* Generic version. Test for both addr and value errors, but if
1290 there's an addr error, don't report a value error even if it
1291 exists. */
1292
njn5c004e42002-11-18 11:04:50 +00001293void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001294{
1295 Int i;
1296 Bool aerr = False;
1297 Bool verr = False;
1298 PROF_EVENT(90);
1299 for (i = 0; i < size; i++) {
1300 PROF_EVENT(91);
1301 if (get_abit(addr+i) != VGM_BIT_VALID)
1302 aerr = True;
1303 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1304 verr = True;
1305 }
1306
1307 if (aerr) {
njn72718642003-07-24 08:45:32 +00001308 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001309 } else {
1310 if (verr)
njn72718642003-07-24 08:45:32 +00001311 MC_(record_value_error)( VG_(get_current_tid)(), size );
njn25e49d8e72002-09-23 09:36:25 +00001312 }
1313}
1314
1315
1316/* Generic version. Test for addr errors. Valid addresses are
1317 given valid values, and invalid addresses invalid values. */
1318
njn5c004e42002-11-18 11:04:50 +00001319void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001320{
1321 Int i;
1322 Addr a_here;
1323 Bool a_ok;
1324 Bool aerr = False;
1325 PROF_EVENT(92);
1326 for (i = 0; i < size; i++) {
1327 PROF_EVENT(93);
1328 a_here = addr+i;
1329 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1330 if (a_ok) {
1331 set_vbyte(a_here, VGM_BYTE_VALID);
1332 } else {
1333 set_vbyte(a_here, VGM_BYTE_INVALID);
1334 aerr = True;
1335 }
1336 }
1337 if (aerr) {
njn72718642003-07-24 08:45:32 +00001338 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001339 }
1340}
1341
njn25e49d8e72002-09-23 09:36:25 +00001342
1343/*------------------------------------------------------------*/
sewardjee070842003-07-05 17:53:55 +00001344/*--- Metadata get/set functions, for client requests. ---*/
1345/*------------------------------------------------------------*/
1346
/* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
   error, 3 == addressing error.  Services the client requests that
   let a program read or overwrite Memcheck's V-bit metadata for a
   memory range.  Both arrays must be 4-aligned and `size' a multiple
   of 4; both must be fully addressible. */
Int MC_(get_or_set_vbits_for_client) (
   ThreadId tid,
   Addr dataV,
   Addr vbitsV,
   UInt size,
   Bool setting /* True <=> set vbits, False <=> get vbits */
)
{
   Bool addressibleD = True;
   Bool addressibleV = True;
   UInt* data = (UInt*)dataV;
   UInt* vbits = (UInt*)vbitsV;
   UInt szW = size / 4; /* sigh */
   UInt i;
   UInt* dataP = NULL; /* bogus init to keep gcc happy */
   UInt* vbitsP = NULL; /* ditto */

   /* Check alignment of args. */
   if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
      return 2;
   if ((size & 3) != 0)
      return 2;

   /* Check that arrays are addressible; stop at (and report) the
      first bad word encountered. */
   for (i = 0; i < szW; i++) {
      dataP = &data[i];
      vbitsP = &vbits[i];
      if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
         addressibleD = False;
         break;
      }
      if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
         addressibleV = False;
         break;
      }
   }
   if (!addressibleD) {
      /* isWrite flag: `data' is written when setting, read when
         getting. */
      MAC_(record_address_error)( tid, (Addr)dataP, 4,
                                  setting ? True : False );
      return 3;
   }
   if (!addressibleV) {
      /* Opposite sense: `vbits' is read when setting, written when
         getting. */
      MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
                                  setting ? False : True );
      return 3;
   }

   /* Do the copy */
   if (setting) {
      /* setting: complain if the supplied vbits are themselves
         undefined, but copy them into the shadow state regardless. */
      for (i = 0; i < szW; i++) {
         if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
            MC_(record_value_error)(tid, 4);
         set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
      }
   } else {
      /* getting: export the shadow V bits, and mark the destination
         array itself as defined. */
      for (i = 0; i < szW; i++) {
         vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
         set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
      }
   }

   return 1;
}
1414
1415
1416/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001417/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1418/*------------------------------------------------------------*/
1419
sewardja4495682002-10-21 07:29:59 +00001420/* For the memory leak detector, say whether an entire 64k chunk of
1421 address space is possibly in use, or not. If in doubt return
1422 True.
njn25e49d8e72002-09-23 09:36:25 +00001423*/
sewardja4495682002-10-21 07:29:59 +00001424static
1425Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001426{
sewardja4495682002-10-21 07:29:59 +00001427 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1428 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1429 /* Definitely not in use. */
1430 return False;
1431 } else {
1432 return True;
njn25e49d8e72002-09-23 09:36:25 +00001433 }
1434}
1435
1436
sewardja4495682002-10-21 07:29:59 +00001437/* For the memory leak detector, say whether or not a given word
1438 address is to be regarded as valid. */
1439static
1440Bool mc_is_valid_address ( Addr a )
1441{
1442 UInt vbytes;
1443 UChar abits;
1444 sk_assert(IS_ALIGNED4_ADDR(a));
1445 abits = get_abits4_ALIGNED(a);
1446 vbytes = get_vbytes4_ALIGNED(a);
1447 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1448 return True;
1449 } else {
1450 return False;
1451 }
1452}
1453
1454
/* Leak detector for this tool. We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   tool. */
void MC_(detect_memory_leaks) ( void )
{
   /* The two callbacks tell the generic detector which 64k chunks
      might contain pointers and which word addresses hold valid
      data. */
   MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
}
1462
1463
1464/* ---------------------------------------------------------------------
1465 Sanity check machinery (permanently engaged).
1466 ------------------------------------------------------------------ */
1467
/* Frequent, cheap consistency check.  Memcheck has no state that can
   usefully be validated quickly, so always report OK; the real
   checking happens in SK_(expensive_sanity_check). */
Bool SK_(cheap_sanity_check) ( void )
{
   /* nothing useful we can rapidly check */
   return True;
}
1473
1474Bool SK_(expensive_sanity_check) ( void )
1475{
1476 Int i;
1477
1478 /* Make sure nobody changed the distinguished secondary. */
1479 for (i = 0; i < 8192; i++)
1480 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1481 return False;
1482
1483 for (i = 0; i < 65536; i++)
1484 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1485 return False;
1486
1487 /* Make sure that the upper 3/4 of the primary map hasn't
1488 been messed with. */
1489 for (i = 65536; i < 262144; i++)
1490 if (primary_map[i] != & distinguished_secondary_map)
1491 return False;
1492
1493 return True;
1494}
1495
1496/* ---------------------------------------------------------------------
1497 Debugging machinery (turn on to debug). Something of a mess.
1498 ------------------------------------------------------------------ */
1499
1500#if 0
1501/* Print the value tags on the 8 integer registers & flag reg. */
1502
1503static void uint_to_bits ( UInt x, Char* str )
1504{
1505 Int i;
1506 Int w = 0;
1507 /* str must point to a space of at least 36 bytes. */
1508 for (i = 31; i >= 0; i--) {
1509 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
1510 if (i == 24 || i == 16 || i == 8)
1511 str[w++] = ' ';
1512 }
1513 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00001514 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00001515}
1516
1517/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
1518 state table. */
1519
1520static void vg_show_reg_tags ( void )
1521{
1522 Char buf1[36];
1523 Char buf2[36];
1524 UInt z_eax, z_ebx, z_ecx, z_edx,
1525 z_esi, z_edi, z_ebp, z_esp, z_eflags;
1526
1527 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
1528 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
1529 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
1530 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
1531 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
1532 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
1533 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
1534 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
1535 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
1536
1537 uint_to_bits(z_eflags, buf1);
njn9b6d34e2002-10-15 08:48:08 +00001538 VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
njn25e49d8e72002-09-23 09:36:25 +00001539
1540 uint_to_bits(z_eax, buf1);
1541 uint_to_bits(z_ebx, buf2);
1542 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
1543
1544 uint_to_bits(z_ecx, buf1);
1545 uint_to_bits(z_edx, buf2);
1546 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
1547
1548 uint_to_bits(z_esi, buf1);
1549 uint_to_bits(z_edi, buf2);
1550 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
1551
1552 uint_to_bits(z_ebp, buf1);
1553 uint_to_bits(z_esp, buf2);
1554 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
1555}
1556
1557
1558/* For debugging only. Scan the address space and touch all allegedly
1559 addressible words. Useful for establishing where Valgrind's idea of
1560 addressibility has diverged from what the kernel believes. */
1561
1562static
1563void zzzmemscan_notify_word ( Addr a, UInt w )
1564{
1565}
1566
1567void zzzmemscan ( void )
1568{
1569 Int n_notifies
1570 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
1571 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
1572}
1573#endif
1574
1575
1576
1577
1578#if 0
1579static Int zzz = 0;
1580
1581void show_bb ( Addr eip_next )
1582{
1583 VG_(printf)("[%4d] ", zzz);
1584 vg_show_reg_tags( &VG_(m_shadow );
1585 VG_(translate) ( eip_next, NULL, NULL, NULL );
1586}
1587#endif /* 0 */
1588
njn25e49d8e72002-09-23 09:36:25 +00001589
1590/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001591/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001592/*------------------------------------------------------------*/
1593
/* Tool-specific command line flags; parsed in
   SK_(process_cmd_line_option), documented in SK_(print_usage) and
   SK_(print_debug_usage).  Both default to on. */
Bool MC_(clo_avoid_strlen_errors) = True;
Bool MC_(clo_cleanup) = True;
1596
/* Parse one Memcheck-specific command line option; anything not
   recognised here is handed to the common MAC option handler.
   Returns True iff the option was accepted.
   NOTE(review): VG_BOOL_CLO evidently expands to an `if' head (note
   the `else' chaining below) -- keep that shape intact when adding
   options. */
Bool SK_(process_cmd_line_option)(Char* arg)
{
   VG_BOOL_CLO("--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
   else VG_BOOL_CLO("--cleanup", MC_(clo_cleanup))
   else
      return MAC_(process_common_cmd_line_option)(arg);

   return True;
}
1606
/* Print the tool-specific part of the --help text: common MAC
   options first, then Memcheck's own. */
void SK_(print_usage)(void)
{
   MAC_(print_common_usage)();
   VG_(printf)(
" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
   );
}
1614
/* Print the tool-specific part of the --help-debug text. */
void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
   VG_(printf)(
" --cleanup=no|yes improve after instrumentation? [yes]\n"
   );
}
1622
1623
1624/*------------------------------------------------------------*/
1625/*--- Setup ---*/
1626/*------------------------------------------------------------*/
1627
/* Tool initialisation, run before command line processing: register
   Memcheck's identity, capability needs, memory/register event
   callbacks and code-generation helpers with the core, then build
   the initial shadow memory state. */
void SK_(pre_clo_init)(void)
{
   VG_(details_name) ("Memcheck");
   VG_(details_version) (NULL);
   VG_(details_description) ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2004, and GNU GPL'd, by Julian Seward et al.");
   VG_(details_bug_reports_to) (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 228 );

   /* Core services this tool depends on. */
   VG_(needs_core_errors) ();
   VG_(needs_skin_errors) ();
   VG_(needs_libc_freeres) ();
   VG_(needs_shadow_regs) ();
   VG_(needs_command_line_options)();
   VG_(needs_client_requests) ();
   VG_(needs_extended_UCode) ();
   VG_(needs_syscall_wrapper) ();
   VG_(needs_sanity_checks) ();
   VG_(needs_shadow_memory) ();

   /* Heap-block event hooks shared with the common MAC machinery. */
   MAC_( new_mem_heap) = & mc_new_mem_heap;
   MAC_( ban_mem_heap) = & MC_(make_noaccess);
   MAC_(copy_mem_heap) = & mc_copy_address_range_state;
   MAC_( die_mem_heap) = & MC_(make_noaccess);
   MAC_(check_noaccess) = & MC_(check_noaccess);

   /* Memory-range lifecycle callbacks: creation... */
   VG_(init_new_mem_startup) ( & mc_new_mem_startup );
   VG_(init_new_mem_stack_signal) ( & MC_(make_writable) );
   VG_(init_new_mem_brk) ( & MC_(make_writable) );
   VG_(init_new_mem_mmap) ( & mc_set_perms );

   /* ... remapping and protection changes ... */
   VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
   VG_(init_change_mem_mprotect) ( & mc_set_perms );

   /* ... and destruction. */
   VG_(init_die_mem_stack_signal) ( & MC_(make_noaccess) );
   VG_(init_die_mem_brk) ( & MC_(make_noaccess) );
   VG_(init_die_mem_munmap) ( & MC_(make_noaccess) );

   /* Specialised handlers for small, fixed-size stack adjustments,
      plus the general fallback. */
   VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
   VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
   VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
   VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
   VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
   VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );

   VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
   VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
   VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
   VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
   VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
   VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );

   VG_(init_ban_mem_stack) ( & MC_(make_noaccess) );

   /* Pre/post checks around memory the core reads/writes on the
      client's behalf (eg syscall arguments). */
   VG_(init_pre_mem_read) ( & mc_check_is_readable );
   VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
   VG_(init_pre_mem_write) ( & mc_check_is_writable );
   VG_(init_post_mem_write) ( & MC_(make_readable) );

   /* Shadow-register maintenance on the various register-write
      paths. */
   VG_(init_post_regs_write_init) ( & mc_post_regs_write_init );
   VG_(init_post_reg_write_syscall_return) ( & mc_post_reg_write );
   VG_(init_post_reg_write_deliver_signal) ( & mc_post_reg_write );
   VG_(init_post_reg_write_pthread_return) ( & mc_post_reg_write );
   VG_(init_post_reg_write_clientreq_return) ( & mc_post_reg_write );
   VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );

   /* Three compact slots taken up by stack memory helpers */
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
   VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
   VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));

   /* These two made non-compact because 2-byte transactions are rare. */
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
   VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));

   /* Profiling event labels. */
   VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );

   /* Additional block description for VG_(describe_addr)() */
   MAC_(describe_addr_supp) = MC_(client_perm_maybe_describe);

   init_shadow_memory();
   MAC_(common_pre_clo_init)();
}
1721
/* Called by the core once command-line option processing is complete.
   MemCheck needs no post-option initialisation, so this is a no-op;
   it exists only to satisfy the tool (skin) interface. */
void SK_(post_clo_init) ( void )
{
}
1725
njn7d9f94d2003-04-22 21:41:40 +00001726void SK_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00001727{
njn3e884182003-04-15 13:03:23 +00001728 MAC_(common_fini)( MC_(detect_memory_leaks) );
1729
njn5c004e42002-11-18 11:04:50 +00001730 if (0) {
1731 VG_(message)(Vg_DebugMsg,
1732 "------ Valgrind's client block stats follow ---------------" );
1733 MC_(show_client_block_stats)();
1734 }
njn25e49d8e72002-09-23 09:36:25 +00001735}
1736
fitzhardinge98abfc72003-12-16 02:05:15 +00001737VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 9./8)
1738
njn25e49d8e72002-09-23 09:36:25 +00001739/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001740/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001741/*--------------------------------------------------------------------*/