njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn0e1b5142003-04-15 14:58:06 +000012 Copyright (C) 2000-2003 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn27f1a382002-11-08 15:48:16 +000037VG_DETERMINE_INTERFACE_VERSION
38
njn25e49d8e72002-09-23 09:36:25 +000039/* Define to debug the mem audit system. */
40/* #define VG_DEBUG_MEMORY */
41
njn25e49d8e72002-09-23 09:36:25 +000042#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
43
44/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000045/*--- Low-level support for memory checking. ---*/
46/*------------------------------------------------------------*/
47
48/* All reads and writes are checked against a memory map, which
49 records the state of all memory in the process. The memory map is
50 organised like this:
51
52 The top 16 bits of an address are used to index into a top-level
53 map table, containing 65536 entries. Each entry is a pointer to a
 54 second-level map, which records the accessibility and validity
55 permissions for the 65536 bytes indexed by the lower 16 bits of the
56 address. Each byte is represented by nine bits, one indicating
57 accessibility, the other eight validity. So each second-level map
58 contains 73728 bytes. This two-level arrangement conveniently
 59 divides the 4G address space into 64k lumps, each of size 64k bytes.
60
61 All entries in the primary (top-level) map must point to a valid
62 secondary (second-level) map. Since most of the 4G of address
63 space will not be in use -- ie, not mapped at all -- there is a
64 distinguished secondary map, which indicates `not addressible and
 65 not valid' for all bytes. Entries in the primary map for
66 which the entire 64k is not in use at all point at this
67 distinguished map.
68
69 [...] lots of stuff deleted due to out of date-ness
70
71 As a final optimisation, the alignment and address checks for
72 4-byte loads and stores are combined in a neat way. The primary
73 map is extended to have 262144 entries (2^18), rather than 2^16.
74 The top 3/4 of these entries are permanently set to the
75 distinguished secondary map. For a 4-byte load/store, the
76 top-level map is indexed not with (addr >> 16) but instead f(addr),
77 where
78
79 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
80 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
81 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
82
83 ie the lowest two bits are placed above the 16 high address bits.
84 If either of these two bits are nonzero, the address is misaligned;
85 this will select a secondary map from the upper 3/4 of the primary
86 map. Because this is always the distinguished secondary map, a
87 (bogus) address check failure will result. The failure handling
88 code can then figure out whether this is a genuine addr check
89 failure or whether it is a possibly-legitimate access at a
90 misaligned address.
91*/
92
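/* Editorial sketch (not part of the tool): the address decomposition
   described above, written out as a hypothetical helper.  It only uses
   facts stated in the comment; the real fast-path code appears further
   down in this file. */
#if 0
static void show_address_decomposition ( Addr a )
{
   UInt sec_no   = a >> 16;        /* primary_map index for byte-sized ops */
   UInt sec_off  = a & 0xFFFF;     /* offset within the secondary map      */
   /* For 4-byte ops the two lowest address bits are rotated up above the
      16 high bits; if either is nonzero the index lands in the upper 3/4
      of primary_map, which always points at the distinguished secondary
      map, forcing the slow path. */
   UInt sec_no_4 = ((a >> 16) | (a << 16)) & 0x3FFFF;
   VG_(printf)("a=%p: primary 0x%x, offset 0x%x, 4-byte primary 0x%x\n",
               a, sec_no, sec_off, sec_no_4);
}
#endif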
93
94/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000095/*--- Function declarations. ---*/
96/*------------------------------------------------------------*/
97
njn5c004e42002-11-18 11:04:50 +000098static UInt mc_rd_V4_SLOWLY ( Addr a );
99static UInt mc_rd_V2_SLOWLY ( Addr a );
100static UInt mc_rd_V1_SLOWLY ( Addr a );
101static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
102static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
103static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
104static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
105static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000106
107/*------------------------------------------------------------*/
108/*--- Data defns. ---*/
109/*------------------------------------------------------------*/
110
111typedef
112 struct {
113 UChar abits[8192];
114 UChar vbyte[65536];
115 }
116 SecMap;
117
118static SecMap* primary_map[ /*65536*/ 262144 ];
119static SecMap distinguished_secondary_map;
120
njn25e49d8e72002-09-23 09:36:25 +0000121
122static void init_shadow_memory ( void )
123{
124 Int i;
125
126 for (i = 0; i < 8192; i++) /* Invalid address */
127 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
128 for (i = 0; i < 65536; i++) /* Invalid Value */
129 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
130
131 /* These entries gradually get overwritten as the used address
132 space expands. */
133 for (i = 0; i < 65536; i++)
134 primary_map[i] = &distinguished_secondary_map;
135
136 /* These ones should never change; it's a bug in Valgrind if they do. */
137 for (i = 65536; i < 262144; i++)
138 primary_map[i] = &distinguished_secondary_map;
139}
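/* Note (editorial): primary_map entries that still point at
   distinguished_secondary_map mean "this whole 64k chunk has never been
   made addressible"; the leak detector's mc_is_valid_64k_chunk() below
   relies on exactly this property to skip unused parts of the address
   space cheaply. */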
140
njn25e49d8e72002-09-23 09:36:25 +0000141/*------------------------------------------------------------*/
142/*--- Basic bitmap management, reading and writing. ---*/
143/*------------------------------------------------------------*/
144
145/* Allocate and initialise a secondary map. */
146
147static SecMap* alloc_secondary_map ( __attribute__ ((unused))
148 Char* caller )
149{
150 SecMap* map;
151 UInt i;
152 PROF_EVENT(10);
153
154 /* Mark all bytes as invalid access and invalid value. */
155
156 /* It just happens that a SecMap occupies exactly 18 pages --
157 although this isn't important, so the following assert is
158 spurious. */
njne427a662002-10-02 11:08:25 +0000159 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000160 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
161
162 for (i = 0; i < 8192; i++)
163 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
164 for (i = 0; i < 65536; i++)
165 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
166
167 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
168 return map;
169}
170
171
172/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
173
174static __inline__ UChar get_abit ( Addr a )
175{
176 SecMap* sm = primary_map[a >> 16];
177 UInt sm_off = a & 0xFFFF;
178 PROF_EVENT(20);
179# if 0
180 if (IS_DISTINGUISHED_SM(sm))
181 VG_(message)(Vg_DebugMsg,
182 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
183# endif
184 return BITARR_TEST(sm->abits, sm_off)
185 ? VGM_BIT_INVALID : VGM_BIT_VALID;
186}
187
188static __inline__ UChar get_vbyte ( Addr a )
189{
190 SecMap* sm = primary_map[a >> 16];
191 UInt sm_off = a & 0xFFFF;
192 PROF_EVENT(21);
193# if 0
194 if (IS_DISTINGUISHED_SM(sm))
195 VG_(message)(Vg_DebugMsg,
196 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
197# endif
198 return sm->vbyte[sm_off];
199}
200
sewardj56867352003-10-12 10:27:06 +0000201static /* __inline__ */ void set_abit ( Addr a, UChar abit )
njn25e49d8e72002-09-23 09:36:25 +0000202{
203 SecMap* sm;
204 UInt sm_off;
205 PROF_EVENT(22);
206 ENSURE_MAPPABLE(a, "set_abit");
207 sm = primary_map[a >> 16];
208 sm_off = a & 0xFFFF;
209 if (abit)
210 BITARR_SET(sm->abits, sm_off);
211 else
212 BITARR_CLEAR(sm->abits, sm_off);
213}
214
215static __inline__ void set_vbyte ( Addr a, UChar vbyte )
216{
217 SecMap* sm;
218 UInt sm_off;
219 PROF_EVENT(23);
220 ENSURE_MAPPABLE(a, "set_vbyte");
221 sm = primary_map[a >> 16];
222 sm_off = a & 0xFFFF;
223 sm->vbyte[sm_off] = vbyte;
224}
225
226
227/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
228
229static __inline__ UChar get_abits4_ALIGNED ( Addr a )
230{
231 SecMap* sm;
232 UInt sm_off;
233 UChar abits8;
234 PROF_EVENT(24);
235# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000236 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000237# endif
238 sm = primary_map[a >> 16];
239 sm_off = a & 0xFFFF;
240 abits8 = sm->abits[sm_off >> 3];
241 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
242 abits8 &= 0x0F;
243 return abits8;
244}
245
246static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
247{
248 SecMap* sm = primary_map[a >> 16];
249 UInt sm_off = a & 0xFFFF;
250 PROF_EVENT(25);
251# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000252 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000253# endif
254 return ((UInt*)(sm->vbyte))[sm_off >> 2];
255}
256
257
sewardjee070842003-07-05 17:53:55 +0000258static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
259{
260 SecMap* sm;
261 UInt sm_off;
262 ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
263 sm = primary_map[a >> 16];
264 sm_off = a & 0xFFFF;
265 PROF_EVENT(23);
266# ifdef VG_DEBUG_MEMORY
267 sk_assert(IS_ALIGNED4_ADDR(a));
268# endif
269 ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
270}
271
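/* Summary of the shadow packing used by the accessors above (editorial;
   the BITARR_* and VGM_* definitions live in the headers, so the bit
   numbering in the sketch is an assumption):
     - abits[] holds one A bit per byte of client memory, 8 per array
       byte, so an aligned 4-byte access needs one nibble of abits;
     - vbyte[] holds one V byte per byte of client memory, so an aligned
       4-byte access reads or writes one 32-bit word of vbyte. */
#if 0
static void show_shadow_slots ( Addr a )   /* hypothetical helper */
{
   UInt sm_off = a & 0xFFFF;
   VG_(printf)("A bit: abits[0x%x] bit %d;  V byte: vbyte[0x%x]\n",
               sm_off >> 3, (Int)(sm_off & 7), sm_off);
}
#endif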
272
njn25e49d8e72002-09-23 09:36:25 +0000273/*------------------------------------------------------------*/
274/*--- Setting permissions over address ranges. ---*/
275/*------------------------------------------------------------*/
276
277static void set_address_range_perms ( Addr a, UInt len,
278 UInt example_a_bit,
279 UInt example_v_bit )
280{
281 UChar vbyte, abyte8;
282 UInt vword4, sm_off;
283 SecMap* sm;
284
285 PROF_EVENT(30);
286
287 if (len == 0)
288 return;
289
290 if (len > 100 * 1000 * 1000) {
291 VG_(message)(Vg_UserMsg,
292 "Warning: set address range perms: "
293 "large range %u, a %d, v %d",
294 len, example_a_bit, example_v_bit );
295 }
296
297 VGP_PUSHCC(VgpSetMem);
298
299 /* Requests to change permissions of huge address ranges may
300 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
301 far all legitimate requests have fallen beneath that size. */
302 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000303 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000304
305 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000306 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000307 || example_a_bit == VGM_BIT_INVALID);
njne427a662002-10-02 11:08:25 +0000308 sk_assert(example_v_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000309 || example_v_bit == VGM_BIT_INVALID);
310 if (example_a_bit == VGM_BIT_INVALID)
njne427a662002-10-02 11:08:25 +0000311 sk_assert(example_v_bit == VGM_BIT_INVALID);
njn25e49d8e72002-09-23 09:36:25 +0000312
313 /* The validity bits to write. */
314 vbyte = example_v_bit==VGM_BIT_VALID
315 ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
316
317 /* In order that we can charge through the address space at 8
318 bytes/main-loop iteration, make up some perms. */
319 abyte8 = (example_a_bit << 7)
320 | (example_a_bit << 6)
321 | (example_a_bit << 5)
322 | (example_a_bit << 4)
323 | (example_a_bit << 3)
324 | (example_a_bit << 2)
325 | (example_a_bit << 1)
326 | (example_a_bit << 0);
327 vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
328
329# ifdef VG_DEBUG_MEMORY
330 /* Do it ... */
331 while (True) {
332 PROF_EVENT(31);
333 if (len == 0) break;
334 set_abit ( a, example_a_bit );
335 set_vbyte ( a, vbyte );
336 a++;
337 len--;
338 }
339
340# else
341 /* Slowly do parts preceding 8-byte alignment. */
342 while (True) {
343 PROF_EVENT(31);
344 if (len == 0) break;
345 if ((a % 8) == 0) break;
346 set_abit ( a, example_a_bit );
347 set_vbyte ( a, vbyte );
348 a++;
349 len--;
350 }
351
352 if (len == 0) {
353 VGP_POPCC(VgpSetMem);
354 return;
355 }
njne427a662002-10-02 11:08:25 +0000356 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000357
358 /* Once aligned, go fast. */
359 while (True) {
360 PROF_EVENT(32);
361 if (len < 8) break;
362 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
363 sm = primary_map[a >> 16];
364 sm_off = a & 0xFFFF;
365 sm->abits[sm_off >> 3] = abyte8;
366 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
367 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
368 a += 8;
369 len -= 8;
370 }
371
372 if (len == 0) {
373 VGP_POPCC(VgpSetMem);
374 return;
375 }
njne427a662002-10-02 11:08:25 +0000376 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000377
378 /* Finish the upper fragment. */
379 while (True) {
380 PROF_EVENT(33);
381 if (len == 0) break;
382 set_abit ( a, example_a_bit );
383 set_vbyte ( a, vbyte );
384 a++;
385 len--;
386 }
387# endif
388
389 /* Check that zero page and highest page have not been written to
390 -- this could happen with buggy syscall wrappers. Today
 391 (2001-04-26) we had precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000392 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000393 VGP_POPCC(VgpSetMem);
394}
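/* Worked example of the loop structure above (editorial): a call
   covering 0x40001003 .. 0x40001017 (21 bytes) is handled as
     0x40001003 .. 0x40001007  byte-by-byte (5 bytes, up to 8-alignment),
     0x40001008 .. 0x40001017  in two fast iterations, each writing one
                               abits[] byte and two 32-bit vbyte[] words,
   leaving no trailing fragment, since 5 + 16 == 21. */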
395
396/* Set permissions for address ranges ... */
397
njn5c004e42002-11-18 11:04:50 +0000398void MC_(make_noaccess) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000399{
400 PROF_EVENT(35);
njn5c004e42002-11-18 11:04:50 +0000401 DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000402 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
403}
404
njn5c004e42002-11-18 11:04:50 +0000405void MC_(make_writable) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000406{
407 PROF_EVENT(36);
njn5c004e42002-11-18 11:04:50 +0000408 DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000409 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
410}
411
njn5c004e42002-11-18 11:04:50 +0000412void MC_(make_readable) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000413{
414 PROF_EVENT(37);
njn5c004e42002-11-18 11:04:50 +0000415 DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000416 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
417}
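/* Editorial sketch of how the three calls above are meant to be used;
   the function and the 40-byte size below are made up for illustration. */
#if 0
static void example_block_lifecycle ( Addr p )
{
   MC_(make_writable) ( p, 40 );  /* new heap block: addressible, undefined */
   MC_(make_readable) ( p, 40 );  /* e.g. once the client has filled it in  */
   MC_(make_noaccess) ( p, 40 );  /* after free: any further access errs    */
}
#endif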
418
njn9b007f62003-04-07 14:40:25 +0000419static __inline__
420void make_aligned_word_writable(Addr a)
421{
422 SecMap* sm;
423 UInt sm_off;
424 UChar mask;
njn25e49d8e72002-09-23 09:36:25 +0000425
njn9b007f62003-04-07 14:40:25 +0000426 VGP_PUSHCC(VgpESPAdj);
427 ENSURE_MAPPABLE(a, "make_aligned_word_writable");
428 sm = primary_map[a >> 16];
429 sm_off = a & 0xFFFF;
430 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
431 mask = 0x0F;
432 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
 433   /* mask now contains 1s where we wish to make the A bits valid (0s). */
434 sm->abits[sm_off >> 3] &= ~mask;
435 VGP_POPCC(VgpESPAdj);
436}
437
438static __inline__
439void make_aligned_word_noaccess(Addr a)
440{
441 SecMap* sm;
442 UInt sm_off;
443 UChar mask;
444
445 VGP_PUSHCC(VgpESPAdj);
446 ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
447 sm = primary_map[a >> 16];
448 sm_off = a & 0xFFFF;
449 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
450 mask = 0x0F;
451 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
452 /* mask now contains 1s where we wish to make address bits invalid (1s). */
453 sm->abits[sm_off >> 3] |= mask;
454 VGP_POPCC(VgpESPAdj);
455}
456
457/* Nb: by "aligned" here we mean 8-byte aligned */
458static __inline__
459void make_aligned_doubleword_writable(Addr a)
460{
461 SecMap* sm;
462 UInt sm_off;
463
464 VGP_PUSHCC(VgpESPAdj);
465 ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
466 sm = primary_map[a >> 16];
467 sm_off = a & 0xFFFF;
468 sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
469 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
470 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
471 VGP_POPCC(VgpESPAdj);
472}
473
474static __inline__
475void make_aligned_doubleword_noaccess(Addr a)
476{
477 SecMap* sm;
478 UInt sm_off;
479
480 VGP_PUSHCC(VgpESPAdj);
481 ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
482 sm = primary_map[a >> 16];
483 sm_off = a & 0xFFFF;
484 sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
485 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
486 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
487 VGP_POPCC(VgpESPAdj);
488}
489
490/* The %esp update handling functions */
491ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
492 make_aligned_word_noaccess,
493 make_aligned_doubleword_writable,
494 make_aligned_doubleword_noaccess,
495 MC_(make_writable),
496 MC_(make_noaccess)
497 );
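/* Note (editorial): the macro above wires the word/doubleword helpers
   into the core's %esp-update machinery.  The intent, as far as this
   file shows, is: when %esp moves down by an aligned 4 or 8 bytes the
   newly exposed stack words become addressible-but-undefined, when it
   moves back up they become unaddressible again, and larger or unaligned
   adjustments fall back to MC_(make_writable) / MC_(make_noaccess). */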
498
499/* Block-copy permissions (needed for implementing realloc()). */
njn5c004e42002-11-18 11:04:50 +0000500static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000501{
502 UInt i;
503
njn5c004e42002-11-18 11:04:50 +0000504 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000505
506 PROF_EVENT(40);
507 for (i = 0; i < len; i++) {
508 UChar abit = get_abit ( src+i );
509 UChar vbyte = get_vbyte ( src+i );
510 PROF_EVENT(41);
511 set_abit ( dst+i, abit );
512 set_vbyte ( dst+i, vbyte );
513 }
514}
515
516
517/* Check permissions for address range. If inadequate permissions
518 exist, *bad_addr is set to the offending address, so the caller can
519 know what it is. */
520
sewardjecf8e102003-07-12 12:11:39 +0000521/* Returns True if [a .. a+len) is not addressible. Otherwise,
522 returns False, and if bad_addr is non-NULL, sets *bad_addr to
523 indicate the lowest failing address. Functions below are
524 similar. */
525Bool MC_(check_noaccess) ( Addr a, UInt len, Addr* bad_addr )
526{
527 UInt i;
528 UChar abit;
529 PROF_EVENT(42);
530 for (i = 0; i < len; i++) {
531 PROF_EVENT(43);
532 abit = get_abit(a);
533 if (abit == VGM_BIT_VALID) {
534 if (bad_addr != NULL) *bad_addr = a;
535 return False;
536 }
537 a++;
538 }
539 return True;
540}
541
njn5c004e42002-11-18 11:04:50 +0000542Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000543{
544 UInt i;
545 UChar abit;
546 PROF_EVENT(42);
547 for (i = 0; i < len; i++) {
548 PROF_EVENT(43);
549 abit = get_abit(a);
550 if (abit == VGM_BIT_INVALID) {
551 if (bad_addr != NULL) *bad_addr = a;
552 return False;
553 }
554 a++;
555 }
556 return True;
557}
558
njn5c004e42002-11-18 11:04:50 +0000559Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000560{
561 UInt i;
562 UChar abit;
563 UChar vbyte;
564
565 PROF_EVENT(44);
njn5c004e42002-11-18 11:04:50 +0000566 DEBUG("MC_(check_readable)\n");
njn25e49d8e72002-09-23 09:36:25 +0000567 for (i = 0; i < len; i++) {
568 abit = get_abit(a);
569 vbyte = get_vbyte(a);
570 PROF_EVENT(45);
571 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
572 if (bad_addr != NULL) *bad_addr = a;
573 return False;
574 }
575 a++;
576 }
577 return True;
578}
579
580
581/* Check a zero-terminated ascii string. Tricky -- don't want to
582 examine the actual bytes, to find the end, until we're sure it is
583 safe to do so. */
584
njn9b007f62003-04-07 14:40:25 +0000585static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000586{
587 UChar abit;
588 UChar vbyte;
589 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000590 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000591 while (True) {
592 PROF_EVENT(47);
593 abit = get_abit(a);
594 vbyte = get_vbyte(a);
595 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
596 if (bad_addr != NULL) *bad_addr = a;
597 return False;
598 }
599 /* Ok, a is safe to read. */
600 if (* ((UChar*)a) == 0) return True;
601 a++;
602 }
603}
604
605
606/*------------------------------------------------------------*/
607/*--- Memory event handlers ---*/
608/*------------------------------------------------------------*/
609
njn25e49d8e72002-09-23 09:36:25 +0000610static
njn72718642003-07-24 08:45:32 +0000611void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
612 Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000613{
614 Bool ok;
615 Addr bad_addr;
616
617 VGP_PUSHCC(VgpCheckMem);
618
619 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
620 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000621 ok = MC_(check_writable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000622 if (!ok) {
623 switch (part) {
624 case Vg_CoreSysCall:
njn72718642003-07-24 08:45:32 +0000625 MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000626 break;
627
628 case Vg_CorePThread:
629 case Vg_CoreSignal:
njn72718642003-07-24 08:45:32 +0000630 MAC_(record_core_mem_error)( tid, /*isWrite=*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000631 break;
632
633 default:
njn5c004e42002-11-18 11:04:50 +0000634 VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000635 }
636 }
637
638 VGP_POPCC(VgpCheckMem);
639}
640
641static
njn72718642003-07-24 08:45:32 +0000642void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
643 Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000644{
645 Bool ok;
646 Addr bad_addr;
647
648 VGP_PUSHCC(VgpCheckMem);
649
650 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
651 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000652 ok = MC_(check_readable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000653 if (!ok) {
654 switch (part) {
655 case Vg_CoreSysCall:
njn72718642003-07-24 08:45:32 +0000656 MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000657 break;
658
659 case Vg_CorePThread:
njn72718642003-07-24 08:45:32 +0000660 MAC_(record_core_mem_error)( tid, /*isWrite=*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000661 break;
662
663 /* If we're being asked to jump to a silly address, record an error
664 message before potentially crashing the entire system. */
665 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +0000666 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000667 break;
668
669 default:
njn5c004e42002-11-18 11:04:50 +0000670 VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000671 }
672 }
673 VGP_POPCC(VgpCheckMem);
674}
675
676static
njn72718642003-07-24 08:45:32 +0000677void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000678 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000679{
680 Bool ok = True;
681 Addr bad_addr;
682 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
683
684 VGP_PUSHCC(VgpCheckMem);
685
njne427a662002-10-02 11:08:25 +0000686 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000687 ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000688 if (!ok) {
njn72718642003-07-24 08:45:32 +0000689 MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000690 }
691
692 VGP_POPCC(VgpCheckMem);
693}
694
695
696static
njn5c004e42002-11-18 11:04:50 +0000697void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +0000698{
njn1f3a9092002-10-04 09:22:30 +0000699 /* Ignore the permissions, just make it readable. Seems to work... */
njn5c004e42002-11-18 11:04:50 +0000700 DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
701 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000702}
703
704static
njn5c004e42002-11-18 11:04:50 +0000705void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000706{
707 if (is_inited) {
njn5c004e42002-11-18 11:04:50 +0000708 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000709 } else {
njn5c004e42002-11-18 11:04:50 +0000710 MC_(make_writable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000711 }
712}
713
714static
njn5c004e42002-11-18 11:04:50 +0000715void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000716{
njn5c004e42002-11-18 11:04:50 +0000717 DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
718 if (rr) MC_(make_readable)(a, len);
719 else if (ww) MC_(make_writable)(a, len);
720 else MC_(make_noaccess)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000721}
722
723
724/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +0000725/*--- Register event handlers ---*/
726/*------------------------------------------------------------*/
727
728static void mc_post_regs_write_init ( void )
729{
730 UInt i;
731 for (i = R_EAX; i <= R_EDI; i++)
732 VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
733 VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
734}
735
736static void mc_post_reg_write(ThreadId tid, UInt reg)
737{
738 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
739}
740
741static void mc_post_reg_write_clientcall(ThreadId tid, UInt reg, Addr f )
742{
743 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
744}
745
746
747/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000748/*--- Functions called directly from generated code. ---*/
749/*------------------------------------------------------------*/
750
751static __inline__ UInt rotateRight16 ( UInt x )
752{
753 /* Amazingly, gcc turns this into a single rotate insn. */
754 return (x >> 16) | (x << 16);
755}
756
757
758static __inline__ UInt shiftRight16 ( UInt x )
759{
760 return x >> 16;
761}
762
763
764/* Read/write 1/2/4 sized V bytes, and emit an address error if
765 needed. */
766
767/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
768 Under all other circumstances, it defers to the relevant _SLOWLY
769 function, which can handle all situations.
770*/
771__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000772UInt MC_(helperc_LOADV4) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000773{
774# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000775 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000776# else
777 UInt sec_no = rotateRight16(a) & 0x3FFFF;
778 SecMap* sm = primary_map[sec_no];
779 UInt a_off = (a & 0xFFFF) >> 3;
780 UChar abits = sm->abits[a_off];
781 abits >>= (a & 4);
782 abits &= 15;
783 PROF_EVENT(60);
784 if (abits == VGM_NIBBLE_VALID) {
785 /* Handle common case quickly: a is suitably aligned, is mapped,
786 and is addressible. */
787 UInt v_off = a & 0xFFFF;
788 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
789 } else {
790 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000791 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000792 }
793# endif
794}
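/* Worked example of the fast-path index above (editorial): for the
   aligned address 0x40001004, rotateRight16(a) & 0x3FFFF == 0x4000, a
   normal primary_map entry.  For the misaligned 0x40001005 the low bit
   rotates up into bit 16, giving 0x14000 -- an entry in the permanently
   distinguished upper 3/4 of primary_map, whose abits are all invalid,
   so the test above fails and mc_rd_V4_SLOWLY deals with it. */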
795
796__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000797void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000798{
799# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000800 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000801# else
802 UInt sec_no = rotateRight16(a) & 0x3FFFF;
803 SecMap* sm = primary_map[sec_no];
804 UInt a_off = (a & 0xFFFF) >> 3;
805 UChar abits = sm->abits[a_off];
806 abits >>= (a & 4);
807 abits &= 15;
808 PROF_EVENT(61);
809 if (abits == VGM_NIBBLE_VALID) {
810 /* Handle common case quickly: a is suitably aligned, is mapped,
811 and is addressible. */
812 UInt v_off = a & 0xFFFF;
813 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
814 } else {
815 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000816 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000817 }
818# endif
819}
820
821__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000822UInt MC_(helperc_LOADV2) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000823{
824# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000825 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000826# else
827 UInt sec_no = rotateRight16(a) & 0x1FFFF;
828 SecMap* sm = primary_map[sec_no];
829 UInt a_off = (a & 0xFFFF) >> 3;
830 PROF_EVENT(62);
831 if (sm->abits[a_off] == VGM_BYTE_VALID) {
832 /* Handle common case quickly. */
833 UInt v_off = a & 0xFFFF;
834 return 0xFFFF0000
835 |
836 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
837 } else {
838 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000839 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000840 }
841# endif
842}
843
844__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000845void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000846{
847# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000848 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000849# else
850 UInt sec_no = rotateRight16(a) & 0x1FFFF;
851 SecMap* sm = primary_map[sec_no];
852 UInt a_off = (a & 0xFFFF) >> 3;
853 PROF_EVENT(63);
854 if (sm->abits[a_off] == VGM_BYTE_VALID) {
855 /* Handle common case quickly. */
856 UInt v_off = a & 0xFFFF;
857 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
858 } else {
859 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000860 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000861 }
862# endif
863}
864
865__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000866UInt MC_(helperc_LOADV1) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000867{
868# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000869 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000870# else
871 UInt sec_no = shiftRight16(a);
872 SecMap* sm = primary_map[sec_no];
873 UInt a_off = (a & 0xFFFF) >> 3;
874 PROF_EVENT(64);
875 if (sm->abits[a_off] == VGM_BYTE_VALID) {
876 /* Handle common case quickly. */
877 UInt v_off = a & 0xFFFF;
878 return 0xFFFFFF00
879 |
880 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
881 } else {
882 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000883 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000884 }
885# endif
886}
887
888__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000889void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000890{
891# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000892 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000893# else
894 UInt sec_no = shiftRight16(a);
895 SecMap* sm = primary_map[sec_no];
896 UInt a_off = (a & 0xFFFF) >> 3;
897 PROF_EVENT(65);
898 if (sm->abits[a_off] == VGM_BYTE_VALID) {
899 /* Handle common case quickly. */
900 UInt v_off = a & 0xFFFF;
901 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
902 } else {
903 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000904 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000905 }
906# endif
907}
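/* Note (editorial): the 2- and 1-byte loads above pad the unused upper
   bits of the returned V word with 0xFFFF0000 / 0xFFFFFF00.  Assuming
   VGM_BYTE_INVALID is 0xFF (as the constant names suggest), this marks
   the widened part of the result as undefined, so code that accidentally
   relies on those junk bits still gets reported. */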
908
909
910/*------------------------------------------------------------*/
911/*--- Fallback functions to handle cases that the above ---*/
912/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
913/*------------------------------------------------------------*/
914
njn5c004e42002-11-18 11:04:50 +0000915static UInt mc_rd_V4_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000916{
917 Bool a0ok, a1ok, a2ok, a3ok;
918 UInt vb0, vb1, vb2, vb3;
919
920 PROF_EVENT(70);
921
922 /* First establish independently the addressibility of the 4 bytes
923 involved. */
924 a0ok = get_abit(a+0) == VGM_BIT_VALID;
925 a1ok = get_abit(a+1) == VGM_BIT_VALID;
926 a2ok = get_abit(a+2) == VGM_BIT_VALID;
927 a3ok = get_abit(a+3) == VGM_BIT_VALID;
928
929 /* Also get the validity bytes for the address. */
930 vb0 = (UInt)get_vbyte(a+0);
931 vb1 = (UInt)get_vbyte(a+1);
932 vb2 = (UInt)get_vbyte(a+2);
933 vb3 = (UInt)get_vbyte(a+3);
934
935 /* Now distinguish 3 cases */
936
937 /* Case 1: the address is completely valid, so:
938 - no addressing error
939 - return V bytes as read from memory
940 */
941 if (a0ok && a1ok && a2ok && a3ok) {
942 UInt vw = VGM_WORD_INVALID;
943 vw <<= 8; vw |= vb3;
944 vw <<= 8; vw |= vb2;
945 vw <<= 8; vw |= vb1;
946 vw <<= 8; vw |= vb0;
947 return vw;
948 }
949
950 /* Case 2: the address is completely invalid.
951 - emit addressing error
952 - return V word indicating validity.
953 This sounds strange, but if we make loads from invalid addresses
954 give invalid data, we also risk producing a number of confusing
955 undefined-value errors later, which confuses the fact that the
956 error arose in the first place from an invalid address.
957 */
958 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn43c799e2003-04-08 00:08:52 +0000959 if (!MAC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000960 || ((a & 3) != 0)
961 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
njn72718642003-07-24 08:45:32 +0000962 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
njn25e49d8e72002-09-23 09:36:25 +0000963 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
964 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
965 }
966
967 /* Case 3: the address is partially valid.
968 - no addressing error
969 - returned V word is invalid where the address is invalid,
970 and contains V bytes from memory otherwise.
njn5c004e42002-11-18 11:04:50 +0000971 Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +0000972 (which is the default), and the address is 4-aligned.
973 If not, Case 2 will have applied.
974 */
njn43c799e2003-04-08 00:08:52 +0000975 sk_assert(MAC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +0000976 {
977 UInt vw = VGM_WORD_INVALID;
978 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
979 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
980 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
981 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
982 return vw;
983 }
984}
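/* Worked example of Case 3 above (editorial): a 4-aligned load whose
   last byte lies just past the end of a heap block, with
   MAC_(clo_partial_loads_ok) set (the default).  Bytes 0..2 are
   addressible and byte 3 is not, so no address error is reported; the
   returned V word carries the three V bytes read from shadow memory
   plus VGM_BYTE_INVALID in the byte-3 position, and only a later use of
   that byte produces an undefined-value error. */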
985
njn5c004e42002-11-18 11:04:50 +0000986static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000987{
988 /* Check the address for validity. */
989 Bool aerr = False;
990 PROF_EVENT(71);
991
992 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
993 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
994 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
995 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
996
997 /* Store the V bytes, remembering to do it little-endian-ly. */
998 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
999 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1000 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1001 set_vbyte( a+3, vbytes & 0x000000FF );
1002
1003 /* If an address error has happened, report it. */
1004 if (aerr)
njn72718642003-07-24 08:45:32 +00001005 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +00001006}
1007
njn5c004e42002-11-18 11:04:50 +00001008static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001009{
1010 /* Check the address for validity. */
1011 UInt vw = VGM_WORD_INVALID;
1012 Bool aerr = False;
1013 PROF_EVENT(72);
1014
1015 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1016 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1017
1018 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1019 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1020 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1021
1022 /* If an address error has happened, report it. */
1023 if (aerr) {
njn72718642003-07-24 08:45:32 +00001024 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +00001025 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1026 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1027 }
1028 return vw;
1029}
1030
njn5c004e42002-11-18 11:04:50 +00001031static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001032{
1033 /* Check the address for validity. */
1034 Bool aerr = False;
1035 PROF_EVENT(73);
1036
1037 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1038 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1039
1040 /* Store the V bytes, remembering to do it little-endian-ly. */
1041 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1042 set_vbyte( a+1, vbytes & 0x000000FF );
1043
1044 /* If an address error has happened, report it. */
1045 if (aerr)
njn72718642003-07-24 08:45:32 +00001046 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +00001047}
1048
njn5c004e42002-11-18 11:04:50 +00001049static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001050{
1051 /* Check the address for validity. */
1052 UInt vw = VGM_WORD_INVALID;
1053 Bool aerr = False;
1054 PROF_EVENT(74);
1055
1056 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1057
1058 /* Fetch the V byte. */
1059 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1060
1061 /* If an address error has happened, report it. */
1062 if (aerr) {
njn72718642003-07-24 08:45:32 +00001063 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001064 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1065 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1066 }
1067 return vw;
1068}
1069
njn5c004e42002-11-18 11:04:50 +00001070static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001071{
1072 /* Check the address for validity. */
1073 Bool aerr = False;
1074 PROF_EVENT(75);
1075 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1076
1077 /* Store the V bytes, remembering to do it little-endian-ly. */
1078 set_vbyte( a+0, vbytes & 0x000000FF );
1079
1080 /* If an address error has happened, report it. */
1081 if (aerr)
njn72718642003-07-24 08:45:32 +00001082 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001083}
1084
1085
1086/* ---------------------------------------------------------------------
1087 Called from generated code, or from the assembly helpers.
1088 Handlers for value check failures.
1089 ------------------------------------------------------------------ */
1090
njn5c004e42002-11-18 11:04:50 +00001091void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001092{
njn72718642003-07-24 08:45:32 +00001093 MC_(record_value_error) ( VG_(get_current_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001094}
1095
njn5c004e42002-11-18 11:04:50 +00001096void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001097{
njn72718642003-07-24 08:45:32 +00001098 MC_(record_value_error) ( VG_(get_current_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001099}
1100
njn5c004e42002-11-18 11:04:50 +00001101void MC_(helperc_value_check2_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001102{
njn72718642003-07-24 08:45:32 +00001103 MC_(record_value_error) ( VG_(get_current_tid)(), 2 );
njn25e49d8e72002-09-23 09:36:25 +00001104}
1105
njn5c004e42002-11-18 11:04:50 +00001106void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001107{
njn72718642003-07-24 08:45:32 +00001108 MC_(record_value_error) ( VG_(get_current_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001109}
1110
1111
1112/* ---------------------------------------------------------------------
1113 FPU load and store checks, called from generated code.
1114 ------------------------------------------------------------------ */
1115
1116__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +00001117void MC_(fpu_read_check) ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001118{
1119 /* Ensure the read area is both addressible and valid (ie,
1120 readable). If there's an address error, don't report a value
1121 error too; but if there isn't an address error, check for a
1122 value error.
1123
1124 Try to be reasonably fast on the common case; wimp out and defer
njn5c004e42002-11-18 11:04:50 +00001125 to mc_fpu_read_check_SLOWLY for everything else. */
njn25e49d8e72002-09-23 09:36:25 +00001126
1127 SecMap* sm;
1128 UInt sm_off, v_off, a_off;
1129 Addr addr4;
1130
1131 PROF_EVENT(80);
1132
1133# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +00001134 mc_fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001135# else
1136
1137 if (size == 4) {
1138 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1139 PROF_EVENT(81);
1140 /* Properly aligned. */
1141 sm = primary_map[addr >> 16];
1142 sm_off = addr & 0xFFFF;
1143 a_off = sm_off >> 3;
1144 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1145 /* Properly aligned and addressible. */
1146 v_off = addr & 0xFFFF;
1147 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1148 goto slow4;
1149 /* Properly aligned, addressible and with valid data. */
1150 return;
1151 slow4:
njn5c004e42002-11-18 11:04:50 +00001152 mc_fpu_read_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +00001153 return;
1154 }
1155
1156 if (size == 8) {
1157 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1158 PROF_EVENT(82);
1159 /* Properly aligned. Do it in two halves. */
1160 addr4 = addr + 4;
1161 /* First half. */
1162 sm = primary_map[addr >> 16];
1163 sm_off = addr & 0xFFFF;
1164 a_off = sm_off >> 3;
1165 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1166 /* First half properly aligned and addressible. */
1167 v_off = addr & 0xFFFF;
1168 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1169 goto slow8;
1170 /* Second half. */
1171 sm = primary_map[addr4 >> 16];
1172 sm_off = addr4 & 0xFFFF;
1173 a_off = sm_off >> 3;
1174 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1175 /* Second half properly aligned and addressible. */
1176 v_off = addr4 & 0xFFFF;
1177 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1178 goto slow8;
1179 /* Both halves properly aligned, addressible and with valid
1180 data. */
1181 return;
1182 slow8:
njn5c004e42002-11-18 11:04:50 +00001183 mc_fpu_read_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +00001184 return;
1185 }
1186
1187 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1188 cases go quickly. */
1189 if (size == 2) {
1190 PROF_EVENT(83);
njn5c004e42002-11-18 11:04:50 +00001191 mc_fpu_read_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001192 return;
1193 }
1194
sewardj93992e22003-05-26 09:17:41 +00001195 if (size == 16 /*SSE*/
1196 || size == 10 || size == 28 || size == 108) {
njn25e49d8e72002-09-23 09:36:25 +00001197 PROF_EVENT(84);
njn5c004e42002-11-18 11:04:50 +00001198 mc_fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001199 return;
1200 }
1201
1202 VG_(printf)("size is %d\n", size);
njn5c004e42002-11-18 11:04:50 +00001203 VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001204# endif
1205}
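/* Note (editorial, based on the x86 FPU/SSE instruction set): the sizes
   handled above correspond to 4/8-byte float/double operands, 2-byte
   FPU control words, 10-byte long doubles, 16-byte SSE registers, and
   the 28/108-byte FNSTENV/FSAVE environment and state images.  The same
   set of cases appears in MC_(fpu_write_check) below. */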
1206
1207
1208__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +00001209void MC_(fpu_write_check) ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001210{
1211 /* Ensure the written area is addressible, and moan if otherwise.
1212 If it is addressible, make it valid, otherwise invalid.
1213 */
1214
1215 SecMap* sm;
1216 UInt sm_off, v_off, a_off;
1217 Addr addr4;
1218
1219 PROF_EVENT(85);
1220
1221# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +00001222 mc_fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001223# else
1224
1225 if (size == 4) {
1226 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1227 PROF_EVENT(86);
1228 /* Properly aligned. */
1229 sm = primary_map[addr >> 16];
1230 sm_off = addr & 0xFFFF;
1231 a_off = sm_off >> 3;
1232 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1233 /* Properly aligned and addressible. Make valid. */
1234 v_off = addr & 0xFFFF;
1235 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1236 return;
1237 slow4:
njn5c004e42002-11-18 11:04:50 +00001238 mc_fpu_write_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +00001239 return;
1240 }
1241
1242 if (size == 8) {
1243 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1244 PROF_EVENT(87);
1245 /* Properly aligned. Do it in two halves. */
1246 addr4 = addr + 4;
1247 /* First half. */
1248 sm = primary_map[addr >> 16];
1249 sm_off = addr & 0xFFFF;
1250 a_off = sm_off >> 3;
1251 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1252 /* First half properly aligned and addressible. Make valid. */
1253 v_off = addr & 0xFFFF;
1254 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1255 /* Second half. */
1256 sm = primary_map[addr4 >> 16];
1257 sm_off = addr4 & 0xFFFF;
1258 a_off = sm_off >> 3;
1259 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1260 /* Second half properly aligned and addressible. */
1261 v_off = addr4 & 0xFFFF;
1262 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1263 /* Properly aligned, addressible and with valid data. */
1264 return;
1265 slow8:
njn5c004e42002-11-18 11:04:50 +00001266 mc_fpu_write_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +00001267 return;
1268 }
1269
1270 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1271 cases go quickly. */
1272 if (size == 2) {
1273 PROF_EVENT(88);
njn5c004e42002-11-18 11:04:50 +00001274 mc_fpu_write_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001275 return;
1276 }
1277
sewardj93992e22003-05-26 09:17:41 +00001278 if (size == 16 /*SSE*/
1279 || size == 10 || size == 28 || size == 108) {
njn25e49d8e72002-09-23 09:36:25 +00001280 PROF_EVENT(89);
njn5c004e42002-11-18 11:04:50 +00001281 mc_fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001282 return;
1283 }
1284
1285 VG_(printf)("size is %d\n", size);
njn5c004e42002-11-18 11:04:50 +00001286 VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001287# endif
1288}
1289
1290
1291/* ---------------------------------------------------------------------
1292 Slow, general cases for FPU load and store checks.
1293 ------------------------------------------------------------------ */
1294
1295/* Generic version. Test for both addr and value errors, but if
1296 there's an addr error, don't report a value error even if it
1297 exists. */
1298
njn5c004e42002-11-18 11:04:50 +00001299void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001300{
1301 Int i;
1302 Bool aerr = False;
1303 Bool verr = False;
1304 PROF_EVENT(90);
1305 for (i = 0; i < size; i++) {
1306 PROF_EVENT(91);
1307 if (get_abit(addr+i) != VGM_BIT_VALID)
1308 aerr = True;
1309 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1310 verr = True;
1311 }
1312
1313 if (aerr) {
njn72718642003-07-24 08:45:32 +00001314 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001315 } else {
1316 if (verr)
njn72718642003-07-24 08:45:32 +00001317 MC_(record_value_error)( VG_(get_current_tid)(), size );
njn25e49d8e72002-09-23 09:36:25 +00001318 }
1319}
1320
1321
1322/* Generic version. Test for addr errors. Valid addresses are
1323 given valid values, and invalid addresses invalid values. */
1324
njn5c004e42002-11-18 11:04:50 +00001325void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001326{
1327 Int i;
1328 Addr a_here;
1329 Bool a_ok;
1330 Bool aerr = False;
1331 PROF_EVENT(92);
1332 for (i = 0; i < size; i++) {
1333 PROF_EVENT(93);
1334 a_here = addr+i;
1335 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1336 if (a_ok) {
1337 set_vbyte(a_here, VGM_BYTE_VALID);
1338 } else {
1339 set_vbyte(a_here, VGM_BYTE_INVALID);
1340 aerr = True;
1341 }
1342 }
1343 if (aerr) {
njn72718642003-07-24 08:45:32 +00001344 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001345 }
1346}
1347
njn25e49d8e72002-09-23 09:36:25 +00001348
1349/*------------------------------------------------------------*/
sewardjee070842003-07-05 17:53:55 +00001350/*--- Metadata get/set functions, for client requests. ---*/
1351/*------------------------------------------------------------*/
1352
 1353/* Get or set the client's V bits: copy the V bits for dataV into vbitsV,
 1354   or, if 'setting', the reverse.  Returns: 1 == OK, 2 == alignment error, 3 == addressing error. */
1355Int MC_(get_or_set_vbits_for_client) (
njn72718642003-07-24 08:45:32 +00001356 ThreadId tid,
sewardjee070842003-07-05 17:53:55 +00001357 Addr dataV,
1358 Addr vbitsV,
1359 UInt size,
1360 Bool setting /* True <=> set vbits, False <=> get vbits */
1361)
1362{
1363 Bool addressibleD = True;
1364 Bool addressibleV = True;
1365 UInt* data = (UInt*)dataV;
1366 UInt* vbits = (UInt*)vbitsV;
1367 UInt szW = size / 4; /* sigh */
1368 UInt i;
sewardjaf48a602003-07-06 00:54:47 +00001369 UInt* dataP = NULL; /* bogus init to keep gcc happy */
1370 UInt* vbitsP = NULL; /* ditto */
sewardjee070842003-07-05 17:53:55 +00001371
1372 /* Check alignment of args. */
1373 if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
1374 return 2;
1375 if ((size & 3) != 0)
1376 return 2;
1377
1378 /* Check that arrays are addressible. */
1379 for (i = 0; i < szW; i++) {
sewardjaf48a602003-07-06 00:54:47 +00001380 dataP = &data[i];
1381 vbitsP = &vbits[i];
sewardjee070842003-07-05 17:53:55 +00001382 if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1383 addressibleD = False;
1384 break;
1385 }
1386 if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1387 addressibleV = False;
1388 break;
1389 }
1390 }
1391 if (!addressibleD) {
njn72718642003-07-24 08:45:32 +00001392 MAC_(record_address_error)( tid, (Addr)dataP, 4,
sewardjee070842003-07-05 17:53:55 +00001393 setting ? True : False );
1394 return 3;
1395 }
1396 if (!addressibleV) {
njn72718642003-07-24 08:45:32 +00001397 MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
sewardjee070842003-07-05 17:53:55 +00001398 setting ? False : True );
1399 return 3;
1400 }
1401
1402 /* Do the copy */
1403 if (setting) {
1404 /* setting */
1405 for (i = 0; i < szW; i++) {
1406 if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn72718642003-07-24 08:45:32 +00001407 MC_(record_value_error)(tid, 4);
sewardjee070842003-07-05 17:53:55 +00001408 set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1409 }
1410 } else {
1411 /* getting */
1412 for (i = 0; i < szW; i++) {
1413 vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1414 set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1415 }
1416 }
1417
1418 return 1;
1419}
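/* Editorial sketch of a caller for the function above (hypothetical;
   the real dispatch lives in the client-request handling code). */
#if 0
static void example_get_vbits ( ThreadId tid, Addr data, Addr vbits, UInt n )
{
   Int res = MC_(get_or_set_vbits_for_client)( tid, data, vbits, n,
                                               /*setting*/ False );
   if (res != 1)
      VG_(message)(Vg_UserMsg, "GET_VBITS failed, code %d", res);
}
#endif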
1420
1421
1422/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001423/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1424/*------------------------------------------------------------*/
1425
sewardja4495682002-10-21 07:29:59 +00001426/* For the memory leak detector, say whether an entire 64k chunk of
1427 address space is possibly in use, or not. If in doubt return
1428 True.
njn25e49d8e72002-09-23 09:36:25 +00001429*/
sewardja4495682002-10-21 07:29:59 +00001430static
1431Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001432{
sewardja4495682002-10-21 07:29:59 +00001433 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1434 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1435 /* Definitely not in use. */
1436 return False;
1437 } else {
1438 return True;
njn25e49d8e72002-09-23 09:36:25 +00001439 }
1440}
1441
1442
sewardja4495682002-10-21 07:29:59 +00001443/* For the memory leak detector, say whether or not a given word
1444 address is to be regarded as valid. */
1445static
1446Bool mc_is_valid_address ( Addr a )
1447{
1448 UInt vbytes;
1449 UChar abits;
1450 sk_assert(IS_ALIGNED4_ADDR(a));
1451 abits = get_abits4_ALIGNED(a);
1452 vbytes = get_vbytes4_ALIGNED(a);
1453 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1454 return True;
1455 } else {
1456 return False;
1457 }
1458}
1459
1460
1461/* Leak detector for this skin. We don't actually do anything, merely
1462 run the generic leak detector with suitable parameters for this
1463 skin. */
njn5c004e42002-11-18 11:04:50 +00001464void MC_(detect_memory_leaks) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001465{
njn43c799e2003-04-08 00:08:52 +00001466 MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001467}
1468
1469
1470/* ---------------------------------------------------------------------
1471 Sanity check machinery (permanently engaged).
1472 ------------------------------------------------------------------ */
1473
1474/* Check that nobody has spuriously claimed that the first or last 16
1475 pages (64 KB) of address space have become accessible. Failure of
 1476   the following does not per se indicate an internal consistency
 1477   problem, but it is so likely to that we really want to know
1478 about it if so. */
1479
1480Bool SK_(cheap_sanity_check) ( void )
1481{
sewardjd5815ec2003-04-06 12:23:27 +00001482 if (IS_DISTINGUISHED_SM(primary_map[0])
1483 /* kludge: kernel drops a page up at top of address range for
1484 magic "optimized syscalls", so we can no longer check the
1485 highest page */
1486 /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
1487 )
njn25e49d8e72002-09-23 09:36:25 +00001488 return True;
1489 else
1490 return False;
1491}
1492
1493Bool SK_(expensive_sanity_check) ( void )
1494{
1495 Int i;
1496
1497 /* Make sure nobody changed the distinguished secondary. */
1498 for (i = 0; i < 8192; i++)
1499 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1500 return False;
1501
1502 for (i = 0; i < 65536; i++)
1503 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1504 return False;
1505
1506 /* Make sure that the upper 3/4 of the primary map hasn't
1507 been messed with. */
1508 for (i = 65536; i < 262144; i++)
1509 if (primary_map[i] != & distinguished_secondary_map)
1510 return False;
1511
1512 return True;
1513}
1514
1515/* ---------------------------------------------------------------------
1516 Debugging machinery (turn on to debug). Something of a mess.
1517 ------------------------------------------------------------------ */
1518
1519#if 0
1520/* Print the value tags on the 8 integer registers & flag reg. */
1521
1522static void uint_to_bits ( UInt x, Char* str )
1523{
1524 Int i;
1525 Int w = 0;
1526 /* str must point to a space of at least 36 bytes. */
1527 for (i = 31; i >= 0; i--) {
1528 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
1529 if (i == 24 || i == 16 || i == 8)
1530 str[w++] = ' ';
1531 }
1532 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00001533 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00001534}

/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressable words.  Useful for establishing where Valgrind's idea of
   addressability has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
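   /* Deliberately empty: the scan itself touches each word; there is
      nothing to record per word. */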
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif




#if 0
static Int zzz = 0;

void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   vg_show_reg_tags();
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */


/*------------------------------------------------------------*/
/*--- Command line args                                    ---*/
/*------------------------------------------------------------*/

Bool MC_(clo_avoid_strlen_errors) = True;
Bool MC_(clo_cleanup)             = True;

Bool SK_(process_cmd_line_option)(Char* arg)
{
   if      (VG_CLO_STREQ(arg, "--avoid-strlen-errors=yes"))
      MC_(clo_avoid_strlen_errors) = True;
   else if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=no"))
      MC_(clo_avoid_strlen_errors) = False;

   else if (VG_CLO_STREQ(arg, "--cleanup=yes"))
      MC_(clo_cleanup) = True;
   else if (VG_CLO_STREQ(arg, "--cleanup=no"))
      MC_(clo_cleanup) = False;

   else
      return MAC_(process_common_cmd_line_option)(arg);

   return True;
}

void SK_(print_usage)(void)
{
   MAC_(print_common_usage)();
   VG_(printf)(
"    --avoid-strlen-errors=no|yes  suppress errs from inlined strlen [yes]\n"
   );
}

void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
   VG_(printf)(
"    --cleanup=no|yes              improve after instrumentation? [yes]\n"
   );
}
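
/* Example invocation (hypothetical; whether the tool is selected with
   --tool=memcheck or an older --skin= style flag depends on the
   Valgrind version in use):

      valgrind --tool=memcheck --avoid-strlen-errors=no --cleanup=no ./a.out
*/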


/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

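/* "clo" = command line options: SK_(pre_clo_init) runs before the core
   has processed the command line, SK_(post_clo_init) just after it
   has, and SK_(fini) once at client exit. */
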
void SK_(pre_clo_init)(void)
{
   VG_(details_name)            ("Memcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2003, and GNU GPL'd, by Julian Seward.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 228 );

   VG_(needs_core_errors)         ();
   VG_(needs_skin_errors)         ();
   VG_(needs_libc_freeres)        ();
   VG_(needs_shadow_regs)         ();
   VG_(needs_command_line_options)();
   VG_(needs_client_requests)     ();
   VG_(needs_extended_UCode)      ();
   VG_(needs_syscall_wrapper)     ();
   VG_(needs_sanity_checks)       ();

   MAC_( new_mem_heap)  = & mc_new_mem_heap;
   MAC_( ban_mem_heap)  = & MC_(make_noaccess);
   MAC_(copy_mem_heap)  = & mc_copy_address_range_state;
   MAC_( die_mem_heap)  = & MC_(make_noaccess);
   MAC_(check_noaccess) = & MC_(check_noaccess);

   VG_(track_new_mem_startup)      ( & mc_new_mem_startup );
   VG_(track_new_mem_stack_signal) ( & MC_(make_writable) );
   VG_(track_new_mem_brk)          ( & MC_(make_writable) );
   VG_(track_new_mem_mmap)         ( & mc_set_perms );

   VG_(track_copy_mem_remap)       ( & mc_copy_address_range_state );
   VG_(track_change_mem_mprotect)  ( & mc_set_perms );

   VG_(track_die_mem_stack_signal) ( & MC_(make_noaccess) );
   VG_(track_die_mem_brk)          ( & MC_(make_noaccess) );
   VG_(track_die_mem_munmap)       ( & MC_(make_noaccess) );

   VG_(track_new_mem_stack_4)      ( & MAC_(new_mem_stack_4)  );
   VG_(track_new_mem_stack_8)      ( & MAC_(new_mem_stack_8)  );
   VG_(track_new_mem_stack_12)     ( & MAC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16)     ( & MAC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32)     ( & MAC_(new_mem_stack_32) );
   VG_(track_new_mem_stack)        ( & MAC_(new_mem_stack)    );

   VG_(track_die_mem_stack_4)      ( & MAC_(die_mem_stack_4)  );
   VG_(track_die_mem_stack_8)      ( & MAC_(die_mem_stack_8)  );
   VG_(track_die_mem_stack_12)     ( & MAC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16)     ( & MAC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32)     ( & MAC_(die_mem_stack_32) );
   VG_(track_die_mem_stack)        ( & MAC_(die_mem_stack)    );

   VG_(track_ban_mem_stack)        ( & MC_(make_noaccess) );

   VG_(track_pre_mem_read)         ( & mc_check_is_readable );
   VG_(track_pre_mem_read_asciiz)  ( & mc_check_is_readable_asciiz );
   VG_(track_pre_mem_write)        ( & mc_check_is_writable );
   VG_(track_post_mem_write)       ( & MC_(make_readable) );

   VG_(track_post_regs_write_init)             ( & mc_post_regs_write_init );
   VG_(track_post_reg_write_syscall_return)    ( & mc_post_reg_write );
   VG_(track_post_reg_write_deliver_signal)    ( & mc_post_reg_write );
   VG_(track_post_reg_write_pthread_return)    ( & mc_post_reg_write );
   VG_(track_post_reg_write_clientreq_return)  ( & mc_post_reg_write );
   VG_(track_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );

   /* Three compact slots taken up by stack memory helpers */
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
   VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
   VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));

   /* These are non-compact because 1- and 2-byte transactions are rare. */
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
   VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));

   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VGP_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   /* Additional block description for VG_(describe_addr)() */
   MAC_(describe_addr_supp) = MC_(client_perm_maybe_describe);

   init_shadow_memory();
   MAC_(common_pre_clo_init)();
}

void SK_(post_clo_init) ( void )
{
}

void SK_(fini) ( Int exitcode )
{
   MAC_(common_fini)( MC_(detect_memory_leaks) );

   if (0) {
      VG_(message)(Vg_DebugMsg,
        "------ Valgrind's client block stats follow ---------------" );
      MC_(show_client_block_stats)();
   }
}

/*--------------------------------------------------------------------*/
/*--- end                                                mc_main.c ---*/
/*--------------------------------------------------------------------*/