blob: 167a2ba82b60882fb03bf9be16390b274dceb8f0 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of MemCheck, a heavyweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn0e1b5142003-04-15 14:58:06 +000012 Copyright (C) 2000-2003 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn27f1a382002-11-08 15:48:16 +000037VG_DETERMINE_INTERFACE_VERSION
38
njn25e49d8e72002-09-23 09:36:25 +000039/* Define to debug the mem audit system. */
40/* #define VG_DEBUG_MEMORY */
41
njn25e49d8e72002-09-23 09:36:25 +000042#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
43
44/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000045/*--- Low-level support for memory checking. ---*/
46/*------------------------------------------------------------*/
47
48/* All reads and writes are checked against a memory map, which
49 records the state of all memory in the process. The memory map is
50 organised like this:
51
52 The top 16 bits of an address are used to index into a top-level
53 map table, containing 65536 entries. Each entry is a pointer to a
54 second-level map, which records the accessibility and validity
55 permissions for the 65536 bytes indexed by the lower 16 bits of the
56 address. Each byte is represented by nine bits, one indicating
57 accessibility, the other eight validity. So each second-level map
58 contains 73728 bytes. This two-level arrangement conveniently
59 divides the 4G address space into 64k lumps, each size 64k bytes.
60
61 All entries in the primary (top-level) map must point to a valid
62 secondary (second-level) map. Since most of the 4G of address
63 space will not be in use -- ie, not mapped at all -- there is a
64 distinguished secondary map, which indicates `not addressible and
65 not valid' writeable for all bytes. Entries in the primary map for
66 which the entire 64k is not in use at all point at this
67 distinguished map.
68
69 [...] lots of stuff deleted due to out of date-ness
70
71 As a final optimisation, the alignment and address checks for
72 4-byte loads and stores are combined in a neat way. The primary
73 map is extended to have 262144 entries (2^18), rather than 2^16.
74 The top 3/4 of these entries are permanently set to the
75 distinguished secondary map. For a 4-byte load/store, the
76 top-level map is indexed not with (addr >> 16) but instead f(addr),
77 where
78
79 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
80 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
81 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
82
83 ie the lowest two bits are placed above the 16 high address bits.
84 If either of these two bits are nonzero, the address is misaligned;
85 this will select a secondary map from the upper 3/4 of the primary
86 map. Because this is always the distinguished secondary map, a
87 (bogus) address check failure will result. The failure handling
88 code can then figure out whether this is a genuine addr check
89 failure or whether it is a possibly-legitimate access at a
90 misaligned address.
91*/
92
93
94/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000095/*--- Function declarations. ---*/
96/*------------------------------------------------------------*/
97
njn5c004e42002-11-18 11:04:50 +000098static UInt mc_rd_V4_SLOWLY ( Addr a );
99static UInt mc_rd_V2_SLOWLY ( Addr a );
100static UInt mc_rd_V1_SLOWLY ( Addr a );
101static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
102static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
103static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
104static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
105static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000106
107/*------------------------------------------------------------*/
108/*--- Data defns. ---*/
109/*------------------------------------------------------------*/
110
typedef
   struct {
      /* One A (addressibility) bit per byte of the 64k chunk:
         65536 / 8 = 8192 bytes of bitmap. */
      UChar abits[8192];
      /* One V (validity) byte per byte of the 64k chunk. */
      UChar vbyte[65536];
   }
   SecMap;

/* 262144 (2^18) entries rather than 65536: the upper 3/4 are
   permanently the distinguished map, used by the combined
   alignment/address trick for 4-byte accesses described above. */
static SecMap* primary_map[ /*65536*/ 262144 ];
static SecMap  distinguished_secondary_map;
120
njn25e49d8e72002-09-23 09:36:25 +0000121
122static void init_shadow_memory ( void )
123{
124 Int i;
125
126 for (i = 0; i < 8192; i++) /* Invalid address */
127 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
128 for (i = 0; i < 65536; i++) /* Invalid Value */
129 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
130
131 /* These entries gradually get overwritten as the used address
132 space expands. */
133 for (i = 0; i < 65536; i++)
134 primary_map[i] = &distinguished_secondary_map;
135
136 /* These ones should never change; it's a bug in Valgrind if they do. */
137 for (i = 65536; i < 262144; i++)
138 primary_map[i] = &distinguished_secondary_map;
139}
140
njn25e49d8e72002-09-23 09:36:25 +0000141/*------------------------------------------------------------*/
142/*--- Basic bitmap management, reading and writing. ---*/
143/*------------------------------------------------------------*/
144
145/* Allocate and initialise a secondary map. */
146
147static SecMap* alloc_secondary_map ( __attribute__ ((unused))
148 Char* caller )
149{
150 SecMap* map;
151 UInt i;
152 PROF_EVENT(10);
153
154 /* Mark all bytes as invalid access and invalid value. */
155
156 /* It just happens that a SecMap occupies exactly 18 pages --
157 although this isn't important, so the following assert is
158 spurious. */
njne427a662002-10-02 11:08:25 +0000159 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000160 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
161
162 for (i = 0; i < 8192; i++)
163 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
164 for (i = 0; i < 65536; i++)
165 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
166
167 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
168 return map;
169}
170
171
172/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
173
174static __inline__ UChar get_abit ( Addr a )
175{
176 SecMap* sm = primary_map[a >> 16];
177 UInt sm_off = a & 0xFFFF;
178 PROF_EVENT(20);
179# if 0
180 if (IS_DISTINGUISHED_SM(sm))
181 VG_(message)(Vg_DebugMsg,
182 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
183# endif
184 return BITARR_TEST(sm->abits, sm_off)
185 ? VGM_BIT_INVALID : VGM_BIT_VALID;
186}
187
188static __inline__ UChar get_vbyte ( Addr a )
189{
190 SecMap* sm = primary_map[a >> 16];
191 UInt sm_off = a & 0xFFFF;
192 PROF_EVENT(21);
193# if 0
194 if (IS_DISTINGUISHED_SM(sm))
195 VG_(message)(Vg_DebugMsg,
196 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
197# endif
198 return sm->vbyte[sm_off];
199}
200
201static __inline__ void set_abit ( Addr a, UChar abit )
202{
203 SecMap* sm;
204 UInt sm_off;
205 PROF_EVENT(22);
206 ENSURE_MAPPABLE(a, "set_abit");
207 sm = primary_map[a >> 16];
208 sm_off = a & 0xFFFF;
209 if (abit)
210 BITARR_SET(sm->abits, sm_off);
211 else
212 BITARR_CLEAR(sm->abits, sm_off);
213}
214
215static __inline__ void set_vbyte ( Addr a, UChar vbyte )
216{
217 SecMap* sm;
218 UInt sm_off;
219 PROF_EVENT(23);
220 ENSURE_MAPPABLE(a, "set_vbyte");
221 sm = primary_map[a >> 16];
222 sm_off = a & 0xFFFF;
223 sm->vbyte[sm_off] = vbyte;
224}
225
226
227/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
228
229static __inline__ UChar get_abits4_ALIGNED ( Addr a )
230{
231 SecMap* sm;
232 UInt sm_off;
233 UChar abits8;
234 PROF_EVENT(24);
235# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000236 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000237# endif
238 sm = primary_map[a >> 16];
239 sm_off = a & 0xFFFF;
240 abits8 = sm->abits[sm_off >> 3];
241 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
242 abits8 &= 0x0F;
243 return abits8;
244}
245
246static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
247{
248 SecMap* sm = primary_map[a >> 16];
249 UInt sm_off = a & 0xFFFF;
250 PROF_EVENT(25);
251# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000252 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000253# endif
254 return ((UInt*)(sm->vbyte))[sm_off >> 2];
255}
256
257
/* Store vbytes as the 4 V bytes for the 4-aligned word at a, with a
   single word-sized write. */
static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
{
   SecMap* sm;
   UInt    sm_off;
   ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   PROF_EVENT(23);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
}
271
272
njn25e49d8e72002-09-23 09:36:25 +0000273/*------------------------------------------------------------*/
274/*--- Setting permissions over address ranges. ---*/
275/*------------------------------------------------------------*/
276
/* Set the A bit of every byte in [a, a+len) to example_a_bit and its
   V bits to example_v_bit.  Ragged (unaligned) head and tail are done
   a byte at a time; the 8-aligned middle is done 8 bytes per
   iteration (one A byte + two V words). */
static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d, v %d",
                   len, example_a_bit, example_v_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   /* An unaddressible byte can never hold valid data. */
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity bits to write. */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms: the example A bit
      replicated into all 8 bit positions, and the V byte replicated
      into all 4 byte positions. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;

#  ifdef VG_DEBUG_MEMORY
   /* Debug build: do the whole range bytewise through the checked
      accessors. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast: each iteration writes one A byte and two
      V words, covering 8 client bytes. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}
395
396/* Set permissions for address ranges ... */
397
/* Mark [a, a+len) as neither addressible nor holding valid data. */
void MC_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}
404
/* Mark [a, a+len) as addressible but with undefined contents. */
void MC_(make_writable) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}
411
/* Mark [a, a+len) as addressible and holding valid (defined) data. */
void MC_(make_readable) ( Addr a, UInt len )
{
   PROF_EVENT(37);
   DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}
418
/* Make the 4-aligned word at a addressible but undefined (used by the
   %esp-adjustment handlers below). */
static __inline__
void make_aligned_word_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_writable");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   /* Newly-exposed memory: V bits all invalid. */
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s).  [NOTE(review): previous comment said "invalid", but
      &= ~mask CLEARS bits, and a clear A bit means addressible --
      cf. get_abit and the "(1s)" comment in the noaccess twin.] */
   sm->abits[sm_off >> 3] &= ~mask;
   VGP_POPCC(VgpESPAdj);
}
437
/* Make the 4-aligned word at a unaddressible, and its V bits invalid
   so stale values cannot leak through (used by the %esp-adjustment
   handlers below). */
static __inline__
void make_aligned_word_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid (1s). */
   sm->abits[sm_off >> 3] |= mask;
   VGP_POPCC(VgpESPAdj);
}
456
/* Nb: by "aligned" here we mean 8-byte aligned */
/* Make the 8-aligned doubleword at a addressible but undefined. */
static __inline__
void make_aligned_doubleword_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   /* One whole A byte covers exactly these 8 bytes. */
   sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
   /* ... but their contents are undefined: two words of invalid V bits. */
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}
473
/* Make the 8-aligned doubleword at a unaddressible, with invalid V
   bits. */
static __inline__
void make_aligned_doubleword_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   /* One whole A byte covers exactly these 8 bytes. */
   sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}
489
/* The %esp update handling functions: instantiate the stack-pointer
   adjustment handlers from the aligned word/doubleword helpers above,
   with the generic range versions as fallback. */
ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_writable,
                      make_aligned_doubleword_noaccess,
                      MC_(make_writable),
                      MC_(make_noaccess)
                    );
498
499/* Block-copy permissions (needed for implementing realloc()). */
njn5c004e42002-11-18 11:04:50 +0000500static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000501{
502 UInt i;
503
njn5c004e42002-11-18 11:04:50 +0000504 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000505
506 PROF_EVENT(40);
507 for (i = 0; i < len; i++) {
508 UChar abit = get_abit ( src+i );
509 UChar vbyte = get_vbyte ( src+i );
510 PROF_EVENT(41);
511 set_abit ( dst+i, abit );
512 set_vbyte ( dst+i, vbyte );
513 }
514}
515
516
517/* Check permissions for address range. If inadequate permissions
518 exist, *bad_addr is set to the offending address, so the caller can
519 know what it is. */
520
njn5c004e42002-11-18 11:04:50 +0000521Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000522{
523 UInt i;
524 UChar abit;
525 PROF_EVENT(42);
526 for (i = 0; i < len; i++) {
527 PROF_EVENT(43);
528 abit = get_abit(a);
529 if (abit == VGM_BIT_INVALID) {
530 if (bad_addr != NULL) *bad_addr = a;
531 return False;
532 }
533 a++;
534 }
535 return True;
536}
537
njn5c004e42002-11-18 11:04:50 +0000538Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000539{
540 UInt i;
541 UChar abit;
542 UChar vbyte;
543
544 PROF_EVENT(44);
njn5c004e42002-11-18 11:04:50 +0000545 DEBUG("MC_(check_readable)\n");
njn25e49d8e72002-09-23 09:36:25 +0000546 for (i = 0; i < len; i++) {
547 abit = get_abit(a);
548 vbyte = get_vbyte(a);
549 PROF_EVENT(45);
550 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
551 if (bad_addr != NULL) *bad_addr = a;
552 return False;
553 }
554 a++;
555 }
556 return True;
557}
558
559
560/* Check a zero-terminated ascii string. Tricky -- don't want to
561 examine the actual bytes, to find the end, until we're sure it is
562 safe to do so. */
563
njn9b007f62003-04-07 14:40:25 +0000564static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000565{
566 UChar abit;
567 UChar vbyte;
568 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000569 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000570 while (True) {
571 PROF_EVENT(47);
572 abit = get_abit(a);
573 vbyte = get_vbyte(a);
574 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
575 if (bad_addr != NULL) *bad_addr = a;
576 return False;
577 }
578 /* Ok, a is safe to read. */
579 if (* ((UChar*)a) == 0) return True;
580 a++;
581 }
582}
583
584
585/*------------------------------------------------------------*/
586/*--- Memory event handlers ---*/
587/*------------------------------------------------------------*/
588
/* Core callback: check that [base, base+size) is writable by the
   client; if not, record an error appropriate to the requesting core
   part (s names the offending argument/operation). */
static
void mc_check_is_writable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = MC_(check_writable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tst, /*isWrite=*/True, s );
         break;

      default:
         VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}
619
/* Core callback: check that [base, base+size) is readable by the
   client; if not, record an error appropriate to the requesting core
   part. */
static
void mc_check_is_readable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   ok = MC_(check_readable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tst, /*isWrite=*/False, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tst, bad_addr );
         break;

      default:
         VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}
654
/* Core callback: check that the zero-terminated string at str is
   fully readable.  Only ever requested for syscall arguments. */
static
void mc_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
                                   Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (!ok) {
      MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}
673
674
/* Notification of memory present at startup. */
static
void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   MC_(make_readable)(a, len);
}
682
683static
njn5c004e42002-11-18 11:04:50 +0000684void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000685{
686 if (is_inited) {
njn5c004e42002-11-18 11:04:50 +0000687 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000688 } else {
njn5c004e42002-11-18 11:04:50 +0000689 MC_(make_writable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000690 }
691}
692
693static
njn5c004e42002-11-18 11:04:50 +0000694void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000695{
njn5c004e42002-11-18 11:04:50 +0000696 DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
697 if (rr) MC_(make_readable)(a, len);
698 else if (ww) MC_(make_writable)(a, len);
699 else MC_(make_noaccess)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000700}
701
702
703/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +0000704/*--- Register event handlers ---*/
705/*------------------------------------------------------------*/
706
/* At startup, every integer register (%eax..%edi) and %eflags gets a
   fully-defined shadow value. */
static void mc_post_regs_write_init ( void )
{
   UInt i;
   for (i = R_EAX; i <= R_EDI; i++)
      VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
   VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
}
714
/* After the core writes a register, its shadow is fully defined. */
static void mc_post_reg_write(ThreadId tid, UInt reg)
{
   VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
}
719
/* As mc_post_reg_write, for client-call returns; the called function
   address f is unused here. */
static void mc_post_reg_write_clientcall(ThreadId tid, UInt reg, Addr f )
{
   VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
}
724
725
726/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000727/*--- Functions called directly from generated code. ---*/
728/*------------------------------------------------------------*/
729
730static __inline__ UInt rotateRight16 ( UInt x )
731{
732 /* Amazingly, gcc turns this into a single rotate insn. */
733 return (x >> 16) | (x << 16);
734}
735
736
/* The top halfword of x, moved to the bottom: the plain primary-map
   index, used for 1-byte accesses where no alignment trick applies. */
static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}
741
742
743/* Read/write 1/2/4 sized V bytes, and emit an address error if
744 needed. */
745
746/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
747 Under all other circumstances, it defers to the relevant _SLOWLY
748 function, which can handle all situations.
749*/
/* Load the 4 V bytes at a.  Fast path: a is 4-aligned and all four
   bytes addressible; everything else defers to the general slow
   routine, which also reports address errors. */
__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V4_SLOWLY(a);
#  else
   /* rotateRight16 places a's low 2 bits above its high 16 bits, so a
      misaligned address indexes the (always-distinguished) upper 3/4
      of the primary map and so fails the nibble check below. */
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   UChar   abits  = sm->abits[a_off];
   /* Extract the 4 A bits for this word (bit 2 selects the nibble). */
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
   } else {
      /* Slow but general case. */
      return mc_rd_V4_SLOWLY(a);
   }
#  endif
}
774
/* Store the 4 V bytes vbytes at a.  Fast path: a is 4-aligned and all
   four bytes addressible; otherwise defer to the slow routine. */
__attribute__ ((regparm(2)))
void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V4_SLOWLY(a, vbytes);
#  else
   /* rotateRight16 places a's low 2 bits above its high 16 bits, so a
      misaligned address selects a distinguished secondary map and so
      fails the nibble check below. */
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   UChar   abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(61);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
   } else {
      /* Slow but general case. */
      mc_wr_V4_SLOWLY(a, vbytes);
   }
#  endif
}
799
/* Load the 2 V bytes at a; the unused upper 16 bits of the result are
   filled with all-1s V bits (presumably "undefined" -- matches the
   VGM_BYTE_INVALID fill pattern used elsewhere; confirm). */
__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V2_SLOWLY(a);
#  else
   /* The 17-bit mask keeps a's bit 0 above the 16 high bits, so an
      odd (misaligned) address selects a distinguished secondary map
      and takes the slow path. */
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(62);
   /* Fast path requires the whole containing 8-byte chunk to be
      addressible (one full A byte). */
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      return 0xFFFF0000
             |
             (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
   } else {
      /* Slow but general case. */
      return mc_rd_V2_SLOWLY(a);
   }
#  endif
}
822
/* Store the low 2 V bytes of vbytes at a. */
__attribute__ ((regparm(2)))
void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V2_SLOWLY(a, vbytes);
#  else
   /* 17-bit mask: a's bit 0 lands above the 16 high bits, so an odd
      address selects a distinguished map and takes the slow path. */
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(63);
   /* Fast path requires the whole containing 8-byte chunk to be
      addressible (one full A byte). */
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
   } else {
      /* Slow but general case. */
      mc_wr_V2_SLOWLY(a, vbytes);
   }
#  endif
}
843
/* Load the single V byte at a; the unused upper 24 bits of the result
   are filled with all-1s V bits.  A 1-byte access cannot be
   misaligned, so the primary map is indexed with the plain high
   halfword (shiftRight16). */
__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV1) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V1_SLOWLY(a);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(64);
   /* Fast path requires the whole containing 8-byte chunk to be
      addressible (one full A byte). */
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      return 0xFFFFFF00
             |
             (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
   } else {
      /* Slow but general case. */
      return mc_rd_V1_SLOWLY(a);
   }
#  endif
}
866
/* Store the low V byte of vbytes at a.  A 1-byte access cannot be
   misaligned, so the primary map is indexed with the plain high
   halfword (shiftRight16). */
__attribute__ ((regparm(2)))
void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V1_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(65);
   /* Fast path requires the whole containing 8-byte chunk to be
      addressible (one full A byte). */
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
   } else {
      /* Slow but general case. */
      mc_wr_V1_SLOWLY(a, vbytes);
   }
#  endif
}
887
888
889/*------------------------------------------------------------*/
890/*--- Fallback functions to handle cases that the above ---*/
891/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
892/*------------------------------------------------------------*/
893
njn5c004e42002-11-18 11:04:50 +0000894static UInt mc_rd_V4_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000895{
896 Bool a0ok, a1ok, a2ok, a3ok;
897 UInt vb0, vb1, vb2, vb3;
898
899 PROF_EVENT(70);
900
901 /* First establish independently the addressibility of the 4 bytes
902 involved. */
903 a0ok = get_abit(a+0) == VGM_BIT_VALID;
904 a1ok = get_abit(a+1) == VGM_BIT_VALID;
905 a2ok = get_abit(a+2) == VGM_BIT_VALID;
906 a3ok = get_abit(a+3) == VGM_BIT_VALID;
907
908 /* Also get the validity bytes for the address. */
909 vb0 = (UInt)get_vbyte(a+0);
910 vb1 = (UInt)get_vbyte(a+1);
911 vb2 = (UInt)get_vbyte(a+2);
912 vb3 = (UInt)get_vbyte(a+3);
913
914 /* Now distinguish 3 cases */
915
916 /* Case 1: the address is completely valid, so:
917 - no addressing error
918 - return V bytes as read from memory
919 */
920 if (a0ok && a1ok && a2ok && a3ok) {
921 UInt vw = VGM_WORD_INVALID;
922 vw <<= 8; vw |= vb3;
923 vw <<= 8; vw |= vb2;
924 vw <<= 8; vw |= vb1;
925 vw <<= 8; vw |= vb0;
926 return vw;
927 }
928
929 /* Case 2: the address is completely invalid.
930 - emit addressing error
931 - return V word indicating validity.
932 This sounds strange, but if we make loads from invalid addresses
933 give invalid data, we also risk producing a number of confusing
934 undefined-value errors later, which confuses the fact that the
935 error arose in the first place from an invalid address.
936 */
937 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn43c799e2003-04-08 00:08:52 +0000938 if (!MAC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000939 || ((a & 3) != 0)
940 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
sewardjaf48a602003-07-06 00:54:47 +0000941 MAC_(record_address_error)( /*tst*/NULL, a, 4, False );
njn25e49d8e72002-09-23 09:36:25 +0000942 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
943 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
944 }
945
946 /* Case 3: the address is partially valid.
947 - no addressing error
948 - returned V word is invalid where the address is invalid,
949 and contains V bytes from memory otherwise.
njn5c004e42002-11-18 11:04:50 +0000950 Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +0000951 (which is the default), and the address is 4-aligned.
952 If not, Case 2 will have applied.
953 */
njn43c799e2003-04-08 00:08:52 +0000954 sk_assert(MAC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +0000955 {
956 UInt vw = VGM_WORD_INVALID;
957 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
958 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
959 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
960 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
961 return vw;
962 }
963}
964
njn5c004e42002-11-18 11:04:50 +0000965static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000966{
967 /* Check the address for validity. */
968 Bool aerr = False;
969 PROF_EVENT(71);
970
971 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
972 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
973 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
974 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
975
976 /* Store the V bytes, remembering to do it little-endian-ly. */
977 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
978 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
979 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
980 set_vbyte( a+3, vbytes & 0x000000FF );
981
982 /* If an address error has happened, report it. */
983 if (aerr)
sewardjaf48a602003-07-06 00:54:47 +0000984 MAC_(record_address_error)( /*tst*/NULL, a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +0000985}
986
njn5c004e42002-11-18 11:04:50 +0000987static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000988{
989 /* Check the address for validity. */
990 UInt vw = VGM_WORD_INVALID;
991 Bool aerr = False;
992 PROF_EVENT(72);
993
994 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
995 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
996
997 /* Fetch the V bytes, remembering to do it little-endian-ly. */
998 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
999 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1000
1001 /* If an address error has happened, report it. */
1002 if (aerr) {
sewardjaf48a602003-07-06 00:54:47 +00001003 MAC_(record_address_error)( /*tst*/NULL, a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +00001004 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1005 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1006 }
1007 return vw;
1008}
1009
njn5c004e42002-11-18 11:04:50 +00001010static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001011{
1012 /* Check the address for validity. */
1013 Bool aerr = False;
1014 PROF_EVENT(73);
1015
1016 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1017 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1018
1019 /* Store the V bytes, remembering to do it little-endian-ly. */
1020 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1021 set_vbyte( a+1, vbytes & 0x000000FF );
1022
1023 /* If an address error has happened, report it. */
1024 if (aerr)
sewardjaf48a602003-07-06 00:54:47 +00001025 MAC_(record_address_error)( /*tst*/NULL, a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +00001026}
1027
njn5c004e42002-11-18 11:04:50 +00001028static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001029{
1030 /* Check the address for validity. */
1031 UInt vw = VGM_WORD_INVALID;
1032 Bool aerr = False;
1033 PROF_EVENT(74);
1034
1035 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1036
1037 /* Fetch the V byte. */
1038 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1039
1040 /* If an address error has happened, report it. */
1041 if (aerr) {
sewardjaf48a602003-07-06 00:54:47 +00001042 MAC_(record_address_error)( /*tst*/NULL, a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001043 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1044 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1045 }
1046 return vw;
1047}
1048
njn5c004e42002-11-18 11:04:50 +00001049static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001050{
1051 /* Check the address for validity. */
1052 Bool aerr = False;
1053 PROF_EVENT(75);
1054 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1055
1056 /* Store the V bytes, remembering to do it little-endian-ly. */
1057 set_vbyte( a+0, vbytes & 0x000000FF );
1058
1059 /* If an address error has happened, report it. */
1060 if (aerr)
sewardjaf48a602003-07-06 00:54:47 +00001061 MAC_(record_address_error)( /*tst*/NULL, a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001062}
1063
1064
1065/* ---------------------------------------------------------------------
1066 Called from generated code, or from the assembly helpers.
1067 Handlers for value check failures.
1068 ------------------------------------------------------------------ */
1069
njn5c004e42002-11-18 11:04:50 +00001070void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001071{
sewardjaf48a602003-07-06 00:54:47 +00001072 MC_(record_value_error) ( /*tst*/NULL, 0 );
njn25e49d8e72002-09-23 09:36:25 +00001073}
1074
njn5c004e42002-11-18 11:04:50 +00001075void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001076{
sewardjaf48a602003-07-06 00:54:47 +00001077 MC_(record_value_error) ( /*tst*/NULL, 1 );
njn25e49d8e72002-09-23 09:36:25 +00001078}
1079
njn5c004e42002-11-18 11:04:50 +00001080void MC_(helperc_value_check2_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001081{
sewardjaf48a602003-07-06 00:54:47 +00001082 MC_(record_value_error) ( /*tst*/NULL, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001083}
1084
njn5c004e42002-11-18 11:04:50 +00001085void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001086{
sewardjaf48a602003-07-06 00:54:47 +00001087 MC_(record_value_error) ( /*tst*/NULL, 4 );
njn25e49d8e72002-09-23 09:36:25 +00001088}
1089
1090
1091/* ---------------------------------------------------------------------
1092 FPU load and store checks, called from generated code.
1093 ------------------------------------------------------------------ */
1094
/* Check an FPU/SSE read of 'size' bytes at 'addr'.  Fast paths exist
   for aligned 4- and 8-byte reads; sizes 2, 10, 16, 28 and 108 go
   straight to the slow path; anything else panics. */
__attribute__ ((regparm(2)))
void MC_(fpu_read_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to mc_fpu_read_check_SLOWLY for everything else.  */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_read_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      mc_fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      mc_fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      mc_fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   /* 16 == SSE register; 10/28/108 are x87 extended-real and
      FSTENV/FSAVE image sizes. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108) {
      PROF_EVENT(84);
      mc_fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
#  endif
}
1185
1186
/* Check an FPU/SSE write of 'size' bytes at 'addr': the area must be
   addressible, and is marked valid where addressible, invalid where
   not.  Fast paths for aligned 4- and 8-byte writes; sizes 2, 10, 16,
   28, 108 use the slow path; anything else panics. */
__attribute__ ((regparm(2)))
void MC_(fpu_write_check) ( Addr addr, Int size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, make it valid, otherwise invalid.
   */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_write_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      mc_fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      mc_fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      mc_fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   /* 16 == SSE register; 10/28/108 are x87 extended-real and
      FSTENV/FSAVE image sizes. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108) {
      PROF_EVENT(89);
      mc_fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
#  endif
}
1268
1269
1270/* ---------------------------------------------------------------------
1271 Slow, general cases for FPU load and store checks.
1272 ------------------------------------------------------------------ */
1273
1274/* Generic version. Test for both addr and value errors, but if
1275 there's an addr error, don't report a value error even if it
1276 exists. */
1277
njn5c004e42002-11-18 11:04:50 +00001278void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001279{
1280 Int i;
1281 Bool aerr = False;
1282 Bool verr = False;
1283 PROF_EVENT(90);
1284 for (i = 0; i < size; i++) {
1285 PROF_EVENT(91);
1286 if (get_abit(addr+i) != VGM_BIT_VALID)
1287 aerr = True;
1288 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1289 verr = True;
1290 }
1291
1292 if (aerr) {
sewardjaf48a602003-07-06 00:54:47 +00001293 MAC_(record_address_error)( /*tst*/NULL, addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001294 } else {
1295 if (verr)
sewardjaf48a602003-07-06 00:54:47 +00001296 MC_(record_value_error)( /*tst*/NULL, size );
njn25e49d8e72002-09-23 09:36:25 +00001297 }
1298}
1299
1300
1301/* Generic version. Test for addr errors. Valid addresses are
1302 given valid values, and invalid addresses invalid values. */
1303
njn5c004e42002-11-18 11:04:50 +00001304void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001305{
1306 Int i;
1307 Addr a_here;
1308 Bool a_ok;
1309 Bool aerr = False;
1310 PROF_EVENT(92);
1311 for (i = 0; i < size; i++) {
1312 PROF_EVENT(93);
1313 a_here = addr+i;
1314 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1315 if (a_ok) {
1316 set_vbyte(a_here, VGM_BYTE_VALID);
1317 } else {
1318 set_vbyte(a_here, VGM_BYTE_INVALID);
1319 aerr = True;
1320 }
1321 }
1322 if (aerr) {
sewardjaf48a602003-07-06 00:54:47 +00001323 MAC_(record_address_error)( /*tst*/NULL, addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001324 }
1325}
1326
njn25e49d8e72002-09-23 09:36:25 +00001327
1328/*------------------------------------------------------------*/
sewardjee070842003-07-05 17:53:55 +00001329/*--- Metadata get/set functions, for client requests. ---*/
1330/*------------------------------------------------------------*/
1331
1332/* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1333 error, 3 == addressing error. */
/* Copy V bits between client memory and a client-supplied buffer, in
   whole 32-bit words.  setting==True: vbits -> shadow of data;
   setting==False: shadow of data -> vbits (and vbits[] is marked
   valid).  Returns 1 on success, 2 on misaligned/odd-sized args,
   3 if either array is not fully addressible. */
Int MC_(get_or_set_vbits_for_client) (
   ThreadState* tst,
   Addr dataV,
   Addr vbitsV,
   UInt size,
   Bool setting /* True <=> set vbits,  False <=> get vbits */
)
{
   Bool  addressibleD = True;
   Bool  addressibleV = True;
   UInt* data  = (UInt*)dataV;
   UInt* vbits = (UInt*)vbitsV;
   UInt  szW   = size / 4; /* sigh */
   UInt  i;
   UInt* dataP  = NULL; /* bogus init to keep gcc happy */
   UInt* vbitsP = NULL; /* ditto */

   /* Check alignment of args: both pointers 4-aligned, size a
      multiple of 4. */
   if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
      return 2;
   if ((size & 3) != 0)
      return 2;

   /* Check that arrays are addressible before touching them; the loop
      leaves dataP/vbitsP pointing at the first bad word, if any. */
   for (i = 0; i < szW; i++) {
      dataP  = &data[i];
      vbitsP = &vbits[i];
      if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
         addressibleD = False;
         break;
      }
      if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
         addressibleV = False;
         break;
      }
   }
   if (!addressibleD) {
      MAC_(record_address_error)( tst, (Addr)dataP, 4,
                                  setting ? True : False );
      return 3;
   }
   if (!addressibleV) {
      MAC_(record_address_error)( tst, (Addr)vbitsP, 4,
                                  setting ? False : True );
      return 3;
   }

   /* Do the copy */
   if (setting) {
      /* setting: complain if the client passes undefined vbits, but
         install them anyway. */
      for (i = 0; i < szW; i++) {
         if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
            MC_(record_value_error)(tst, 4);
         set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
      }
   } else {
      /* getting: copy shadow out, then mark the output buffer's own
         shadow valid since we have just defined its contents. */
      for (i = 0; i < szW; i++) {
         vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
         set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
      }
   }

   return 1;
}
1399
1400
1401/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001402/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1403/*------------------------------------------------------------*/
1404
sewardja4495682002-10-21 07:29:59 +00001405/* For the memory leak detector, say whether an entire 64k chunk of
1406 address space is possibly in use, or not. If in doubt return
1407 True.
njn25e49d8e72002-09-23 09:36:25 +00001408*/
sewardja4495682002-10-21 07:29:59 +00001409static
1410Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001411{
sewardja4495682002-10-21 07:29:59 +00001412 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1413 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1414 /* Definitely not in use. */
1415 return False;
1416 } else {
1417 return True;
njn25e49d8e72002-09-23 09:36:25 +00001418 }
1419}
1420
1421
sewardja4495682002-10-21 07:29:59 +00001422/* For the memory leak detector, say whether or not a given word
1423 address is to be regarded as valid. */
1424static
1425Bool mc_is_valid_address ( Addr a )
1426{
1427 UInt vbytes;
1428 UChar abits;
1429 sk_assert(IS_ALIGNED4_ADDR(a));
1430 abits = get_abits4_ALIGNED(a);
1431 vbytes = get_vbytes4_ALIGNED(a);
1432 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1433 return True;
1434 } else {
1435 return False;
1436 }
1437}
1438
1439
1440/* Leak detector for this skin. We don't actually do anything, merely
1441 run the generic leak detector with suitable parameters for this
1442 skin. */
njn5c004e42002-11-18 11:04:50 +00001443void MC_(detect_memory_leaks) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001444{
njn43c799e2003-04-08 00:08:52 +00001445 MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001446}
1447
1448
1449/* ---------------------------------------------------------------------
1450 Sanity check machinery (permanently engaged).
1451 ------------------------------------------------------------------ */
1452
1453/* Check that nobody has spuriously claimed that the first or last 16
1454 pages (64 KB) of address space have become accessible. Failure of
1455 the following do not per se indicate an internal consistency
1456 problem, but they are so likely to that we really want to know
1457 about it if so. */
1458
1459Bool SK_(cheap_sanity_check) ( void )
1460{
sewardjd5815ec2003-04-06 12:23:27 +00001461 if (IS_DISTINGUISHED_SM(primary_map[0])
1462 /* kludge: kernel drops a page up at top of address range for
1463 magic "optimized syscalls", so we can no longer check the
1464 highest page */
1465 /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
1466 )
njn25e49d8e72002-09-23 09:36:25 +00001467 return True;
1468 else
1469 return False;
1470}
1471
/* Expensive sanity check: verify that neither the shared
   distinguished secondary map nor the upper 3/4 of the primary map
   has been corrupted. */
Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary.
      8192 A-bit bytes cover 64KB at 1 bit per byte ... */
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   /* ... and 65536 V bytes, one per byte of the 64KB chunk. */
   for (i = 0; i < 65536; i++)
      if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map (entries
      65536..262143) hasn't been messed with: they must all still
      point at the distinguished secondary. */
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != & distinguished_secondary_map)
         return False;

   return True;
}
1493
1494/* ---------------------------------------------------------------------
1495 Debugging machinery (turn on to debug). Something of a mess.
1496 ------------------------------------------------------------------ */
1497
#if 0
/* Print the value tags on the 8 integer registers & flag reg. */

/* Render x as a 36-byte string: 32 '0'/'1' chars with a space after
   bits 24, 16 and 8, plus the terminator. */
static void uint_to_bits ( UInt x, Char* str )
{
   Int i;
   Int w = 0;
   /* str must point to a space of at least 36 bytes. */
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   str[w++] = 0;
   sk_assert(w == 36);   /* 32 digit chars + 3 spaces + NUL */
}

/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s   ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s   edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s   edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s   esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressible words.  Useful for establishing where Valgrind's idea of
   addressibility has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif
1572
1573
1574
1575
#if 0
static Int zzz = 0;

/* Debug-only: print the shadow-register tags and the translation of
   the basic block about to run.  (Compiled out.) */
void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   /* Was "vg_show_reg_tags( &VG_(m_shadow );" -- unbalanced
      parentheses, and vg_show_reg_tags() takes no arguments, so this
      would not compile if re-enabled.  Fixed. */
   vg_show_reg_tags();
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */
1586
njn25e49d8e72002-09-23 09:36:25 +00001587
1588/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001589/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001590/*------------------------------------------------------------*/
1591
njn43c799e2003-04-08 00:08:52 +00001592Bool MC_(clo_avoid_strlen_errors) = True;
1593Bool MC_(clo_cleanup) = True;
1594
njn25e49d8e72002-09-23 09:36:25 +00001595Bool SK_(process_cmd_line_option)(Char* arg)
1596{
njn43c799e2003-04-08 00:08:52 +00001597 if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=yes"))
njn5c004e42002-11-18 11:04:50 +00001598 MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00001599 else if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=no"))
njn5c004e42002-11-18 11:04:50 +00001600 MC_(clo_avoid_strlen_errors) = False;
sewardj8ec2cfc2002-10-13 00:57:26 +00001601
njn43c799e2003-04-08 00:08:52 +00001602 else if (VG_CLO_STREQ(arg, "--cleanup=yes"))
1603 MC_(clo_cleanup) = True;
1604 else if (VG_CLO_STREQ(arg, "--cleanup=no"))
1605 MC_(clo_cleanup) = False;
1606
njn25e49d8e72002-09-23 09:36:25 +00001607 else
njn43c799e2003-04-08 00:08:52 +00001608 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001609
1610 return True;
njn25e49d8e72002-09-23 09:36:25 +00001611}
1612
njn3e884182003-04-15 13:03:23 +00001613void SK_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001614{
njn3e884182003-04-15 13:03:23 +00001615 MAC_(print_common_usage)();
1616 VG_(printf)(
1617" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1618 );
1619}
1620
/* Print debugging-only usage for memcheck-specific options, after the
   common MAC debug options. */
void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
   VG_(printf)(
" --cleanup=no|yes improve after instrumentation? [yes]\n"
   );
}
1628
1629
1630/*------------------------------------------------------------*/
1631/*--- Setup ---*/
1632/*------------------------------------------------------------*/
1633
njn810086f2002-11-14 12:42:47 +00001634void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001635{
njn810086f2002-11-14 12:42:47 +00001636 VG_(details_name) ("Memcheck");
1637 VG_(details_version) (NULL);
1638 VG_(details_description) ("a.k.a. Valgrind, a memory error detector");
1639 VG_(details_copyright_author)(
njn0e1b5142003-04-15 14:58:06 +00001640 "Copyright (C) 2002-2003, and GNU GPL'd, by Julian Seward.");
njn810086f2002-11-14 12:42:47 +00001641 VG_(details_bug_reports_to) ("jseward@acm.org");
sewardj78210aa2002-12-01 02:55:46 +00001642 VG_(details_avg_translation_sizeB) ( 228 );
njn25e49d8e72002-09-23 09:36:25 +00001643
njn810086f2002-11-14 12:42:47 +00001644 VG_(needs_core_errors) ();
1645 VG_(needs_skin_errors) ();
1646 VG_(needs_libc_freeres) ();
njn810086f2002-11-14 12:42:47 +00001647 VG_(needs_shadow_regs) ();
1648 VG_(needs_command_line_options)();
1649 VG_(needs_client_requests) ();
1650 VG_(needs_extended_UCode) ();
1651 VG_(needs_syscall_wrapper) ();
njn810086f2002-11-14 12:42:47 +00001652 VG_(needs_sanity_checks) ();
njn25e49d8e72002-09-23 09:36:25 +00001653
njn3e884182003-04-15 13:03:23 +00001654 MAC_( new_mem_heap) = & mc_new_mem_heap;
1655 MAC_( ban_mem_heap) = & MC_(make_noaccess);
1656 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
1657 MAC_( die_mem_heap) = & MC_(make_noaccess);
1658
njn5c004e42002-11-18 11:04:50 +00001659 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
njn5c004e42002-11-18 11:04:50 +00001660 VG_(track_new_mem_stack_signal) ( & MC_(make_writable) );
1661 VG_(track_new_mem_brk) ( & MC_(make_writable) );
1662 VG_(track_new_mem_mmap) ( & mc_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001663
njn3e884182003-04-15 13:03:23 +00001664 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
1665 VG_(track_change_mem_mprotect) ( & mc_set_perms );
1666
1667 VG_(track_die_mem_stack_signal) ( & MC_(make_noaccess) );
1668 VG_(track_die_mem_brk) ( & MC_(make_noaccess) );
1669 VG_(track_die_mem_munmap) ( & MC_(make_noaccess) );
1670
njn43c799e2003-04-08 00:08:52 +00001671 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
1672 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
1673 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
1674 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
1675 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
1676 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001677
njn43c799e2003-04-08 00:08:52 +00001678 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
1679 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
1680 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
1681 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
1682 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
1683 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001684
njn3e884182003-04-15 13:03:23 +00001685 VG_(track_ban_mem_stack) ( & MC_(make_noaccess) );
njn25e49d8e72002-09-23 09:36:25 +00001686
njn5c004e42002-11-18 11:04:50 +00001687 VG_(track_pre_mem_read) ( & mc_check_is_readable );
1688 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
1689 VG_(track_pre_mem_write) ( & mc_check_is_writable );
1690 VG_(track_post_mem_write) ( & MC_(make_readable) );
njn25e49d8e72002-09-23 09:36:25 +00001691
njnd3040452003-05-19 15:04:06 +00001692 VG_(track_post_regs_write_init) ( & mc_post_regs_write_init );
1693 VG_(track_post_reg_write_syscall_return) ( & mc_post_reg_write );
1694 VG_(track_post_reg_write_deliver_signal) ( & mc_post_reg_write );
1695 VG_(track_post_reg_write_pthread_return) ( & mc_post_reg_write );
1696 VG_(track_post_reg_write_clientreq_return) ( & mc_post_reg_write );
1697 VG_(track_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
1698
njn9b007f62003-04-07 14:40:25 +00001699 /* Three compact slots taken up by stack memory helpers */
njn5c004e42002-11-18 11:04:50 +00001700 VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
1701 VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
1702 VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
1703 VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
njn5c004e42002-11-18 11:04:50 +00001704 VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));
njn25e49d8e72002-09-23 09:36:25 +00001705
njnd04b7c62002-10-03 14:05:52 +00001706 /* These two made non-compact because 2-byte transactions are rare. */
njn5c004e42002-11-18 11:04:50 +00001707 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
njn9b007f62003-04-07 14:40:25 +00001708 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
njn5c004e42002-11-18 11:04:50 +00001709 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
njn9b007f62003-04-07 14:40:25 +00001710 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
njn5c004e42002-11-18 11:04:50 +00001711 VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
1712 VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
1713 VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00001714
1715 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1716 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njn9b007f62003-04-07 14:40:25 +00001717 VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00001718
njn43c799e2003-04-08 00:08:52 +00001719 /* Additional block description for VG_(describe_addr)() */
1720 MAC_(describe_addr_supp) = MC_(client_perm_maybe_describe);
1721
njnd04b7c62002-10-03 14:05:52 +00001722 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00001723 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00001724}
1725
/* Post-command-line-processing hook: nothing to do for memcheck. */
void SK_(post_clo_init) ( void )
{
}
1729
njn7d9f94d2003-04-22 21:41:40 +00001730void SK_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00001731{
njn3e884182003-04-15 13:03:23 +00001732 MAC_(common_fini)( MC_(detect_memory_leaks) );
1733
njn5c004e42002-11-18 11:04:50 +00001734 if (0) {
1735 VG_(message)(Vg_DebugMsg,
1736 "------ Valgrind's client block stats follow ---------------" );
1737 MC_(show_client_block_stats)();
1738 }
njn25e49d8e72002-09-23 09:36:25 +00001739}
1740
1741/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001742/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001743/*--------------------------------------------------------------------*/