/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                  mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2004 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */
//#include "vg_profile.c"

/* Define to debug the mem audit system. */
/* #define VG_DEBUG_MEMORY */

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by nine bits, one indicating
   accessibility, the other eight validity.  So each second-level map
   contains 73728 bytes.  This two-level arrangement conveniently
   divides the 4G address space into 64k lumps, each size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible and
   not valid' writeable for all bytes.  Entries in the primary map for
   which the entire 64k is not in use at all point at this
   distinguished map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

   f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
      = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
      = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address.
*/

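/* Worked example of the scheme above (a sketch for clarity only; the
   constants mirror rotateRight16() and MC_(helperc_LOADV4) further down
   in this file, and the example address is made up). */
#if 0
   /* Misaligned 4-byte access at 0x40001003: after rotation the low two
      address bits land above bit 15, so the primary-map index falls in
      the upper 3/4 and hits the distinguished (all-invalid) secondary
      map, forcing the slow path. */
   Addr a      = 0x40001003;
   UInt rot    = (a >> 16) | (a << 16);   /* f(addr) = 0x10034000 */
   UInt sec_no = rot & 0x3FFFF;           /* 0x34000 >= 0x10000: distinguished */

   /* The aligned address 0x40001000 rotates to index 0x04000, which is
      simply (a >> 16), ie a normal primary-map entry. */
#endif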

/*------------------------------------------------------------*/
/*--- Function declarations.                                ---*/
/*------------------------------------------------------------*/

static UInt mc_rd_V4_SLOWLY ( Addr a );
static UInt mc_rd_V2_SLOWLY ( Addr a );
static UInt mc_rd_V1_SLOWLY ( Addr a );
static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );

/*------------------------------------------------------------*/
/*--- Data defns.                                           ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

static SecMap* primary_map[ /*65536*/ 262144 ];
static SecMap  distinguished_secondary_map;

static void init_shadow_memory ( void )
{
   Int i;

   for (i = 0; i < 8192; i++)             /* Invalid address */
      distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
   for (i = 0; i < 65536; i++)            /* Invalid Value */
      distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = 65536; i < 262144; i++)
      primary_map[i] = &distinguished_secondary_map;
}

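/* Rough cost of this layout (illustrative arithmetic only, derived from
   the sizes above): the primary map itself is 262144 4-byte pointers =
   1MB, allocated up front, and each 64k chunk the client actually
   touches later gets its own SecMap of 8192 + 65536 = 73728 bytes (18
   pages of 4k).  Shadowing all 65536 chunks would cost about 4.5GB,
   which is why unused chunks share the single
   distinguished_secondary_map initialised above. */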
/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.         ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static SecMap* alloc_secondary_map ( __attribute__ ((unused))
                                     Char* caller )
{
   SecMap* map;
   UInt    i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access and invalid value. */

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. */
   sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
   map = (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
   for (i = 0; i < 65536; i++)
      map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
   if (IS_DISTINGUISHED_SM(sm))
      VG_(message)(Vg_DebugMsg,
                   "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static __inline__ UChar get_vbyte ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = a & 0xFFFF;
   PROF_EVENT(21);
#  if 0
   if (IS_DISTINGUISHED_SM(sm))
      VG_(message)(Vg_DebugMsg,
                   "accessed distinguished 2ndary (V)map! 0x%x\n", a);
#  endif
   return sm->vbyte[sm_off];
}

static /* __inline__ */ void set_abit ( Addr a, UChar abit )
{
   SecMap* sm;
   UInt    sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}

static __inline__ void set_vbyte ( Addr a, UChar vbyte )
{
   SecMap* sm;
   UInt    sm_off;
   PROF_EVENT(23);
   ENSURE_MAPPABLE(a, "set_vbyte");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->vbyte[sm_off] = vbyte;
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   SecMap* sm;
   UInt    sm_off;
   UChar   abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}

static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
{
   SecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = a & 0xFFFF;
   PROF_EVENT(25);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   return ((UInt*)(sm->vbyte))[sm_off >> 2];
}


static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
{
   SecMap* sm;
   UInt    sm_off;
   ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   PROF_EVENT(23);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
}


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.              ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %u, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity bits to write. */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}

/* Set permissions for address ranges ... */

void MC_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

void MC_(make_writable) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

void MC_(make_readable) ( Addr a, UInt len )
{
   PROF_EVENT(37);
   DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}

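/* Usage sketch (illustrative only -- the real callers are the allocator
   wrappers and memory event handlers elsewhere in the mac_ and mc_
   sources): a fresh heap block would be made writable-but-undefined, an
   initialised one fully readable, and a freed one inaccessible. */
#if 0
   MC_(make_writable) ( payload, nbytes );  /* A bits valid, V bits invalid */
   MC_(make_readable) ( payload, nbytes );  /* A and V bits valid */
   MC_(make_noaccess) ( payload, nbytes );  /* A and V bits invalid */
#endif
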
static __inline__
void make_aligned_word_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_writable");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid (0s). */
   sm->abits[sm_off >> 3] &= ~mask;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid (1s). */
   sm->abits[sm_off >> 3] |= mask;
   VGP_POPCC(VgpESPAdj);
}

/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_doubleword_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_doubleword_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}

/* The %esp update handling functions */
ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_writable,
                      make_aligned_doubleword_noaccess,
                      MC_(make_writable),
                      MC_(make_noaccess)
                    );

/* Block-copy permissions (needed for implementing realloc()). */
static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
{
   UInt i;

   DEBUG("mc_copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit  = get_abit ( src+i );
      UChar vbyte = get_vbyte ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
      set_vbyte ( dst+i, vbyte );
   }
}


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressible.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
Bool MC_(check_noaccess) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   UChar vbyte;

   PROF_EVENT(44);
   DEBUG("MC_(check_readable)\n");
   for (i = 0; i < len; i++) {
      abit  = get_abit(a);
      vbyte = get_vbyte(a);
      PROF_EVENT(45);
      if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UChar abit;
   UChar vbyte;
   PROF_EVENT(46);
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(47);
      abit  = get_abit(a);
      vbyte = get_vbyte(a);
      if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return True;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                 ---*/
/*------------------------------------------------------------*/

static
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                   base,base+size-1); */
   ok = MC_(check_writable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tid, /*isWrite=*/True, s );
         break;

      default:
         VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                   base,base+size-1); */
   ok = MC_(check_readable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/False, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, /*isWrite=*/False, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (!ok) {
      MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}


static
void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   MC_(make_readable)(a, len);
}

static
void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   if (is_inited) {
      MC_(make_readable)(a, len);
   } else {
      MC_(make_writable)(a, len);
   }
}

static
void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
{
   DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
   if      (rr) MC_(make_readable)(a, len);
   else if (ww) MC_(make_writable)(a, len);
   else         MC_(make_noaccess)(a, len);
}


/*------------------------------------------------------------*/
/*--- Register event handlers                               ---*/
/*------------------------------------------------------------*/

static void mc_post_regs_write_init ( void )
{
   UInt i;
   for (i = R_EAX; i <= R_EDI; i++)
      VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
   VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
}

static void mc_post_reg_write(ThreadId tid, UInt reg)
{
   VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
}

static void mc_post_reg_write_clientcall(ThreadId tid, UInt reg, Addr f )
{
   VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
}


/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.        ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}


static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}


/* Read/write 1/2/4 sized V bytes, and emit an address error if
   needed. */

/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
   Under all other circumstances, it defers to the relevant _SLOWLY
   function, which can handle all situations.
*/
__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V4_SLOWLY(a);
#  else
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   UChar   abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
   } else {
      /* Slow but general case. */
      return mc_rd_V4_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(2)))
void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V4_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   UChar   abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(61);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
   } else {
      /* Slow but general case. */
      mc_wr_V4_SLOWLY(a, vbytes);
   }
#  endif
}

__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V2_SLOWLY(a);
#  else
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(62);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      return 0xFFFF0000
             |
             (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
   } else {
      /* Slow but general case. */
      return mc_rd_V2_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(2)))
void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V2_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(63);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
   } else {
      /* Slow but general case. */
      mc_wr_V2_SLOWLY(a, vbytes);
   }
#  endif
}

__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV1) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V1_SLOWLY(a);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(64);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      return 0xFFFFFF00
             |
             (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
   } else {
      /* Slow but general case. */
      return mc_rd_V1_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(2)))
void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V1_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(65);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
   } else {
      /* Slow but general case. */
      mc_wr_V1_SLOWLY(a, vbytes);
   }
#  endif
}


/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above     ---*/
/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage.            ---*/
/*------------------------------------------------------------*/

static UInt mc_rd_V4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;
   UInt vb0, vb1, vb2, vb3;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which confuses the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
      return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
             | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MAC_(clo_partial_loads_ok));
   {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}

static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(71);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;

   /* Store the V bytes, remembering to do it little-endian-ly. */
   set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+3, vbytes & 0x000000FF );

   /* If an address error has happened, report it. */
   if (aerr)
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, True );
}

static UInt mc_rd_V2_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   UInt vw   = VGM_WORD_INVALID;
   Bool aerr = False;
   PROF_EVENT(72);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* Fetch the V bytes, remembering to do it little-endian-ly. */
   vw <<= 8; vw |= (UInt)get_vbyte(a+1);
   vw <<= 8; vw |= (UInt)get_vbyte(a+0);

   /* If an address error has happened, report it. */
   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
      vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
           | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
   }
   return vw;
}

static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(73);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* Store the V bytes, remembering to do it little-endian-ly. */
   set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+1, vbytes & 0x000000FF );

   /* If an address error has happened, report it. */
   if (aerr)
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, True );
}

static UInt mc_rd_V1_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   UInt vw   = VGM_WORD_INVALID;
   Bool aerr = False;
   PROF_EVENT(74);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* Fetch the V byte. */
   vw <<= 8; vw |= (UInt)get_vbyte(a+0);

   /* If an address error has happened, report it. */
   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
      vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
           | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
   }
   return vw;
}

static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(75);
   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* Store the V bytes, remembering to do it little-endian-ly. */
   set_vbyte( a+0, vbytes & 0x000000FF );

   /* If an address error has happened, report it. */
   if (aerr)
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, True );
}


/* ---------------------------------------------------------------------
   Called from generated code, or from the assembly helpers.
   Handlers for value check failures.
   ------------------------------------------------------------------ */

void MC_(helperc_value_check0_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 0 );
}

void MC_(helperc_value_check1_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 1 );
}

void MC_(helperc_value_check2_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 2 );
}

void MC_(helperc_value_check4_fail) ( void )
{
   MC_(record_value_error) ( VG_(get_current_tid)(), 4 );
}


/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */

__attribute__ ((regparm(2)))
void MC_(fpu_read_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to mc_fpu_read_check_SLOWLY for everything else.  */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_read_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      mc_fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      mc_fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      mc_fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(84);
      mc_fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
#  endif
}


__attribute__ ((regparm(2)))
void MC_(fpu_write_check) ( Addr addr, Int size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, make it valid, otherwise invalid.
   */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_write_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      mc_fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      mc_fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      mc_fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(89);
      mc_fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
#  endif
}


/* ---------------------------------------------------------------------
   Slow, general cases for FPU load and store checks.
   ------------------------------------------------------------------ */

/* Generic version.  Test for both addr and value errors, but if
   there's an addr error, don't report a value error even if it
   exists. */

void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Bool aerr = False;
   Bool verr = False;
   PROF_EVENT(90);
   for (i = 0; i < size; i++) {
      PROF_EVENT(91);
      if (get_abit(addr+i) != VGM_BIT_VALID)
         aerr = True;
      if (get_vbyte(addr+i) != VGM_BYTE_VALID)
         verr = True;
   }

   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
   } else {
      if (verr)
         MC_(record_value_error)( VG_(get_current_tid)(), size );
   }
}


/* Generic version.  Test for addr errors.  Valid addresses are
   given valid values, and invalid addresses invalid values. */

void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Addr a_here;
   Bool a_ok;
   Bool aerr = False;
   PROF_EVENT(92);
   for (i = 0; i < size; i++) {
      PROF_EVENT(93);
      a_here = addr+i;
      a_ok = get_abit(a_here) == VGM_BIT_VALID;
      if (a_ok) {
         set_vbyte(a_here, VGM_BYTE_VALID);
      } else {
         set_vbyte(a_here, VGM_BYTE_INVALID);
         aerr = True;
      }
   }
   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, True );
   }
}


/*------------------------------------------------------------*/
/*--- Metadata get/set functions, for client requests.      ---*/
/*------------------------------------------------------------*/

/* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
   error, 3 == addressing error. */
Int MC_(get_or_set_vbits_for_client) (
   ThreadId tid,
   Addr dataV,
   Addr vbitsV,
   UInt size,
   Bool setting /* True <=> set vbits,  False <=> get vbits */
)
{
   Bool  addressibleD = True;
   Bool  addressibleV = True;
   UInt* data  = (UInt*)dataV;
   UInt* vbits = (UInt*)vbitsV;
   UInt  szW   = size / 4; /* sigh */
   UInt  i;
   UInt* dataP  = NULL; /* bogus init to keep gcc happy */
   UInt* vbitsP = NULL; /* ditto */

   /* Check alignment of args. */
   if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
      return 2;
   if ((size & 3) != 0)
      return 2;

   /* Check that arrays are addressible. */
   for (i = 0; i < szW; i++) {
      dataP  = &data[i];
      vbitsP = &vbits[i];
      if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
         addressibleD = False;
         break;
      }
      if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
         addressibleV = False;
         break;
      }
   }
   if (!addressibleD) {
      MAC_(record_address_error)( tid, (Addr)dataP, 4,
                                  setting ? True : False );
      return 3;
   }
   if (!addressibleV) {
      MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
                                  setting ? False : True );
      return 3;
   }

   /* Do the copy */
   if (setting) {
      /* setting */
      for (i = 0; i < szW; i++) {
         if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
            MC_(record_value_error)(tid, 4);
         set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
      }
   } else {
      /* getting */
      for (i = 0; i < szW; i++) {
         vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
         set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
      }
   }

   return 1;
}

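/* Client-side sketch (assumes the VALGRIND_GET_VBITS / VALGRIND_SET_VBITS
   request macros declared in memcheck.h; illustrative only, not part of
   this file): */
#if 0
   int buf[4];
   int vbits[4];
   /* Copy the V bits describing buf into vbits ... */
   VALGRIND_GET_VBITS(buf, vbits, sizeof(buf));
   /* ... and write them back again. */
   VALGRIND_SET_VBITS(buf, vbits, sizeof(buf));
#endif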

/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.       ---*/
/*------------------------------------------------------------*/

/* For the memory leak detector, say whether an entire 64k chunk of
   address space is possibly in use, or not.  If in doubt return
   True.
*/
static
Bool mc_is_valid_64k_chunk ( UInt chunk_number )
{
   sk_assert(chunk_number >= 0 && chunk_number < 65536);
   if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
      /* Definitely not in use. */
      return False;
   } else {
      return True;
   }
}


/* For the memory leak detector, say whether or not a given word
   address is to be regarded as valid. */
static
Bool mc_is_valid_address ( Addr a )
{
   UInt  vbytes;
   UChar abits;
   sk_assert(IS_ALIGNED4_ADDR(a));
   abits  = get_abits4_ALIGNED(a);
   vbytes = get_vbytes4_ALIGNED(a);
   if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
      return True;
   } else {
      return False;
   }
}


/* Leak detector for this skin.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   skin. */
void MC_(detect_memory_leaks) ( void )
{
   MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
}


/* ---------------------------------------------------------------------
   Sanity check machinery (permanently engaged).
   ------------------------------------------------------------------ */

Bool SK_(cheap_sanity_check) ( void )
{
   /* nothing useful we can rapidly check */
   return True;
}

Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   for (i = 0; i < 65536; i++)
      if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map hasn't
      been messed with. */
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != & distinguished_secondary_map)
         return False;

   return True;
}

1501/* ---------------------------------------------------------------------
1502 Debugging machinery (turn on to debug). Something of a mess.
1503 ------------------------------------------------------------------ */
1504
1505#if 0
1506/* Print the value tags on the 8 integer registers & flag reg. */
1507
1508static void uint_to_bits ( UInt x, Char* str )
1509{
1510 Int i;
1511 Int w = 0;
1512 /* str must point to a space of at least 36 bytes. */
1513 for (i = 31; i >= 0; i--) {
1514 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
1515 if (i == 24 || i == 16 || i == 8)
1516 str[w++] = ' ';
1517 }
1518 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00001519 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00001520}
1521
1522/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
1523 state table. */
1524
1525static void vg_show_reg_tags ( void )
1526{
1527 Char buf1[36];
1528 Char buf2[36];
1529 UInt z_eax, z_ebx, z_ecx, z_edx,
1530 z_esi, z_edi, z_ebp, z_esp, z_eflags;
1531
1532 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
1533 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
1534 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
1535 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
1536 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
1537 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
1538 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
1539 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
1540 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
1541
1542 uint_to_bits(z_eflags, buf1);
njn9b6d34e2002-10-15 08:48:08 +00001543 VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
njn25e49d8e72002-09-23 09:36:25 +00001544
1545 uint_to_bits(z_eax, buf1);
1546 uint_to_bits(z_ebx, buf2);
1547 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
1548
1549 uint_to_bits(z_ecx, buf1);
1550 uint_to_bits(z_edx, buf2);
1551 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
1552
1553 uint_to_bits(z_esi, buf1);
1554 uint_to_bits(z_edi, buf2);
1555 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
1556
1557 uint_to_bits(z_ebp, buf1);
1558 uint_to_bits(z_esp, buf2);
1559 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
1560}
1561
1562
1563/* For debugging only. Scan the address space and touch all allegedly
1564 addressible words. Useful for establishing where Valgrind's idea of
1565 addressibility has diverged from what the kernel believes. */
1566
1567static
1568void zzzmemscan_notify_word ( Addr a, UInt w )
1569{
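   /* Intentionally empty: the scan itself performs the accesses, and
      VG_(scan_all_valid_memory) returns the word count printed below. */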
1570}
1571
1572void zzzmemscan ( void )
1573{
1574 Int n_notifies
1575 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
1576 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
1577}
1578#endif
1579
1580
1581
1582
1583#if 0
1584static Int zzz = 0;
1585
1586void show_bb ( Addr eip_next )
1587{
1588 VG_(printf)("[%4d] ", zzz);
1589   vg_show_reg_tags();
1590 VG_(translate) ( eip_next, NULL, NULL, NULL );
1591}
1592#endif /* 0 */
1593
njn25e49d8e72002-09-23 09:36:25 +00001594
1595/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001596/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001597/*------------------------------------------------------------*/
1598
njn43c799e2003-04-08 00:08:52 +00001599Bool MC_(clo_avoid_strlen_errors) = True;
1600Bool MC_(clo_cleanup) = True;
1601
njn25e49d8e72002-09-23 09:36:25 +00001602Bool SK_(process_cmd_line_option)(Char* arg)
1603{
nethercote27fec902004-06-16 21:26:32 +00001604 VG_BOOL_CLO("--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
1605 else VG_BOOL_CLO("--cleanup", MC_(clo_cleanup))
njn25e49d8e72002-09-23 09:36:25 +00001606 else
njn43c799e2003-04-08 00:08:52 +00001607 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001608
1609 return True;
njn25e49d8e72002-09-23 09:36:25 +00001610}
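
#if 0
/* Purely illustrative sketch, not compiled in and not the real macro
   expansion: the VG_BOOL_CLO uses above amount to string comparisons of
   the form "<option>=yes" / "<option>=no", roughly as in this
   hand-written equivalent.  The name
   mc_process_cmd_line_option_expanded is hypothetical. */
static Bool mc_process_cmd_line_option_expanded ( Char* arg )
{
   if      (VG_(strcmp)(arg, "--avoid-strlen-errors=yes") == 0)
      MC_(clo_avoid_strlen_errors) = True;
   else if (VG_(strcmp)(arg, "--avoid-strlen-errors=no") == 0)
      MC_(clo_avoid_strlen_errors) = False;
   else if (VG_(strcmp)(arg, "--cleanup=yes") == 0)
      MC_(clo_cleanup) = True;
   else if (VG_(strcmp)(arg, "--cleanup=no") == 0)
      MC_(clo_cleanup) = False;
   else
      return MAC_(process_common_cmd_line_option)(arg);

   return True;
}
#endif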
1611
njn3e884182003-04-15 13:03:23 +00001612void SK_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001613{
njn3e884182003-04-15 13:03:23 +00001614 MAC_(print_common_usage)();
1615 VG_(printf)(
1616" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1617 );
1618}
1619
1620void SK_(print_debug_usage)(void)
1621{
1622 MAC_(print_common_debug_usage)();
1623 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00001624" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00001625 );
njn25e49d8e72002-09-23 09:36:25 +00001626}
1627
1628
1629/*------------------------------------------------------------*/
1630/*--- Setup ---*/
1631/*------------------------------------------------------------*/
1632
njn810086f2002-11-14 12:42:47 +00001633void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001634{
njn810086f2002-11-14 12:42:47 +00001635 VG_(details_name) ("Memcheck");
1636 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00001637 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00001638 VG_(details_copyright_author)(
nethercotebb1c9912004-01-04 16:43:23 +00001639 "Copyright (C) 2002-2004, and GNU GPL'd, by Julian Seward.");
nethercote421281e2003-11-20 16:20:55 +00001640 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj78210aa2002-12-01 02:55:46 +00001641 VG_(details_avg_translation_sizeB) ( 228 );
njn25e49d8e72002-09-23 09:36:25 +00001642
njn810086f2002-11-14 12:42:47 +00001643 VG_(needs_core_errors) ();
1644 VG_(needs_skin_errors) ();
1645 VG_(needs_libc_freeres) ();
njn810086f2002-11-14 12:42:47 +00001646 VG_(needs_shadow_regs) ();
1647 VG_(needs_command_line_options)();
1648 VG_(needs_client_requests) ();
1649 VG_(needs_extended_UCode) ();
1650 VG_(needs_syscall_wrapper) ();
njn810086f2002-11-14 12:42:47 +00001651 VG_(needs_sanity_checks) ();
fitzhardinge98abfc72003-12-16 02:05:15 +00001652 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00001653
njn3e884182003-04-15 13:03:23 +00001654 MAC_( new_mem_heap) = & mc_new_mem_heap;
1655 MAC_( ban_mem_heap) = & MC_(make_noaccess);
1656 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
1657 MAC_( die_mem_heap) = & MC_(make_noaccess);
sewardjecf8e102003-07-12 12:11:39 +00001658 MAC_(check_noaccess) = & MC_(check_noaccess);
njn3e884182003-04-15 13:03:23 +00001659
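   /* Wire up the core's memory-event callbacks.  In outline: memory that
      comes into existence (brk, stack growth on a signal) is marked
      addressable but with undefined contents via MC_(make_writable);
      memory that goes away is marked unaddressable via
      MC_(make_noaccess); mmap/mprotect changes go through mc_set_perms;
      and remapped ranges have their A/V state copied across. */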
fitzhardinge98abfc72003-12-16 02:05:15 +00001660 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
1661 VG_(init_new_mem_stack_signal) ( & MC_(make_writable) );
1662 VG_(init_new_mem_brk) ( & MC_(make_writable) );
1663 VG_(init_new_mem_mmap) ( & mc_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001664
fitzhardinge98abfc72003-12-16 02:05:15 +00001665 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
1666 VG_(init_change_mem_mprotect) ( & mc_set_perms );
njn3e884182003-04-15 13:03:23 +00001667
fitzhardinge98abfc72003-12-16 02:05:15 +00001668 VG_(init_die_mem_stack_signal) ( & MC_(make_noaccess) );
1669 VG_(init_die_mem_brk) ( & MC_(make_noaccess) );
1670 VG_(init_die_mem_munmap) ( & MC_(make_noaccess) );
njn3e884182003-04-15 13:03:23 +00001671
fitzhardinge98abfc72003-12-16 02:05:15 +00001672 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
1673 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
1674 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
1675 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
1676 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
1677 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001678
fitzhardinge98abfc72003-12-16 02:05:15 +00001679 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
1680 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
1681 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
1682 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
1683 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
1684 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001685
fitzhardinge98abfc72003-12-16 02:05:15 +00001686 VG_(init_ban_mem_stack) ( & MC_(make_noaccess) );
njn25e49d8e72002-09-23 09:36:25 +00001687
fitzhardinge98abfc72003-12-16 02:05:15 +00001688 VG_(init_pre_mem_read) ( & mc_check_is_readable );
1689 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
1690 VG_(init_pre_mem_write) ( & mc_check_is_writable );
1691 VG_(init_post_mem_write) ( & MC_(make_readable) );
njn25e49d8e72002-09-23 09:36:25 +00001692
fitzhardinge98abfc72003-12-16 02:05:15 +00001693 VG_(init_post_regs_write_init) ( & mc_post_regs_write_init );
1694 VG_(init_post_reg_write_syscall_return) ( & mc_post_reg_write );
1695 VG_(init_post_reg_write_deliver_signal) ( & mc_post_reg_write );
1696 VG_(init_post_reg_write_pthread_return) ( & mc_post_reg_write );
1697 VG_(init_post_reg_write_clientreq_return) ( & mc_post_reg_write );
1698 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00001699
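   /* Helper registration.  Compact slots are limited in number (and, it
      appears, cheaper to call into), so they are given to the most
      frequently executed checks; rarer cases are registered as
      non-compact. */
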
njn9b007f62003-04-07 14:40:25 +00001700 /* Three compact slots taken up by stack memory helpers */
njn5c004e42002-11-18 11:04:50 +00001701 VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
1702 VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
1703 VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
1704 VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
njn5c004e42002-11-18 11:04:50 +00001705 VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));
njn25e49d8e72002-09-23 09:36:25 +00001706
njnd04b7c62002-10-03 14:05:52 +00001707 /* These two made non-compact because 2-byte transactions are rare. */
njn5c004e42002-11-18 11:04:50 +00001708 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
njn9b007f62003-04-07 14:40:25 +00001709 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
njn5c004e42002-11-18 11:04:50 +00001710 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
njn9b007f62003-04-07 14:40:25 +00001711 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
njn5c004e42002-11-18 11:04:50 +00001712 VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
1713 VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
1714 VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00001715
1716 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1717 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njn9b007f62003-04-07 14:40:25 +00001718 VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00001719
njn43c799e2003-04-08 00:08:52 +00001720 /* Additional block description for VG_(describe_addr)() */
1721 MAC_(describe_addr_supp) = MC_(client_perm_maybe_describe);
1722
njnd04b7c62002-10-03 14:05:52 +00001723 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00001724 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00001725}
1726
1727void SK_(post_clo_init) ( void )
1728{
1729}
1730
njn7d9f94d2003-04-22 21:41:40 +00001731void SK_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00001732{
njn3e884182003-04-15 13:03:23 +00001733 MAC_(common_fini)( MC_(detect_memory_leaks) );
1734
njn5c004e42002-11-18 11:04:50 +00001735 if (0) {
1736 VG_(message)(Vg_DebugMsg,
1737 "------ Valgrind's client block stats follow ---------------" );
1738 MC_(show_client_block_stats)();
1739 }
njn25e49d8e72002-09-23 09:36:25 +00001740}
1741
fitzhardinge98abfc72003-12-16 02:05:15 +00001742VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 9./8)
1743
njn25e49d8e72002-09-23 09:36:25 +00001744/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001745/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001746/*--------------------------------------------------------------------*/