blob: 3cc44deab653f0918db4fe449102b85dc23a0349 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
nethercotebb1c9912004-01-04 16:43:23 +000012 Copyright (C) 2000-2004 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
37/* Define to debug the mem audit system. */
38/* #define VG_DEBUG_MEMORY */
39
njn25e49d8e72002-09-23 09:36:25 +000040#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
41
42/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000043/*--- Low-level support for memory checking. ---*/
44/*------------------------------------------------------------*/
45
46/* All reads and writes are checked against a memory map, which
47 records the state of all memory in the process. The memory map is
48 organised like this:
49
50 The top 16 bits of an address are used to index into a top-level
51 map table, containing 65536 entries. Each entry is a pointer to a
52 second-level map, which records the accesibililty and validity
53 permissions for the 65536 bytes indexed by the lower 16 bits of the
54 address. Each byte is represented by nine bits, one indicating
55 accessibility, the other eight validity. So each second-level map
56 contains 73728 bytes. This two-level arrangement conveniently
57 divides the 4G address space into 64k lumps, each size 64k bytes.
58
59 All entries in the primary (top-level) map must point to a valid
60 secondary (second-level) map. Since most of the 4G of address
61 space will not be in use -- ie, not mapped at all -- there is a
62 distinguished secondary map, which indicates `not addressible and
63 not valid' writeable for all bytes. Entries in the primary map for
64 which the entire 64k is not in use at all point at this
65 distinguished map.
66
67 [...] lots of stuff deleted due to out of date-ness
68
69 As a final optimisation, the alignment and address checks for
70 4-byte loads and stores are combined in a neat way. The primary
71 map is extended to have 262144 entries (2^18), rather than 2^16.
72 The top 3/4 of these entries are permanently set to the
73 distinguished secondary map. For a 4-byte load/store, the
74 top-level map is indexed not with (addr >> 16) but instead f(addr),
75 where
76
77 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
78 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
79 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
80
81 ie the lowest two bits are placed above the 16 high address bits.
82 If either of these two bits are nonzero, the address is misaligned;
83 this will select a secondary map from the upper 3/4 of the primary
84 map. Because this is always the distinguished secondary map, a
85 (bogus) address check failure will result. The failure handling
86 code can then figure out whether this is a genuine addr check
87 failure or whether it is a possibly-legitimate access at a
88 misaligned address.
89*/
90
91
92/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000093/*--- Function declarations. ---*/
94/*------------------------------------------------------------*/
95
/* Forward declarations for the slow-path handlers defined later in
   this file.  The fast-path helpers (MC_(helperc_{LOAD,STORE}V*) and
   the FPU checkers) defer to these whenever an access is misaligned,
   unmapped, or not fully addressible. */
static UInt mc_rd_V4_SLOWLY ( Addr a );
static UInt mc_rd_V2_SLOWLY ( Addr a );
static UInt mc_rd_V1_SLOWLY ( Addr a );
static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000104
105/*------------------------------------------------------------*/
106/*--- Data defns. ---*/
107/*------------------------------------------------------------*/
108
/* One secondary map shadows 64k of address space: one A
   (addressibility) bit and eight V (validity) bits per client byte.
   abits is a bit array (65536 bits = 8192 bytes); vbyte holds one
   validity byte per client byte.  Total: 73728 bytes per SecMap. */
typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

/* Primary map: 2^18 entries rather than 2^16 -- the upper 3/4 always
   point at the distinguished map, implementing the misalignment trick
   described in the comment at the top of this file. */
static SecMap* primary_map[ /*65536*/ 262144 ];

/* The distinguished secondary map: every byte "not addressible, not
   valid".  Shared by all primary entries whose 64k chunk is unused. */
static SecMap distinguished_secondary_map;
118
njn25e49d8e72002-09-23 09:36:25 +0000119static void init_shadow_memory ( void )
120{
121 Int i;
122
123 for (i = 0; i < 8192; i++) /* Invalid address */
124 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
125 for (i = 0; i < 65536; i++) /* Invalid Value */
126 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
127
128 /* These entries gradually get overwritten as the used address
129 space expands. */
130 for (i = 0; i < 65536; i++)
131 primary_map[i] = &distinguished_secondary_map;
132
133 /* These ones should never change; it's a bug in Valgrind if they do. */
134 for (i = 65536; i < 262144; i++)
135 primary_map[i] = &distinguished_secondary_map;
136}
137
njn25e49d8e72002-09-23 09:36:25 +0000138/*------------------------------------------------------------*/
139/*--- Basic bitmap management, reading and writing. ---*/
140/*------------------------------------------------------------*/
141
142/* Allocate and initialise a secondary map. */
143
144static SecMap* alloc_secondary_map ( __attribute__ ((unused))
145 Char* caller )
146{
147 SecMap* map;
148 UInt i;
149 PROF_EVENT(10);
150
151 /* Mark all bytes as invalid access and invalid value. */
152
153 /* It just happens that a SecMap occupies exactly 18 pages --
154 although this isn't important, so the following assert is
155 spurious. */
njne427a662002-10-02 11:08:25 +0000156 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
fitzhardinge98abfc72003-12-16 02:05:15 +0000157 map = (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));
njn25e49d8e72002-09-23 09:36:25 +0000158
159 for (i = 0; i < 8192; i++)
160 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
161 for (i = 0; i < 65536; i++)
162 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
163
164 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
165 return map;
166}
167
168
169/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
170
171static __inline__ UChar get_abit ( Addr a )
172{
173 SecMap* sm = primary_map[a >> 16];
174 UInt sm_off = a & 0xFFFF;
175 PROF_EVENT(20);
176# if 0
177 if (IS_DISTINGUISHED_SM(sm))
178 VG_(message)(Vg_DebugMsg,
179 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
180# endif
181 return BITARR_TEST(sm->abits, sm_off)
182 ? VGM_BIT_INVALID : VGM_BIT_VALID;
183}
184
185static __inline__ UChar get_vbyte ( Addr a )
186{
187 SecMap* sm = primary_map[a >> 16];
188 UInt sm_off = a & 0xFFFF;
189 PROF_EVENT(21);
190# if 0
191 if (IS_DISTINGUISHED_SM(sm))
192 VG_(message)(Vg_DebugMsg,
193 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
194# endif
195 return sm->vbyte[sm_off];
196}
197
sewardj56867352003-10-12 10:27:06 +0000198static /* __inline__ */ void set_abit ( Addr a, UChar abit )
njn25e49d8e72002-09-23 09:36:25 +0000199{
200 SecMap* sm;
201 UInt sm_off;
202 PROF_EVENT(22);
203 ENSURE_MAPPABLE(a, "set_abit");
204 sm = primary_map[a >> 16];
205 sm_off = a & 0xFFFF;
206 if (abit)
207 BITARR_SET(sm->abits, sm_off);
208 else
209 BITARR_CLEAR(sm->abits, sm_off);
210}
211
212static __inline__ void set_vbyte ( Addr a, UChar vbyte )
213{
214 SecMap* sm;
215 UInt sm_off;
216 PROF_EVENT(23);
217 ENSURE_MAPPABLE(a, "set_vbyte");
218 sm = primary_map[a >> 16];
219 sm_off = a & 0xFFFF;
220 sm->vbyte[sm_off] = vbyte;
221}
222
223
224/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
225
226static __inline__ UChar get_abits4_ALIGNED ( Addr a )
227{
228 SecMap* sm;
229 UInt sm_off;
230 UChar abits8;
231 PROF_EVENT(24);
232# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000233 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000234# endif
235 sm = primary_map[a >> 16];
236 sm_off = a & 0xFFFF;
237 abits8 = sm->abits[sm_off >> 3];
238 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
239 abits8 &= 0x0F;
240 return abits8;
241}
242
243static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
244{
245 SecMap* sm = primary_map[a >> 16];
246 UInt sm_off = a & 0xFFFF;
247 PROF_EVENT(25);
248# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000249 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000250# endif
251 return ((UInt*)(sm->vbyte))[sm_off >> 2];
252}
253
254
sewardjee070842003-07-05 17:53:55 +0000255static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
256{
257 SecMap* sm;
258 UInt sm_off;
259 ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
260 sm = primary_map[a >> 16];
261 sm_off = a & 0xFFFF;
262 PROF_EVENT(23);
263# ifdef VG_DEBUG_MEMORY
264 sk_assert(IS_ALIGNED4_ADDR(a));
265# endif
266 ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
267}
268
269
njn25e49d8e72002-09-23 09:36:25 +0000270/*------------------------------------------------------------*/
271/*--- Setting permissions over address ranges. ---*/
272/*------------------------------------------------------------*/
273
/* Set the A bit of every byte in [a, a+len) to example_a_bit and all
   eight of its V bits to example_v_bit.  Works in three phases: a
   slow byte-at-a-time ramp up to 8-byte alignment, a fast loop that
   writes one A byte and two 32-bit V words per iteration, and a slow
   tail for the final < 8 bytes.  (Under VG_DEBUG_MEMORY the whole
   range is done the slow way.) */
static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   /* Purely informational: huge ranges are legal but suspicious. */
   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d, v %d",
                   len, example_a_bit, example_v_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense.  "Addressible but containing
      invalid data" is allowed; "unaddressible but valid" is not. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity bits to write. */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms: the A bit
      replicated into all 8 positions of a byte, and the V byte
      replicated into a 32-bit word. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast: 8 client bytes = 1 A byte + 8 V bytes
      (two word stores) per iteration. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}
392
393/* Set permissions for address ranges ... */
394
/* Mark [a, a+len) as neither addressible nor containing valid data. */
void MC_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}
401
/* Mark [a, a+len) as addressible but with undefined (invalid V)
   contents -- e.g. freshly malloc'd memory. */
void MC_(make_writable) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}
408
/* Mark [a, a+len) as addressible and holding fully defined data. */
void MC_(make_readable) ( Addr a, UInt len )
{
   PROF_EVENT(37);
   DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}
415
/* Fast %esp-adjustment helper: make the 4-aligned word at a
   addressible but with undefined contents. */
static __inline__
void make_aligned_word_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_writable");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid (0s). */
   sm->abits[sm_off >> 3] &= ~mask;
   VGP_POPCC(VgpESPAdj);
}
434
/* Fast %esp-adjustment helper: make the 4-aligned word at a
   unaddressible (and its V bits invalid). */
static __inline__
void make_aligned_word_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid (1s). */
   sm->abits[sm_off >> 3] |= mask;
   VGP_POPCC(VgpESPAdj);
}
453
/* Nb: by "aligned" here we mean 8-byte aligned */
/* Fast %esp-adjustment helper: make the 8-aligned doubleword at a
   addressible but with undefined contents.  An 8-aligned doubleword
   covers exactly one whole A byte, so it can be written directly. */
static __inline__
void make_aligned_doubleword_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
   sm = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}
470
/* Fast %esp-adjustment helper: make the 8-aligned doubleword at a
   unaddressible (and its V bits invalid). */
static __inline__
void make_aligned_doubleword_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
   sm = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}
486
/* The %esp update handling functions */
/* Register the fast aligned helpers (4- and 8-byte) and the generic
   make_writable/make_noaccess fallbacks with the core's
   %esp-adjustment machinery. */
ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_writable,
                      make_aligned_doubleword_noaccess,
                      MC_(make_writable),
                      MC_(make_noaccess)
                    );
495
496/* Block-copy permissions (needed for implementing realloc()). */
njn5c004e42002-11-18 11:04:50 +0000497static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000498{
499 UInt i;
500
njn5c004e42002-11-18 11:04:50 +0000501 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000502
503 PROF_EVENT(40);
504 for (i = 0; i < len; i++) {
505 UChar abit = get_abit ( src+i );
506 UChar vbyte = get_vbyte ( src+i );
507 PROF_EVENT(41);
508 set_abit ( dst+i, abit );
509 set_vbyte ( dst+i, vbyte );
510 }
511}
512
513
514/* Check permissions for address range. If inadequate permissions
515 exist, *bad_addr is set to the offending address, so the caller can
516 know what it is. */
517
sewardjecf8e102003-07-12 12:11:39 +0000518/* Returns True if [a .. a+len) is not addressible. Otherwise,
519 returns False, and if bad_addr is non-NULL, sets *bad_addr to
520 indicate the lowest failing address. Functions below are
521 similar. */
522Bool MC_(check_noaccess) ( Addr a, UInt len, Addr* bad_addr )
523{
524 UInt i;
525 UChar abit;
526 PROF_EVENT(42);
527 for (i = 0; i < len; i++) {
528 PROF_EVENT(43);
529 abit = get_abit(a);
530 if (abit == VGM_BIT_VALID) {
531 if (bad_addr != NULL) *bad_addr = a;
532 return False;
533 }
534 a++;
535 }
536 return True;
537}
538
njn5c004e42002-11-18 11:04:50 +0000539Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000540{
541 UInt i;
542 UChar abit;
543 PROF_EVENT(42);
544 for (i = 0; i < len; i++) {
545 PROF_EVENT(43);
546 abit = get_abit(a);
547 if (abit == VGM_BIT_INVALID) {
548 if (bad_addr != NULL) *bad_addr = a;
549 return False;
550 }
551 a++;
552 }
553 return True;
554}
555
njn5c004e42002-11-18 11:04:50 +0000556Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000557{
558 UInt i;
559 UChar abit;
560 UChar vbyte;
561
562 PROF_EVENT(44);
njn5c004e42002-11-18 11:04:50 +0000563 DEBUG("MC_(check_readable)\n");
njn25e49d8e72002-09-23 09:36:25 +0000564 for (i = 0; i < len; i++) {
565 abit = get_abit(a);
566 vbyte = get_vbyte(a);
567 PROF_EVENT(45);
568 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
569 if (bad_addr != NULL) *bad_addr = a;
570 return False;
571 }
572 a++;
573 }
574 return True;
575}
576
577
578/* Check a zero-terminated ascii string. Tricky -- don't want to
579 examine the actual bytes, to find the end, until we're sure it is
580 safe to do so. */
581
njn9b007f62003-04-07 14:40:25 +0000582static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000583{
584 UChar abit;
585 UChar vbyte;
586 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000587 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000588 while (True) {
589 PROF_EVENT(47);
590 abit = get_abit(a);
591 vbyte = get_vbyte(a);
592 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
593 if (bad_addr != NULL) *bad_addr = a;
594 return False;
595 }
596 /* Ok, a is safe to read. */
597 if (* ((UChar*)a) == 0) return True;
598 a++;
599 }
600}
601
602
603/*------------------------------------------------------------*/
604/*--- Memory event handlers ---*/
605/*------------------------------------------------------------*/
606
njn25e49d8e72002-09-23 09:36:25 +0000607static
njn72718642003-07-24 08:45:32 +0000608void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
609 Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000610{
611 Bool ok;
612 Addr bad_addr;
613
614 VGP_PUSHCC(VgpCheckMem);
615
616 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
617 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000618 ok = MC_(check_writable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000619 if (!ok) {
620 switch (part) {
621 case Vg_CoreSysCall:
njn72718642003-07-24 08:45:32 +0000622 MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000623 break;
624
625 case Vg_CorePThread:
626 case Vg_CoreSignal:
njn72718642003-07-24 08:45:32 +0000627 MAC_(record_core_mem_error)( tid, /*isWrite=*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000628 break;
629
630 default:
njn5c004e42002-11-18 11:04:50 +0000631 VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000632 }
633 }
634
635 VGP_POPCC(VgpCheckMem);
636}
637
638static
njn72718642003-07-24 08:45:32 +0000639void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
640 Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000641{
642 Bool ok;
643 Addr bad_addr;
644
645 VGP_PUSHCC(VgpCheckMem);
646
647 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
648 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000649 ok = MC_(check_readable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000650 if (!ok) {
651 switch (part) {
652 case Vg_CoreSysCall:
njn72718642003-07-24 08:45:32 +0000653 MAC_(record_param_error) ( tid, bad_addr, /*isWrite =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000654 break;
655
656 case Vg_CorePThread:
njn72718642003-07-24 08:45:32 +0000657 MAC_(record_core_mem_error)( tid, /*isWrite=*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000658 break;
659
660 /* If we're being asked to jump to a silly address, record an error
661 message before potentially crashing the entire system. */
662 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +0000663 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000664 break;
665
666 default:
njn5c004e42002-11-18 11:04:50 +0000667 VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000668 }
669 }
670 VGP_POPCC(VgpCheckMem);
671}
672
673static
njn72718642003-07-24 08:45:32 +0000674void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000675 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000676{
677 Bool ok = True;
678 Addr bad_addr;
679 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
680
681 VGP_PUSHCC(VgpCheckMem);
682
njne427a662002-10-02 11:08:25 +0000683 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000684 ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000685 if (!ok) {
njn72718642003-07-24 08:45:32 +0000686 MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000687 }
688
689 VGP_POPCC(VgpCheckMem);
690}
691
692
/* Handle memory present at process startup.  rr/ww/xx are the mmap
   permissions of the segment. */
static
void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   MC_(make_readable)(a, len);
}
700
701static
njn5c004e42002-11-18 11:04:50 +0000702void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000703{
704 if (is_inited) {
njn5c004e42002-11-18 11:04:50 +0000705 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000706 } else {
njn5c004e42002-11-18 11:04:50 +0000707 MC_(make_writable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000708 }
709}
710
711static
njn5c004e42002-11-18 11:04:50 +0000712void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000713{
njn5c004e42002-11-18 11:04:50 +0000714 DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
715 if (rr) MC_(make_readable)(a, len);
716 else if (ww) MC_(make_writable)(a, len);
717 else MC_(make_noaccess)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000718}
719
720
721/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +0000722/*--- Register event handlers ---*/
723/*------------------------------------------------------------*/
724
725static void mc_post_regs_write_init ( void )
726{
727 UInt i;
728 for (i = R_EAX; i <= R_EDI; i++)
729 VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
730 VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
731}
732
/* A register has just been written: its shadow becomes fully defined. */
static void mc_post_reg_write(ThreadId tid, UInt reg)
{
   VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
}
737
/* As above, but for a register written as the result of a client
   request call; the called function's address f is not used here. */
static void mc_post_reg_write_clientcall(ThreadId tid, UInt reg, Addr f )
{
   VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
}
742
743
744/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000745/*--- Functions called directly from generated code. ---*/
746/*------------------------------------------------------------*/
747
748static __inline__ UInt rotateRight16 ( UInt x )
749{
750 /* Amazingly, gcc turns this into a single rotate insn. */
751 return (x >> 16) | (x << 16);
752}
753
754
755static __inline__ UInt shiftRight16 ( UInt x )
756{
757 return x >> 16;
758}
759
760
761/* Read/write 1/2/4 sized V bytes, and emit an address error if
762 needed. */
763
764/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
765 Under all other circumstances, it defers to the relevant _SLOWLY
766 function, which can handle all situations.
767*/
768__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000769UInt MC_(helperc_LOADV4) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000770{
771# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000772 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000773# else
774 UInt sec_no = rotateRight16(a) & 0x3FFFF;
775 SecMap* sm = primary_map[sec_no];
776 UInt a_off = (a & 0xFFFF) >> 3;
777 UChar abits = sm->abits[a_off];
778 abits >>= (a & 4);
779 abits &= 15;
780 PROF_EVENT(60);
781 if (abits == VGM_NIBBLE_VALID) {
782 /* Handle common case quickly: a is suitably aligned, is mapped,
783 and is addressible. */
784 UInt v_off = a & 0xFFFF;
785 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
786 } else {
787 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000788 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000789 }
790# endif
791}
792
793__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000794void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000795{
796# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000797 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000798# else
799 UInt sec_no = rotateRight16(a) & 0x3FFFF;
800 SecMap* sm = primary_map[sec_no];
801 UInt a_off = (a & 0xFFFF) >> 3;
802 UChar abits = sm->abits[a_off];
803 abits >>= (a & 4);
804 abits &= 15;
805 PROF_EVENT(61);
806 if (abits == VGM_NIBBLE_VALID) {
807 /* Handle common case quickly: a is suitably aligned, is mapped,
808 and is addressible. */
809 UInt v_off = a & 0xFFFF;
810 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
811 } else {
812 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000813 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000814 }
815# endif
816}
817
818__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000819UInt MC_(helperc_LOADV2) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000820{
821# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000822 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000823# else
824 UInt sec_no = rotateRight16(a) & 0x1FFFF;
825 SecMap* sm = primary_map[sec_no];
826 UInt a_off = (a & 0xFFFF) >> 3;
827 PROF_EVENT(62);
828 if (sm->abits[a_off] == VGM_BYTE_VALID) {
829 /* Handle common case quickly. */
830 UInt v_off = a & 0xFFFF;
831 return 0xFFFF0000
832 |
833 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
834 } else {
835 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000836 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000837 }
838# endif
839}
840
841__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000842void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000843{
844# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000845 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000846# else
847 UInt sec_no = rotateRight16(a) & 0x1FFFF;
848 SecMap* sm = primary_map[sec_no];
849 UInt a_off = (a & 0xFFFF) >> 3;
850 PROF_EVENT(63);
851 if (sm->abits[a_off] == VGM_BYTE_VALID) {
852 /* Handle common case quickly. */
853 UInt v_off = a & 0xFFFF;
854 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
855 } else {
856 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000857 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000858 }
859# endif
860}
861
862__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000863UInt MC_(helperc_LOADV1) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000864{
865# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000866 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000867# else
868 UInt sec_no = shiftRight16(a);
869 SecMap* sm = primary_map[sec_no];
870 UInt a_off = (a & 0xFFFF) >> 3;
871 PROF_EVENT(64);
872 if (sm->abits[a_off] == VGM_BYTE_VALID) {
873 /* Handle common case quickly. */
874 UInt v_off = a & 0xFFFF;
875 return 0xFFFFFF00
876 |
877 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
878 } else {
879 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000880 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000881 }
882# endif
883}
884
885__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000886void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000887{
888# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000889 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000890# else
891 UInt sec_no = shiftRight16(a);
892 SecMap* sm = primary_map[sec_no];
893 UInt a_off = (a & 0xFFFF) >> 3;
894 PROF_EVENT(65);
895 if (sm->abits[a_off] == VGM_BYTE_VALID) {
896 /* Handle common case quickly. */
897 UInt v_off = a & 0xFFFF;
898 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
899 } else {
900 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000901 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000902 }
903# endif
904}
905
906
907/*------------------------------------------------------------*/
908/*--- Fallback functions to handle cases that the above ---*/
909/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
910/*------------------------------------------------------------*/
911
njn5c004e42002-11-18 11:04:50 +0000912static UInt mc_rd_V4_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000913{
914 Bool a0ok, a1ok, a2ok, a3ok;
915 UInt vb0, vb1, vb2, vb3;
916
917 PROF_EVENT(70);
918
919 /* First establish independently the addressibility of the 4 bytes
920 involved. */
921 a0ok = get_abit(a+0) == VGM_BIT_VALID;
922 a1ok = get_abit(a+1) == VGM_BIT_VALID;
923 a2ok = get_abit(a+2) == VGM_BIT_VALID;
924 a3ok = get_abit(a+3) == VGM_BIT_VALID;
925
926 /* Also get the validity bytes for the address. */
927 vb0 = (UInt)get_vbyte(a+0);
928 vb1 = (UInt)get_vbyte(a+1);
929 vb2 = (UInt)get_vbyte(a+2);
930 vb3 = (UInt)get_vbyte(a+3);
931
932 /* Now distinguish 3 cases */
933
934 /* Case 1: the address is completely valid, so:
935 - no addressing error
936 - return V bytes as read from memory
937 */
938 if (a0ok && a1ok && a2ok && a3ok) {
939 UInt vw = VGM_WORD_INVALID;
940 vw <<= 8; vw |= vb3;
941 vw <<= 8; vw |= vb2;
942 vw <<= 8; vw |= vb1;
943 vw <<= 8; vw |= vb0;
944 return vw;
945 }
946
947 /* Case 2: the address is completely invalid.
948 - emit addressing error
949 - return V word indicating validity.
950 This sounds strange, but if we make loads from invalid addresses
951 give invalid data, we also risk producing a number of confusing
952 undefined-value errors later, which confuses the fact that the
953 error arose in the first place from an invalid address.
954 */
955 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn43c799e2003-04-08 00:08:52 +0000956 if (!MAC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000957 || ((a & 3) != 0)
958 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
njn72718642003-07-24 08:45:32 +0000959 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
njn25e49d8e72002-09-23 09:36:25 +0000960 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
961 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
962 }
963
964 /* Case 3: the address is partially valid.
965 - no addressing error
966 - returned V word is invalid where the address is invalid,
967 and contains V bytes from memory otherwise.
njn5c004e42002-11-18 11:04:50 +0000968 Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +0000969 (which is the default), and the address is 4-aligned.
970 If not, Case 2 will have applied.
971 */
njn43c799e2003-04-08 00:08:52 +0000972 sk_assert(MAC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +0000973 {
974 UInt vw = VGM_WORD_INVALID;
975 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
976 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
977 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
978 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
979 return vw;
980 }
981}
982
njn5c004e42002-11-18 11:04:50 +0000983static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000984{
985 /* Check the address for validity. */
986 Bool aerr = False;
987 PROF_EVENT(71);
988
989 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
990 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
991 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
992 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
993
994 /* Store the V bytes, remembering to do it little-endian-ly. */
995 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
996 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
997 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
998 set_vbyte( a+3, vbytes & 0x000000FF );
999
1000 /* If an address error has happened, report it. */
1001 if (aerr)
njn72718642003-07-24 08:45:32 +00001002 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +00001003}
1004
njn5c004e42002-11-18 11:04:50 +00001005static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001006{
1007 /* Check the address for validity. */
1008 UInt vw = VGM_WORD_INVALID;
1009 Bool aerr = False;
1010 PROF_EVENT(72);
1011
1012 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1013 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1014
1015 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1016 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1017 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1018
1019 /* If an address error has happened, report it. */
1020 if (aerr) {
njn72718642003-07-24 08:45:32 +00001021 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +00001022 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1023 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1024 }
1025 return vw;
1026}
1027
njn5c004e42002-11-18 11:04:50 +00001028static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001029{
1030 /* Check the address for validity. */
1031 Bool aerr = False;
1032 PROF_EVENT(73);
1033
1034 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1035 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1036
1037 /* Store the V bytes, remembering to do it little-endian-ly. */
1038 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1039 set_vbyte( a+1, vbytes & 0x000000FF );
1040
1041 /* If an address error has happened, report it. */
1042 if (aerr)
njn72718642003-07-24 08:45:32 +00001043 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +00001044}
1045
njn5c004e42002-11-18 11:04:50 +00001046static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001047{
1048 /* Check the address for validity. */
1049 UInt vw = VGM_WORD_INVALID;
1050 Bool aerr = False;
1051 PROF_EVENT(74);
1052
1053 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1054
1055 /* Fetch the V byte. */
1056 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1057
1058 /* If an address error has happened, report it. */
1059 if (aerr) {
njn72718642003-07-24 08:45:32 +00001060 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001061 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1062 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1063 }
1064 return vw;
1065}
1066
njn5c004e42002-11-18 11:04:50 +00001067static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001068{
1069 /* Check the address for validity. */
1070 Bool aerr = False;
1071 PROF_EVENT(75);
1072 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1073
1074 /* Store the V bytes, remembering to do it little-endian-ly. */
1075 set_vbyte( a+0, vbytes & 0x000000FF );
1076
1077 /* If an address error has happened, report it. */
1078 if (aerr)
njn72718642003-07-24 08:45:32 +00001079 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001080}
1081
1082
1083/* ---------------------------------------------------------------------
1084 Called from generated code, or from the assembly helpers.
1085 Handlers for value check failures.
1086 ------------------------------------------------------------------ */
1087
njn5c004e42002-11-18 11:04:50 +00001088void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001089{
njn72718642003-07-24 08:45:32 +00001090 MC_(record_value_error) ( VG_(get_current_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001091}
1092
njn5c004e42002-11-18 11:04:50 +00001093void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001094{
njn72718642003-07-24 08:45:32 +00001095 MC_(record_value_error) ( VG_(get_current_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001096}
1097
njn5c004e42002-11-18 11:04:50 +00001098void MC_(helperc_value_check2_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001099{
njn72718642003-07-24 08:45:32 +00001100 MC_(record_value_error) ( VG_(get_current_tid)(), 2 );
njn25e49d8e72002-09-23 09:36:25 +00001101}
1102
njn5c004e42002-11-18 11:04:50 +00001103void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001104{
njn72718642003-07-24 08:45:32 +00001105 MC_(record_value_error) ( VG_(get_current_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001106}
1107
1108
1109/* ---------------------------------------------------------------------
1110 FPU load and store checks, called from generated code.
1111 ------------------------------------------------------------------ */
1112
__attribute__ ((regparm(2)))
void MC_(fpu_read_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to mc_fpu_read_check_SLOWLY for everything else.  */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_read_check_SLOWLY ( addr, size );
#  else

   /* 4-byte fast path: aligned, addressible, fully-valid words need
      no further work. */
   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      v_off  = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      mc_fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   /* 8-byte fast path: two independent aligned 4-byte checks; the
      second half may land in a different secondary map. */
   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off  = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off  = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      mc_fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      mc_fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   /* NOTE(review): presumably FPU/SSE state sizes -- 10-byte x87
      extended real, 28/108-byte FNSTENV/FSAVE images, 512-byte
      FXSAVE area; confirm against the instrumenter. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(84);
      mc_fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
#  endif
}
1203
1204
__attribute__ ((regparm(2)))
void MC_(fpu_write_check) ( Addr addr, Int size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, make it valid, otherwise invalid.
   */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_write_check_SLOWLY ( addr, size );
#  else

   /* 4-byte fast path: aligned and addressible words are simply
      marked valid. */
   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Make valid. */
      v_off  = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      mc_fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   /* 8-byte fast path: two aligned 4-byte halves; the second half may
      land in a different secondary map. */
   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible.  Make valid. */
      v_off  = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off  = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      mc_fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      mc_fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   /* NOTE(review): presumably FPU/SSE state sizes -- 10-byte x87
      extended real, 28/108-byte FNSTENV/FSAVE images, 512-byte
      FXSAVE area; confirm against the instrumenter. */
   if (size == 16 /*SSE*/
       || size == 10 || size == 28 || size == 108 || size == 512) {
      PROF_EVENT(89);
      mc_fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
#  endif
}
1286
1287
1288/* ---------------------------------------------------------------------
1289 Slow, general cases for FPU load and store checks.
1290 ------------------------------------------------------------------ */
1291
1292/* Generic version. Test for both addr and value errors, but if
1293 there's an addr error, don't report a value error even if it
1294 exists. */
1295
njn5c004e42002-11-18 11:04:50 +00001296void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001297{
1298 Int i;
1299 Bool aerr = False;
1300 Bool verr = False;
1301 PROF_EVENT(90);
1302 for (i = 0; i < size; i++) {
1303 PROF_EVENT(91);
1304 if (get_abit(addr+i) != VGM_BIT_VALID)
1305 aerr = True;
1306 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1307 verr = True;
1308 }
1309
1310 if (aerr) {
njn72718642003-07-24 08:45:32 +00001311 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001312 } else {
1313 if (verr)
njn72718642003-07-24 08:45:32 +00001314 MC_(record_value_error)( VG_(get_current_tid)(), size );
njn25e49d8e72002-09-23 09:36:25 +00001315 }
1316}
1317
1318
1319/* Generic version. Test for addr errors. Valid addresses are
1320 given valid values, and invalid addresses invalid values. */
1321
njn5c004e42002-11-18 11:04:50 +00001322void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001323{
1324 Int i;
1325 Addr a_here;
1326 Bool a_ok;
1327 Bool aerr = False;
1328 PROF_EVENT(92);
1329 for (i = 0; i < size; i++) {
1330 PROF_EVENT(93);
1331 a_here = addr+i;
1332 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1333 if (a_ok) {
1334 set_vbyte(a_here, VGM_BYTE_VALID);
1335 } else {
1336 set_vbyte(a_here, VGM_BYTE_INVALID);
1337 aerr = True;
1338 }
1339 }
1340 if (aerr) {
njn72718642003-07-24 08:45:32 +00001341 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001342 }
1343}
1344
njn25e49d8e72002-09-23 09:36:25 +00001345
1346/*------------------------------------------------------------*/
sewardjee070842003-07-05 17:53:55 +00001347/*--- Metadata get/set functions, for client requests. ---*/
1348/*------------------------------------------------------------*/
1349
1350/* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1351 error, 3 == addressing error. */
Int MC_(get_or_set_vbits_for_client) (
   ThreadId tid,
   Addr dataV,
   Addr vbitsV,
   UInt size,
   Bool setting /* True <=> set vbits, False <=> get vbits */
)
{
   /* Service the client V-bit get/set requests: copy V bits between
      the client's data array (dataV) and V-bit buffer (vbitsV), one
      aligned 32-bit word at a time.
      Returns 1 on success, 2 on misaligned args or a size that is
      not a multiple of 4, 3 on an addressing error (also recorded
      against 'tid'). */
   Bool addressibleD = True;
   Bool addressibleV = True;
   UInt* data  = (UInt*)dataV;
   UInt* vbits = (UInt*)vbitsV;
   UInt  szW   = size / 4; /* sigh */
   UInt  i;
   UInt* dataP  = NULL; /* bogus init to keep gcc happy */
   UInt* vbitsP = NULL; /* ditto */

   /* Check alignment of args. */
   if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
      return 2;
   if ((size & 3) != 0)
      return 2;

   /* Check that arrays are addressible.  On failure dataP/vbitsP are
      left pointing at the first offending word. */
   for (i = 0; i < szW; i++) {
      dataP  = &data[i];
      vbitsP = &vbits[i];
      if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
         addressibleD = False;
         break;
      }
      if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
         addressibleV = False;
         break;
      }
   }
   if (!addressibleD) {
      /* The data array is written when setting, read when getting --
         hence the isWrite flag mirrors 'setting'. */
      MAC_(record_address_error)( tid, (Addr)dataP, 4,
                                  setting ? True : False );
      return 3;
   }
   if (!addressibleV) {
      /* The vbits array has the opposite access direction, so the
         isWrite flag is inverted. */
      MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
                                  setting ? False : True );
      return 3;
   }

   /* Do the copy */
   if (setting) {
      /* setting */
      for (i = 0; i < szW; i++) {
         /* Complain if the client hands us undefined V bits. */
         if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
            MC_(record_value_error)(tid, 4);
         set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
      }
   } else {
      /* getting */
      for (i = 0; i < szW; i++) {
         vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
         /* Mark the just-written output buffer as defined. */
         set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
      }
   }

   return 1;
}
1417
1418
1419/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001420/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1421/*------------------------------------------------------------*/
1422
sewardja4495682002-10-21 07:29:59 +00001423/* For the memory leak detector, say whether an entire 64k chunk of
1424 address space is possibly in use, or not. If in doubt return
1425 True.
njn25e49d8e72002-09-23 09:36:25 +00001426*/
sewardja4495682002-10-21 07:29:59 +00001427static
1428Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001429{
sewardja4495682002-10-21 07:29:59 +00001430 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1431 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1432 /* Definitely not in use. */
1433 return False;
1434 } else {
1435 return True;
njn25e49d8e72002-09-23 09:36:25 +00001436 }
1437}
1438
1439
sewardja4495682002-10-21 07:29:59 +00001440/* For the memory leak detector, say whether or not a given word
1441 address is to be regarded as valid. */
1442static
1443Bool mc_is_valid_address ( Addr a )
1444{
1445 UInt vbytes;
1446 UChar abits;
1447 sk_assert(IS_ALIGNED4_ADDR(a));
1448 abits = get_abits4_ALIGNED(a);
1449 vbytes = get_vbytes4_ALIGNED(a);
1450 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1451 return True;
1452 } else {
1453 return False;
1454 }
1455}
1456
1457
1458/* Leak detector for this skin. We don't actually do anything, merely
1459 run the generic leak detector with suitable parameters for this
1460 skin. */
njn5c004e42002-11-18 11:04:50 +00001461void MC_(detect_memory_leaks) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001462{
njn43c799e2003-04-08 00:08:52 +00001463 MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001464}
1465
1466
1467/* ---------------------------------------------------------------------
1468 Sanity check machinery (permanently engaged).
1469 ------------------------------------------------------------------ */
1470
njn25e49d8e72002-09-23 09:36:25 +00001471Bool SK_(cheap_sanity_check) ( void )
1472{
jseward9800fd32004-01-04 23:08:04 +00001473 /* nothing useful we can rapidly check */
1474 return True;
njn25e49d8e72002-09-23 09:36:25 +00001475}
1476
1477Bool SK_(expensive_sanity_check) ( void )
1478{
1479 Int i;
1480
1481 /* Make sure nobody changed the distinguished secondary. */
1482 for (i = 0; i < 8192; i++)
1483 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1484 return False;
1485
1486 for (i = 0; i < 65536; i++)
1487 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1488 return False;
1489
1490 /* Make sure that the upper 3/4 of the primary map hasn't
1491 been messed with. */
1492 for (i = 65536; i < 262144; i++)
1493 if (primary_map[i] != & distinguished_secondary_map)
1494 return False;
1495
1496 return True;
1497}
1498
1499/* ---------------------------------------------------------------------
1500 Debugging machinery (turn on to debug). Something of a mess.
1501 ------------------------------------------------------------------ */
1502
/* Disabled debugging aids: register-tag dumping and a memory-scan
   probe.  Compiled out; retained for manual debugging sessions. */
#if 0
/* Print the value tags on the 8 integer registers & flag reg. */

static void uint_to_bits ( UInt x, Char* str )
{
   Int i;
   Int w = 0;
   /* str must point to a space of at least 36 bytes. */
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   str[w++] = 0;
   sk_assert(w == 36);
}

/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressible words.  Useful for establishing where Valgrind's idea of
   addressibility has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif
1577
1578
1579
1580
/* Disabled basic-block tracing hook.
   NOTE(review): the vg_show_reg_tags call below has mismatched
   parentheses and a stray argument; this block would not compile if
   re-enabled as-is. */
#if 0
static Int zzz = 0;

void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   vg_show_reg_tags( &VG_(m_shadow );
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */
1591
njn25e49d8e72002-09-23 09:36:25 +00001592
1593/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001594/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001595/*------------------------------------------------------------*/
1596
njn43c799e2003-04-08 00:08:52 +00001597Bool MC_(clo_avoid_strlen_errors) = True;
1598Bool MC_(clo_cleanup) = True;
1599
njn25e49d8e72002-09-23 09:36:25 +00001600Bool SK_(process_cmd_line_option)(Char* arg)
1601{
njn43c799e2003-04-08 00:08:52 +00001602 if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=yes"))
njn5c004e42002-11-18 11:04:50 +00001603 MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00001604 else if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=no"))
njn5c004e42002-11-18 11:04:50 +00001605 MC_(clo_avoid_strlen_errors) = False;
sewardj8ec2cfc2002-10-13 00:57:26 +00001606
njn43c799e2003-04-08 00:08:52 +00001607 else if (VG_CLO_STREQ(arg, "--cleanup=yes"))
1608 MC_(clo_cleanup) = True;
1609 else if (VG_CLO_STREQ(arg, "--cleanup=no"))
1610 MC_(clo_cleanup) = False;
1611
njn25e49d8e72002-09-23 09:36:25 +00001612 else
njn43c799e2003-04-08 00:08:52 +00001613 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001614
1615 return True;
njn25e49d8e72002-09-23 09:36:25 +00001616}
1617
njn3e884182003-04-15 13:03:23 +00001618void SK_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001619{
njn3e884182003-04-15 13:03:23 +00001620 MAC_(print_common_usage)();
1621 VG_(printf)(
1622" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1623 );
1624}
1625
/* Print debugging-option help: common MAC debug options first, then
   the memcheck-specific ones. */
void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
   VG_(printf)(
" --cleanup=no|yes improve after instrumentation? [yes]\n"
   );
}
1633
1634
1635/*------------------------------------------------------------*/
1636/*--- Setup ---*/
1637/*------------------------------------------------------------*/
1638
njn810086f2002-11-14 12:42:47 +00001639void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001640{
njn810086f2002-11-14 12:42:47 +00001641 VG_(details_name) ("Memcheck");
1642 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00001643 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00001644 VG_(details_copyright_author)(
nethercotebb1c9912004-01-04 16:43:23 +00001645 "Copyright (C) 2002-2004, and GNU GPL'd, by Julian Seward.");
nethercote421281e2003-11-20 16:20:55 +00001646 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj78210aa2002-12-01 02:55:46 +00001647 VG_(details_avg_translation_sizeB) ( 228 );
njn25e49d8e72002-09-23 09:36:25 +00001648
njn810086f2002-11-14 12:42:47 +00001649 VG_(needs_core_errors) ();
1650 VG_(needs_skin_errors) ();
1651 VG_(needs_libc_freeres) ();
njn810086f2002-11-14 12:42:47 +00001652 VG_(needs_shadow_regs) ();
1653 VG_(needs_command_line_options)();
1654 VG_(needs_client_requests) ();
1655 VG_(needs_extended_UCode) ();
1656 VG_(needs_syscall_wrapper) ();
njn810086f2002-11-14 12:42:47 +00001657 VG_(needs_sanity_checks) ();
fitzhardinge98abfc72003-12-16 02:05:15 +00001658 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00001659
njn3e884182003-04-15 13:03:23 +00001660 MAC_( new_mem_heap) = & mc_new_mem_heap;
1661 MAC_( ban_mem_heap) = & MC_(make_noaccess);
1662 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
1663 MAC_( die_mem_heap) = & MC_(make_noaccess);
sewardjecf8e102003-07-12 12:11:39 +00001664 MAC_(check_noaccess) = & MC_(check_noaccess);
njn3e884182003-04-15 13:03:23 +00001665
fitzhardinge98abfc72003-12-16 02:05:15 +00001666 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
1667 VG_(init_new_mem_stack_signal) ( & MC_(make_writable) );
1668 VG_(init_new_mem_brk) ( & MC_(make_writable) );
1669 VG_(init_new_mem_mmap) ( & mc_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001670
fitzhardinge98abfc72003-12-16 02:05:15 +00001671 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
1672 VG_(init_change_mem_mprotect) ( & mc_set_perms );
njn3e884182003-04-15 13:03:23 +00001673
fitzhardinge98abfc72003-12-16 02:05:15 +00001674 VG_(init_die_mem_stack_signal) ( & MC_(make_noaccess) );
1675 VG_(init_die_mem_brk) ( & MC_(make_noaccess) );
1676 VG_(init_die_mem_munmap) ( & MC_(make_noaccess) );
njn3e884182003-04-15 13:03:23 +00001677
fitzhardinge98abfc72003-12-16 02:05:15 +00001678 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
1679 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
1680 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
1681 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
1682 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
1683 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001684
fitzhardinge98abfc72003-12-16 02:05:15 +00001685 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
1686 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
1687 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
1688 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
1689 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
1690 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001691
fitzhardinge98abfc72003-12-16 02:05:15 +00001692 VG_(init_ban_mem_stack) ( & MC_(make_noaccess) );
njn25e49d8e72002-09-23 09:36:25 +00001693
fitzhardinge98abfc72003-12-16 02:05:15 +00001694 VG_(init_pre_mem_read) ( & mc_check_is_readable );
1695 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
1696 VG_(init_pre_mem_write) ( & mc_check_is_writable );
1697 VG_(init_post_mem_write) ( & MC_(make_readable) );
njn25e49d8e72002-09-23 09:36:25 +00001698
fitzhardinge98abfc72003-12-16 02:05:15 +00001699 VG_(init_post_regs_write_init) ( & mc_post_regs_write_init );
1700 VG_(init_post_reg_write_syscall_return) ( & mc_post_reg_write );
1701 VG_(init_post_reg_write_deliver_signal) ( & mc_post_reg_write );
1702 VG_(init_post_reg_write_pthread_return) ( & mc_post_reg_write );
1703 VG_(init_post_reg_write_clientreq_return) ( & mc_post_reg_write );
1704 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00001705
njn9b007f62003-04-07 14:40:25 +00001706 /* Three compact slots taken up by stack memory helpers */
njn5c004e42002-11-18 11:04:50 +00001707 VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
1708 VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
1709 VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
1710 VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
njn5c004e42002-11-18 11:04:50 +00001711 VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));
njn25e49d8e72002-09-23 09:36:25 +00001712
njnd04b7c62002-10-03 14:05:52 +00001713 /* These two made non-compact because 2-byte transactions are rare. */
njn5c004e42002-11-18 11:04:50 +00001714 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
njn9b007f62003-04-07 14:40:25 +00001715 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
njn5c004e42002-11-18 11:04:50 +00001716 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
njn9b007f62003-04-07 14:40:25 +00001717 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
njn5c004e42002-11-18 11:04:50 +00001718 VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
1719 VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
1720 VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00001721
1722 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1723 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njn9b007f62003-04-07 14:40:25 +00001724 VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00001725
njn43c799e2003-04-08 00:08:52 +00001726 /* Additional block description for VG_(describe_addr)() */
1727 MAC_(describe_addr_supp) = MC_(client_perm_maybe_describe);
1728
njnd04b7c62002-10-03 14:05:52 +00001729 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00001730 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00001731}
1732
/* Tool hook called after command-line option processing completes.
   Memcheck performs all of its setup in SK_(pre_clo_init), so this is
   intentionally a no-op. */
void SK_(post_clo_init) ( void )
{
}
1736
njn7d9f94d2003-04-22 21:41:40 +00001737void SK_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00001738{
njn3e884182003-04-15 13:03:23 +00001739 MAC_(common_fini)( MC_(detect_memory_leaks) );
1740
njn5c004e42002-11-18 11:04:50 +00001741 if (0) {
1742 VG_(message)(Vg_DebugMsg,
1743 "------ Valgrind's client block stats follow ---------------" );
1744 MC_(show_client_block_stats)();
1745 }
njn25e49d8e72002-09-23 09:36:25 +00001746}
1747
fitzhardinge98abfc72003-12-16 02:05:15 +00001748VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 9./8)
1749
njn25e49d8e72002-09-23 09:36:25 +00001750/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001751/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001752/*--------------------------------------------------------------------*/