blob: 1a9f4d65fccd2f720617a4da964d9719878c019d [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of MemCheck, a heavyweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn0e1b5142003-04-15 14:58:06 +000012 Copyright (C) 2000-2003 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn27f1a382002-11-08 15:48:16 +000037VG_DETERMINE_INTERFACE_VERSION
38
njn25e49d8e72002-09-23 09:36:25 +000039/* Define to debug the mem audit system. */
40/* #define VG_DEBUG_MEMORY */
41
njn25e49d8e72002-09-23 09:36:25 +000042#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
43
44/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000045/*--- Low-level support for memory checking. ---*/
46/*------------------------------------------------------------*/
47
48/* All reads and writes are checked against a memory map, which
49 records the state of all memory in the process. The memory map is
50 organised like this:
51
52 The top 16 bits of an address are used to index into a top-level
53 map table, containing 65536 entries. Each entry is a pointer to a
54 second-level map, which records the accessibility and validity
55 permissions for the 65536 bytes indexed by the lower 16 bits of the
56 address. Each byte is represented by nine bits, one indicating
57 accessibility, the other eight validity. So each second-level map
58 contains 73728 bytes. This two-level arrangement conveniently
59 divides the 4G address space into 64k lumps, each size 64k bytes.
60
61 All entries in the primary (top-level) map must point to a valid
62 secondary (second-level) map. Since most of the 4G of address
63 space will not be in use -- ie, not mapped at all -- there is a
64 distinguished secondary map, which indicates `not addressible and
65 not valid' writeable for all bytes. Entries in the primary map for
66 which the entire 64k is not in use at all point at this
67 distinguished map.
68
69 [...] lots of stuff deleted due to out of date-ness
70
71 As a final optimisation, the alignment and address checks for
72 4-byte loads and stores are combined in a neat way. The primary
73 map is extended to have 262144 entries (2^18), rather than 2^16.
74 The top 3/4 of these entries are permanently set to the
75 distinguished secondary map. For a 4-byte load/store, the
76 top-level map is indexed not with (addr >> 16) but instead f(addr),
77 where
78
79 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
80 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
81 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
82
83 ie the lowest two bits are placed above the 16 high address bits.
84 If either of these two bits are nonzero, the address is misaligned;
85 this will select a secondary map from the upper 3/4 of the primary
86 map. Because this is always the distinguished secondary map, a
87 (bogus) address check failure will result. The failure handling
88 code can then figure out whether this is a genuine addr check
89 failure or whether it is a possibly-legitimate access at a
90 misaligned address.
91*/
92
93
94/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000095/*--- Function declarations. ---*/
96/*------------------------------------------------------------*/
97
njn5c004e42002-11-18 11:04:50 +000098static UInt mc_rd_V4_SLOWLY ( Addr a );
99static UInt mc_rd_V2_SLOWLY ( Addr a );
100static UInt mc_rd_V1_SLOWLY ( Addr a );
101static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
102static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
103static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
104static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
105static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000106
107/*------------------------------------------------------------*/
108/*--- Data defns. ---*/
109/*------------------------------------------------------------*/
110
typedef
   struct {
      /* One A (addressability) bit per byte of the 64k chunk:
         8192 bytes * 8 bits == 65536 bits.  A set bit marks the byte
         unaddressable (see get_abit/set_abit). */
      UChar abits[8192];
      /* One V (validity) byte per byte of the chunk. */
      UChar vbyte[65536];
   }
   SecMap;

/* 2^18 entries rather than 2^16: the upper 3/4 of the table traps
   misaligned 4-byte accesses via the rotateRight16 addressing trick
   described in the big comment above. */
static SecMap* primary_map[ /*65536*/ 262144 ];
/* The single shared secondary map meaning "entire 64k chunk is
   unaddressable and undefined". */
static SecMap distinguished_secondary_map;
120
njn25e49d8e72002-09-23 09:36:25 +0000121
122static void init_shadow_memory ( void )
123{
124 Int i;
125
126 for (i = 0; i < 8192; i++) /* Invalid address */
127 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
128 for (i = 0; i < 65536; i++) /* Invalid Value */
129 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
130
131 /* These entries gradually get overwritten as the used address
132 space expands. */
133 for (i = 0; i < 65536; i++)
134 primary_map[i] = &distinguished_secondary_map;
135
136 /* These ones should never change; it's a bug in Valgrind if they do. */
137 for (i = 65536; i < 262144; i++)
138 primary_map[i] = &distinguished_secondary_map;
139}
140
njn25e49d8e72002-09-23 09:36:25 +0000141/*------------------------------------------------------------*/
142/*--- Basic bitmap management, reading and writing. ---*/
143/*------------------------------------------------------------*/
144
145/* Allocate and initialise a secondary map. */
146
147static SecMap* alloc_secondary_map ( __attribute__ ((unused))
148 Char* caller )
149{
150 SecMap* map;
151 UInt i;
152 PROF_EVENT(10);
153
154 /* Mark all bytes as invalid access and invalid value. */
155
156 /* It just happens that a SecMap occupies exactly 18 pages --
157 although this isn't important, so the following assert is
158 spurious. */
njne427a662002-10-02 11:08:25 +0000159 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000160 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
161
162 for (i = 0; i < 8192; i++)
163 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
164 for (i = 0; i < 65536; i++)
165 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
166
167 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
168 return map;
169}
170
171
172/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
173
174static __inline__ UChar get_abit ( Addr a )
175{
176 SecMap* sm = primary_map[a >> 16];
177 UInt sm_off = a & 0xFFFF;
178 PROF_EVENT(20);
179# if 0
180 if (IS_DISTINGUISHED_SM(sm))
181 VG_(message)(Vg_DebugMsg,
182 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
183# endif
184 return BITARR_TEST(sm->abits, sm_off)
185 ? VGM_BIT_INVALID : VGM_BIT_VALID;
186}
187
188static __inline__ UChar get_vbyte ( Addr a )
189{
190 SecMap* sm = primary_map[a >> 16];
191 UInt sm_off = a & 0xFFFF;
192 PROF_EVENT(21);
193# if 0
194 if (IS_DISTINGUISHED_SM(sm))
195 VG_(message)(Vg_DebugMsg,
196 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
197# endif
198 return sm->vbyte[sm_off];
199}
200
201static __inline__ void set_abit ( Addr a, UChar abit )
202{
203 SecMap* sm;
204 UInt sm_off;
205 PROF_EVENT(22);
206 ENSURE_MAPPABLE(a, "set_abit");
207 sm = primary_map[a >> 16];
208 sm_off = a & 0xFFFF;
209 if (abit)
210 BITARR_SET(sm->abits, sm_off);
211 else
212 BITARR_CLEAR(sm->abits, sm_off);
213}
214
215static __inline__ void set_vbyte ( Addr a, UChar vbyte )
216{
217 SecMap* sm;
218 UInt sm_off;
219 PROF_EVENT(23);
220 ENSURE_MAPPABLE(a, "set_vbyte");
221 sm = primary_map[a >> 16];
222 sm_off = a & 0xFFFF;
223 sm->vbyte[sm_off] = vbyte;
224}
225
226
227/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
228
229static __inline__ UChar get_abits4_ALIGNED ( Addr a )
230{
231 SecMap* sm;
232 UInt sm_off;
233 UChar abits8;
234 PROF_EVENT(24);
235# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000236 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000237# endif
238 sm = primary_map[a >> 16];
239 sm_off = a & 0xFFFF;
240 abits8 = sm->abits[sm_off >> 3];
241 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
242 abits8 &= 0x0F;
243 return abits8;
244}
245
246static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
247{
248 SecMap* sm = primary_map[a >> 16];
249 UInt sm_off = a & 0xFFFF;
250 PROF_EVENT(25);
251# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000252 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000253# endif
254 return ((UInt*)(sm->vbyte))[sm_off >> 2];
255}
256
257
258/*------------------------------------------------------------*/
259/*--- Setting permissions over address ranges. ---*/
260/*------------------------------------------------------------*/
261
/* Set the A bit of every byte in [a, a+len) to example_a_bit, and
   every V byte to 8 copies of example_v_bit.  Proceeds byte-wise up
   to 8-byte alignment, then 8 bytes per iteration (one abits byte +
   two V words), then finishes the tail byte-wise. */
static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   /* Huge ranges are suspicious (may indicate a caller bug) but are
      still honoured -- just warn. */
   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d, v %d",
                   len, example_a_bit, example_v_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   /* An unaddressable byte can never hold a defined value. */
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity bits to write. */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms: the A bit
      replicated into all 8 bit positions, the V byte into a word. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... (byte-at-a-time reference implementation) */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast: one abits byte and two V words cover 8
      addresses per iteration. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}
380
381/* Set permissions for address ranges ... */
382
njn5c004e42002-11-18 11:04:50 +0000383void MC_(make_noaccess) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000384{
385 PROF_EVENT(35);
njn5c004e42002-11-18 11:04:50 +0000386 DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000387 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
388}
389
njn5c004e42002-11-18 11:04:50 +0000390void MC_(make_writable) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000391{
392 PROF_EVENT(36);
njn5c004e42002-11-18 11:04:50 +0000393 DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000394 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
395}
396
njn5c004e42002-11-18 11:04:50 +0000397void MC_(make_readable) ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000398{
399 PROF_EVENT(37);
njn5c004e42002-11-18 11:04:50 +0000400 DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000401 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
402}
403
njn9b007f62003-04-07 14:40:25 +0000404static __inline__
405void make_aligned_word_writable(Addr a)
406{
407 SecMap* sm;
408 UInt sm_off;
409 UChar mask;
njn25e49d8e72002-09-23 09:36:25 +0000410
njn9b007f62003-04-07 14:40:25 +0000411 VGP_PUSHCC(VgpESPAdj);
412 ENSURE_MAPPABLE(a, "make_aligned_word_writable");
413 sm = primary_map[a >> 16];
414 sm_off = a & 0xFFFF;
415 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
416 mask = 0x0F;
417 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
418 /* mask now contains 1s where we wish to make address bits invalid (0s). */
419 sm->abits[sm_off >> 3] &= ~mask;
420 VGP_POPCC(VgpESPAdj);
421}
422
423static __inline__
424void make_aligned_word_noaccess(Addr a)
425{
426 SecMap* sm;
427 UInt sm_off;
428 UChar mask;
429
430 VGP_PUSHCC(VgpESPAdj);
431 ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
432 sm = primary_map[a >> 16];
433 sm_off = a & 0xFFFF;
434 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
435 mask = 0x0F;
436 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
437 /* mask now contains 1s where we wish to make address bits invalid (1s). */
438 sm->abits[sm_off >> 3] |= mask;
439 VGP_POPCC(VgpESPAdj);
440}
441
442/* Nb: by "aligned" here we mean 8-byte aligned */
443static __inline__
444void make_aligned_doubleword_writable(Addr a)
445{
446 SecMap* sm;
447 UInt sm_off;
448
449 VGP_PUSHCC(VgpESPAdj);
450 ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
451 sm = primary_map[a >> 16];
452 sm_off = a & 0xFFFF;
453 sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
454 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
455 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
456 VGP_POPCC(VgpESPAdj);
457}
458
459static __inline__
460void make_aligned_doubleword_noaccess(Addr a)
461{
462 SecMap* sm;
463 UInt sm_off;
464
465 VGP_PUSHCC(VgpESPAdj);
466 ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
467 sm = primary_map[a >> 16];
468 sm_off = a & 0xFFFF;
469 sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
470 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
471 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
472 VGP_POPCC(VgpESPAdj);
473}
474
/* The %esp update handling functions */
/* Registers the aligned-word/doubleword fast paths and the general
   range handlers used when the stack pointer moves (the handlers all
   account under VgpESPAdj): growth makes the new stack area writable
   but undefined, shrinkage makes it unaddressable. */
ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_writable,
                      make_aligned_doubleword_noaccess,
                      MC_(make_writable),
                      MC_(make_noaccess)
                      );
483
484/* Block-copy permissions (needed for implementing realloc()). */
njn5c004e42002-11-18 11:04:50 +0000485static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000486{
487 UInt i;
488
njn5c004e42002-11-18 11:04:50 +0000489 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000490
491 PROF_EVENT(40);
492 for (i = 0; i < len; i++) {
493 UChar abit = get_abit ( src+i );
494 UChar vbyte = get_vbyte ( src+i );
495 PROF_EVENT(41);
496 set_abit ( dst+i, abit );
497 set_vbyte ( dst+i, vbyte );
498 }
499}
500
501
502/* Check permissions for address range. If inadequate permissions
503 exist, *bad_addr is set to the offending address, so the caller can
504 know what it is. */
505
njn5c004e42002-11-18 11:04:50 +0000506Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000507{
508 UInt i;
509 UChar abit;
510 PROF_EVENT(42);
511 for (i = 0; i < len; i++) {
512 PROF_EVENT(43);
513 abit = get_abit(a);
514 if (abit == VGM_BIT_INVALID) {
515 if (bad_addr != NULL) *bad_addr = a;
516 return False;
517 }
518 a++;
519 }
520 return True;
521}
522
njn5c004e42002-11-18 11:04:50 +0000523Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000524{
525 UInt i;
526 UChar abit;
527 UChar vbyte;
528
529 PROF_EVENT(44);
njn5c004e42002-11-18 11:04:50 +0000530 DEBUG("MC_(check_readable)\n");
njn25e49d8e72002-09-23 09:36:25 +0000531 for (i = 0; i < len; i++) {
532 abit = get_abit(a);
533 vbyte = get_vbyte(a);
534 PROF_EVENT(45);
535 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
536 if (bad_addr != NULL) *bad_addr = a;
537 return False;
538 }
539 a++;
540 }
541 return True;
542}
543
544
545/* Check a zero-terminated ascii string. Tricky -- don't want to
546 examine the actual bytes, to find the end, until we're sure it is
547 safe to do so. */
548
njn9b007f62003-04-07 14:40:25 +0000549static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000550{
551 UChar abit;
552 UChar vbyte;
553 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000554 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000555 while (True) {
556 PROF_EVENT(47);
557 abit = get_abit(a);
558 vbyte = get_vbyte(a);
559 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
560 if (bad_addr != NULL) *bad_addr = a;
561 return False;
562 }
563 /* Ok, a is safe to read. */
564 if (* ((UChar*)a) == 0) return True;
565 a++;
566 }
567}
568
569
570/*------------------------------------------------------------*/
571/*--- Memory event handlers ---*/
572/*------------------------------------------------------------*/
573
njn25e49d8e72002-09-23 09:36:25 +0000574static
njn5c004e42002-11-18 11:04:50 +0000575void mc_check_is_writable ( CorePart part, ThreadState* tst,
576 Char* s, Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000577{
578 Bool ok;
579 Addr bad_addr;
580
581 VGP_PUSHCC(VgpCheckMem);
582
583 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
584 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000585 ok = MC_(check_writable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000586 if (!ok) {
587 switch (part) {
588 case Vg_CoreSysCall:
njn43c799e2003-04-08 00:08:52 +0000589 MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000590 break;
591
592 case Vg_CorePThread:
593 case Vg_CoreSignal:
njn43c799e2003-04-08 00:08:52 +0000594 MAC_(record_core_mem_error)( tst, /*isWrite=*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000595 break;
596
597 default:
njn5c004e42002-11-18 11:04:50 +0000598 VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000599 }
600 }
601
602 VGP_POPCC(VgpCheckMem);
603}
604
605static
njn5c004e42002-11-18 11:04:50 +0000606void mc_check_is_readable ( CorePart part, ThreadState* tst,
607 Char* s, Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000608{
609 Bool ok;
610 Addr bad_addr;
611
612 VGP_PUSHCC(VgpCheckMem);
613
614 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
615 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000616 ok = MC_(check_readable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000617 if (!ok) {
618 switch (part) {
619 case Vg_CoreSysCall:
njn43c799e2003-04-08 00:08:52 +0000620 MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000621 break;
622
623 case Vg_CorePThread:
njn43c799e2003-04-08 00:08:52 +0000624 MAC_(record_core_mem_error)( tst, /*isWrite=*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000625 break;
626
627 /* If we're being asked to jump to a silly address, record an error
628 message before potentially crashing the entire system. */
629 case Vg_CoreTranslate:
njn43c799e2003-04-08 00:08:52 +0000630 MAC_(record_jump_error)( tst, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000631 break;
632
633 default:
njn5c004e42002-11-18 11:04:50 +0000634 VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000635 }
636 }
637 VGP_POPCC(VgpCheckMem);
638}
639
640static
njn5c004e42002-11-18 11:04:50 +0000641void mc_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
642 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000643{
644 Bool ok = True;
645 Addr bad_addr;
646 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
647
648 VGP_PUSHCC(VgpCheckMem);
649
njne427a662002-10-02 11:08:25 +0000650 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000651 ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000652 if (!ok) {
njn43c799e2003-04-08 00:08:52 +0000653 MAC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000654 }
655
656 VGP_POPCC(VgpCheckMem);
657}
658
659
660static
njn5c004e42002-11-18 11:04:50 +0000661void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +0000662{
njn1f3a9092002-10-04 09:22:30 +0000663 /* Ignore the permissions, just make it readable. Seems to work... */
njn5c004e42002-11-18 11:04:50 +0000664 DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
665 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000666}
667
668static
njn5c004e42002-11-18 11:04:50 +0000669void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000670{
671 if (is_inited) {
njn5c004e42002-11-18 11:04:50 +0000672 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000673 } else {
njn5c004e42002-11-18 11:04:50 +0000674 MC_(make_writable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000675 }
676}
677
678static
njn5c004e42002-11-18 11:04:50 +0000679void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000680{
njn5c004e42002-11-18 11:04:50 +0000681 DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
682 if (rr) MC_(make_readable)(a, len);
683 else if (ww) MC_(make_writable)(a, len);
684 else MC_(make_noaccess)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000685}
686
687
688/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +0000689/*--- Register event handlers ---*/
690/*------------------------------------------------------------*/
691
692static void mc_post_regs_write_init ( void )
693{
694 UInt i;
695 for (i = R_EAX; i <= R_EDI; i++)
696 VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
697 VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
698}
699
700static void mc_post_reg_write(ThreadId tid, UInt reg)
701{
702 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
703}
704
705static void mc_post_reg_write_clientcall(ThreadId tid, UInt reg, Addr f )
706{
707 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
708}
709
710
711/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000712/*--- Functions called directly from generated code. ---*/
713/*------------------------------------------------------------*/
714
715static __inline__ UInt rotateRight16 ( UInt x )
716{
717 /* Amazingly, gcc turns this into a single rotate insn. */
718 return (x >> 16) | (x << 16);
719}
720
721
722static __inline__ UInt shiftRight16 ( UInt x )
723{
724 return x >> 16;
725}
726
727
728/* Read/write 1/2/4 sized V bytes, and emit an address error if
729 needed. */
730
731/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
732 Under all other circumstances, it defers to the relevant _SLOWLY
733 function, which can handle all situations.
734*/
735__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000736UInt MC_(helperc_LOADV4) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000737{
738# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000739 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000740# else
741 UInt sec_no = rotateRight16(a) & 0x3FFFF;
742 SecMap* sm = primary_map[sec_no];
743 UInt a_off = (a & 0xFFFF) >> 3;
744 UChar abits = sm->abits[a_off];
745 abits >>= (a & 4);
746 abits &= 15;
747 PROF_EVENT(60);
748 if (abits == VGM_NIBBLE_VALID) {
749 /* Handle common case quickly: a is suitably aligned, is mapped,
750 and is addressible. */
751 UInt v_off = a & 0xFFFF;
752 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
753 } else {
754 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000755 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000756 }
757# endif
758}
759
760__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000761void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000762{
763# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000764 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000765# else
766 UInt sec_no = rotateRight16(a) & 0x3FFFF;
767 SecMap* sm = primary_map[sec_no];
768 UInt a_off = (a & 0xFFFF) >> 3;
769 UChar abits = sm->abits[a_off];
770 abits >>= (a & 4);
771 abits &= 15;
772 PROF_EVENT(61);
773 if (abits == VGM_NIBBLE_VALID) {
774 /* Handle common case quickly: a is suitably aligned, is mapped,
775 and is addressible. */
776 UInt v_off = a & 0xFFFF;
777 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
778 } else {
779 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000780 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000781 }
782# endif
783}
784
785__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000786UInt MC_(helperc_LOADV2) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000787{
788# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000789 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000790# else
791 UInt sec_no = rotateRight16(a) & 0x1FFFF;
792 SecMap* sm = primary_map[sec_no];
793 UInt a_off = (a & 0xFFFF) >> 3;
794 PROF_EVENT(62);
795 if (sm->abits[a_off] == VGM_BYTE_VALID) {
796 /* Handle common case quickly. */
797 UInt v_off = a & 0xFFFF;
798 return 0xFFFF0000
799 |
800 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
801 } else {
802 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000803 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000804 }
805# endif
806}
807
808__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000809void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000810{
811# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000812 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000813# else
814 UInt sec_no = rotateRight16(a) & 0x1FFFF;
815 SecMap* sm = primary_map[sec_no];
816 UInt a_off = (a & 0xFFFF) >> 3;
817 PROF_EVENT(63);
818 if (sm->abits[a_off] == VGM_BYTE_VALID) {
819 /* Handle common case quickly. */
820 UInt v_off = a & 0xFFFF;
821 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
822 } else {
823 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000824 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000825 }
826# endif
827}
828
829__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000830UInt MC_(helperc_LOADV1) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000831{
832# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000833 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000834# else
835 UInt sec_no = shiftRight16(a);
836 SecMap* sm = primary_map[sec_no];
837 UInt a_off = (a & 0xFFFF) >> 3;
838 PROF_EVENT(64);
839 if (sm->abits[a_off] == VGM_BYTE_VALID) {
840 /* Handle common case quickly. */
841 UInt v_off = a & 0xFFFF;
842 return 0xFFFFFF00
843 |
844 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
845 } else {
846 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000847 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000848 }
849# endif
850}
851
852__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000853void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000854{
855# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000856 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000857# else
858 UInt sec_no = shiftRight16(a);
859 SecMap* sm = primary_map[sec_no];
860 UInt a_off = (a & 0xFFFF) >> 3;
861 PROF_EVENT(65);
862 if (sm->abits[a_off] == VGM_BYTE_VALID) {
863 /* Handle common case quickly. */
864 UInt v_off = a & 0xFFFF;
865 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
866 } else {
867 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000868 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000869 }
870# endif
871}
872
873
874/*------------------------------------------------------------*/
875/*--- Fallback functions to handle cases that the above ---*/
876/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
877/*------------------------------------------------------------*/
878
/* General-case 4-byte load: checks each of the 4 bytes'
   addressability independently, reports an address error when
   appropriate, and returns the V word the instrumented load should
   see.  See the three-case comment bodies below for the exact
   policy. */
static UInt mc_rd_V4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;
   UInt vb0, vb1, vb2, vb3;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory
      (assembled little-endian: vb0 is the least significant byte) */
   if (a0ok && a1ok && a2ok && a3ok) {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which confuses the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MAC_(record_address_error)( a, 4, False );
      return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
             | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MAC_(clo_partial_loads_ok));
   {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}
949
njn5c004e42002-11-18 11:04:50 +0000950static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000951{
952 /* Check the address for validity. */
953 Bool aerr = False;
954 PROF_EVENT(71);
955
956 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
957 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
958 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
959 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
960
961 /* Store the V bytes, remembering to do it little-endian-ly. */
962 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
963 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
964 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
965 set_vbyte( a+3, vbytes & 0x000000FF );
966
967 /* If an address error has happened, report it. */
968 if (aerr)
njn43c799e2003-04-08 00:08:52 +0000969 MAC_(record_address_error)( a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +0000970}
971
njn5c004e42002-11-18 11:04:50 +0000972static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000973{
974 /* Check the address for validity. */
975 UInt vw = VGM_WORD_INVALID;
976 Bool aerr = False;
977 PROF_EVENT(72);
978
979 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
980 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
981
982 /* Fetch the V bytes, remembering to do it little-endian-ly. */
983 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
984 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
985
986 /* If an address error has happened, report it. */
987 if (aerr) {
njn43c799e2003-04-08 00:08:52 +0000988 MAC_(record_address_error)( a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +0000989 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
990 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
991 }
992 return vw;
993}
994
njn5c004e42002-11-18 11:04:50 +0000995static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000996{
997 /* Check the address for validity. */
998 Bool aerr = False;
999 PROF_EVENT(73);
1000
1001 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1002 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1003
1004 /* Store the V bytes, remembering to do it little-endian-ly. */
1005 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1006 set_vbyte( a+1, vbytes & 0x000000FF );
1007
1008 /* If an address error has happened, report it. */
1009 if (aerr)
njn43c799e2003-04-08 00:08:52 +00001010 MAC_(record_address_error)( a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +00001011}
1012
njn5c004e42002-11-18 11:04:50 +00001013static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001014{
1015 /* Check the address for validity. */
1016 UInt vw = VGM_WORD_INVALID;
1017 Bool aerr = False;
1018 PROF_EVENT(74);
1019
1020 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1021
1022 /* Fetch the V byte. */
1023 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1024
1025 /* If an address error has happened, report it. */
1026 if (aerr) {
njn43c799e2003-04-08 00:08:52 +00001027 MAC_(record_address_error)( a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001028 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1029 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1030 }
1031 return vw;
1032}
1033
njn5c004e42002-11-18 11:04:50 +00001034static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001035{
1036 /* Check the address for validity. */
1037 Bool aerr = False;
1038 PROF_EVENT(75);
1039 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1040
1041 /* Store the V bytes, remembering to do it little-endian-ly. */
1042 set_vbyte( a+0, vbytes & 0x000000FF );
1043
1044 /* If an address error has happened, report it. */
1045 if (aerr)
njn43c799e2003-04-08 00:08:52 +00001046 MAC_(record_address_error)( a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001047}
1048
1049
1050/* ---------------------------------------------------------------------
1051 Called from generated code, or from the assembly helpers.
1052 Handlers for value check failures.
1053 ------------------------------------------------------------------ */
1054
njn5c004e42002-11-18 11:04:50 +00001055void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001056{
njn5c004e42002-11-18 11:04:50 +00001057 MC_(record_value_error) ( 0 );
njn25e49d8e72002-09-23 09:36:25 +00001058}
1059
njn5c004e42002-11-18 11:04:50 +00001060void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001061{
njn5c004e42002-11-18 11:04:50 +00001062 MC_(record_value_error) ( 1 );
njn25e49d8e72002-09-23 09:36:25 +00001063}
1064
njn5c004e42002-11-18 11:04:50 +00001065void MC_(helperc_value_check2_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001066{
njn5c004e42002-11-18 11:04:50 +00001067 MC_(record_value_error) ( 2 );
njn25e49d8e72002-09-23 09:36:25 +00001068}
1069
njn5c004e42002-11-18 11:04:50 +00001070void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001071{
njn5c004e42002-11-18 11:04:50 +00001072 MC_(record_value_error) ( 4 );
njn25e49d8e72002-09-23 09:36:25 +00001073}
1074
1075
1076/* ---------------------------------------------------------------------
1077 FPU load and store checks, called from generated code.
1078 ------------------------------------------------------------------ */
1079
1080__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +00001081void MC_(fpu_read_check) ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001082{
1083 /* Ensure the read area is both addressible and valid (ie,
1084 readable). If there's an address error, don't report a value
1085 error too; but if there isn't an address error, check for a
1086 value error.
1087
1088 Try to be reasonably fast on the common case; wimp out and defer
njn5c004e42002-11-18 11:04:50 +00001089 to mc_fpu_read_check_SLOWLY for everything else. */
njn25e49d8e72002-09-23 09:36:25 +00001090
1091 SecMap* sm;
1092 UInt sm_off, v_off, a_off;
1093 Addr addr4;
1094
1095 PROF_EVENT(80);
1096
1097# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +00001098 mc_fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001099# else
1100
1101 if (size == 4) {
1102 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1103 PROF_EVENT(81);
1104 /* Properly aligned. */
1105 sm = primary_map[addr >> 16];
1106 sm_off = addr & 0xFFFF;
1107 a_off = sm_off >> 3;
1108 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1109 /* Properly aligned and addressible. */
1110 v_off = addr & 0xFFFF;
1111 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1112 goto slow4;
1113 /* Properly aligned, addressible and with valid data. */
1114 return;
1115 slow4:
njn5c004e42002-11-18 11:04:50 +00001116 mc_fpu_read_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +00001117 return;
1118 }
1119
1120 if (size == 8) {
1121 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1122 PROF_EVENT(82);
1123 /* Properly aligned. Do it in two halves. */
1124 addr4 = addr + 4;
1125 /* First half. */
1126 sm = primary_map[addr >> 16];
1127 sm_off = addr & 0xFFFF;
1128 a_off = sm_off >> 3;
1129 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1130 /* First half properly aligned and addressible. */
1131 v_off = addr & 0xFFFF;
1132 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1133 goto slow8;
1134 /* Second half. */
1135 sm = primary_map[addr4 >> 16];
1136 sm_off = addr4 & 0xFFFF;
1137 a_off = sm_off >> 3;
1138 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1139 /* Second half properly aligned and addressible. */
1140 v_off = addr4 & 0xFFFF;
1141 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1142 goto slow8;
1143 /* Both halves properly aligned, addressible and with valid
1144 data. */
1145 return;
1146 slow8:
njn5c004e42002-11-18 11:04:50 +00001147 mc_fpu_read_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +00001148 return;
1149 }
1150
1151 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1152 cases go quickly. */
1153 if (size == 2) {
1154 PROF_EVENT(83);
njn5c004e42002-11-18 11:04:50 +00001155 mc_fpu_read_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001156 return;
1157 }
1158
sewardj93992e22003-05-26 09:17:41 +00001159 if (size == 16 /*SSE*/
1160 || size == 10 || size == 28 || size == 108) {
njn25e49d8e72002-09-23 09:36:25 +00001161 PROF_EVENT(84);
njn5c004e42002-11-18 11:04:50 +00001162 mc_fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001163 return;
1164 }
1165
1166 VG_(printf)("size is %d\n", size);
njn5c004e42002-11-18 11:04:50 +00001167 VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001168# endif
1169}
1170
1171
1172__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +00001173void MC_(fpu_write_check) ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001174{
1175 /* Ensure the written area is addressible, and moan if otherwise.
1176 If it is addressible, make it valid, otherwise invalid.
1177 */
1178
1179 SecMap* sm;
1180 UInt sm_off, v_off, a_off;
1181 Addr addr4;
1182
1183 PROF_EVENT(85);
1184
1185# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +00001186 mc_fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001187# else
1188
1189 if (size == 4) {
1190 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1191 PROF_EVENT(86);
1192 /* Properly aligned. */
1193 sm = primary_map[addr >> 16];
1194 sm_off = addr & 0xFFFF;
1195 a_off = sm_off >> 3;
1196 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1197 /* Properly aligned and addressible. Make valid. */
1198 v_off = addr & 0xFFFF;
1199 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1200 return;
1201 slow4:
njn5c004e42002-11-18 11:04:50 +00001202 mc_fpu_write_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +00001203 return;
1204 }
1205
1206 if (size == 8) {
1207 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1208 PROF_EVENT(87);
1209 /* Properly aligned. Do it in two halves. */
1210 addr4 = addr + 4;
1211 /* First half. */
1212 sm = primary_map[addr >> 16];
1213 sm_off = addr & 0xFFFF;
1214 a_off = sm_off >> 3;
1215 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1216 /* First half properly aligned and addressible. Make valid. */
1217 v_off = addr & 0xFFFF;
1218 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1219 /* Second half. */
1220 sm = primary_map[addr4 >> 16];
1221 sm_off = addr4 & 0xFFFF;
1222 a_off = sm_off >> 3;
1223 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1224 /* Second half properly aligned and addressible. */
1225 v_off = addr4 & 0xFFFF;
1226 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1227 /* Properly aligned, addressible and with valid data. */
1228 return;
1229 slow8:
njn5c004e42002-11-18 11:04:50 +00001230 mc_fpu_write_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +00001231 return;
1232 }
1233
1234 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1235 cases go quickly. */
1236 if (size == 2) {
1237 PROF_EVENT(88);
njn5c004e42002-11-18 11:04:50 +00001238 mc_fpu_write_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001239 return;
1240 }
1241
sewardj93992e22003-05-26 09:17:41 +00001242 if (size == 16 /*SSE*/
1243 || size == 10 || size == 28 || size == 108) {
njn25e49d8e72002-09-23 09:36:25 +00001244 PROF_EVENT(89);
njn5c004e42002-11-18 11:04:50 +00001245 mc_fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001246 return;
1247 }
1248
1249 VG_(printf)("size is %d\n", size);
njn5c004e42002-11-18 11:04:50 +00001250 VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001251# endif
1252}
1253
1254
1255/* ---------------------------------------------------------------------
1256 Slow, general cases for FPU load and store checks.
1257 ------------------------------------------------------------------ */
1258
1259/* Generic version. Test for both addr and value errors, but if
1260 there's an addr error, don't report a value error even if it
1261 exists. */
1262
njn5c004e42002-11-18 11:04:50 +00001263void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001264{
1265 Int i;
1266 Bool aerr = False;
1267 Bool verr = False;
1268 PROF_EVENT(90);
1269 for (i = 0; i < size; i++) {
1270 PROF_EVENT(91);
1271 if (get_abit(addr+i) != VGM_BIT_VALID)
1272 aerr = True;
1273 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1274 verr = True;
1275 }
1276
1277 if (aerr) {
njn43c799e2003-04-08 00:08:52 +00001278 MAC_(record_address_error)( addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001279 } else {
1280 if (verr)
njn5c004e42002-11-18 11:04:50 +00001281 MC_(record_value_error)( size );
njn25e49d8e72002-09-23 09:36:25 +00001282 }
1283}
1284
1285
1286/* Generic version. Test for addr errors. Valid addresses are
1287 given valid values, and invalid addresses invalid values. */
1288
njn5c004e42002-11-18 11:04:50 +00001289void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001290{
1291 Int i;
1292 Addr a_here;
1293 Bool a_ok;
1294 Bool aerr = False;
1295 PROF_EVENT(92);
1296 for (i = 0; i < size; i++) {
1297 PROF_EVENT(93);
1298 a_here = addr+i;
1299 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1300 if (a_ok) {
1301 set_vbyte(a_here, VGM_BYTE_VALID);
1302 } else {
1303 set_vbyte(a_here, VGM_BYTE_INVALID);
1304 aerr = True;
1305 }
1306 }
1307 if (aerr) {
njn43c799e2003-04-08 00:08:52 +00001308 MAC_(record_address_error)( addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001309 }
1310}
1311
njn25e49d8e72002-09-23 09:36:25 +00001312
1313/*------------------------------------------------------------*/
1314/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1315/*------------------------------------------------------------*/
1316
sewardja4495682002-10-21 07:29:59 +00001317/* For the memory leak detector, say whether an entire 64k chunk of
1318 address space is possibly in use, or not. If in doubt return
1319 True.
njn25e49d8e72002-09-23 09:36:25 +00001320*/
sewardja4495682002-10-21 07:29:59 +00001321static
1322Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001323{
sewardja4495682002-10-21 07:29:59 +00001324 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1325 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1326 /* Definitely not in use. */
1327 return False;
1328 } else {
1329 return True;
njn25e49d8e72002-09-23 09:36:25 +00001330 }
1331}
1332
1333
sewardja4495682002-10-21 07:29:59 +00001334/* For the memory leak detector, say whether or not a given word
1335 address is to be regarded as valid. */
1336static
1337Bool mc_is_valid_address ( Addr a )
1338{
1339 UInt vbytes;
1340 UChar abits;
1341 sk_assert(IS_ALIGNED4_ADDR(a));
1342 abits = get_abits4_ALIGNED(a);
1343 vbytes = get_vbytes4_ALIGNED(a);
1344 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1345 return True;
1346 } else {
1347 return False;
1348 }
1349}
1350
1351
1352/* Leak detector for this skin. We don't actually do anything, merely
1353 run the generic leak detector with suitable parameters for this
1354 skin. */
njn5c004e42002-11-18 11:04:50 +00001355void MC_(detect_memory_leaks) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001356{
njn43c799e2003-04-08 00:08:52 +00001357 MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001358}
1359
1360
1361/* ---------------------------------------------------------------------
1362 Sanity check machinery (permanently engaged).
1363 ------------------------------------------------------------------ */
1364
1365/* Check that nobody has spuriously claimed that the first or last 16
1366 pages (64 KB) of address space have become accessible. Failure of
1367 the following do not per se indicate an internal consistency
1368 problem, but they are so likely to that we really want to know
1369 about it if so. */
1370
1371Bool SK_(cheap_sanity_check) ( void )
1372{
sewardjd5815ec2003-04-06 12:23:27 +00001373 if (IS_DISTINGUISHED_SM(primary_map[0])
1374 /* kludge: kernel drops a page up at top of address range for
1375 magic "optimized syscalls", so we can no longer check the
1376 highest page */
1377 /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
1378 )
njn25e49d8e72002-09-23 09:36:25 +00001379 return True;
1380 else
1381 return False;
1382}
1383
1384Bool SK_(expensive_sanity_check) ( void )
1385{
1386 Int i;
1387
1388 /* Make sure nobody changed the distinguished secondary. */
1389 for (i = 0; i < 8192; i++)
1390 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1391 return False;
1392
1393 for (i = 0; i < 65536; i++)
1394 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1395 return False;
1396
1397 /* Make sure that the upper 3/4 of the primary map hasn't
1398 been messed with. */
1399 for (i = 65536; i < 262144; i++)
1400 if (primary_map[i] != & distinguished_secondary_map)
1401 return False;
1402
1403 return True;
1404}
1405
1406/* ---------------------------------------------------------------------
1407 Debugging machinery (turn on to debug). Something of a mess.
1408 ------------------------------------------------------------------ */
1409
#if 0
/* Disabled debug machinery: print the value tags on the 8 integer
   registers & flag reg. */

static void uint_to_bits ( UInt x, Char* str )
{
   /* str must point to a space of at least 36 bytes:
      32 bit chars + 3 separating spaces + trailing NUL. */
   Int i;
   Int w = 0;
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   str[w++] = 0;
   sk_assert(w == 36);
}

/* Caution!  Not vthread-safe; looks in VG_(baseBlock), not the
   thread state table. */
static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all
   allegedly addressible words.  Useful for establishing where
   Valgrind's idea of addressibility has diverged from what the
   kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif
1484
1485
1486
1487
#if 0
static Int zzz = 0;

/* Disabled debug helper: show shadow-register tags and the
   translation of the next basic block.  NOTE(review): the original
   call was "vg_show_reg_tags( &VG_(m_shadow );" -- unbalanced
   parentheses and the wrong arity; vg_show_reg_tags() is declared
   taking no arguments, so this would not compile if the #if 0 were
   ever enabled.  Fixed to a plain call. */
void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   vg_show_reg_tags();
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */
1498
njn25e49d8e72002-09-23 09:36:25 +00001499
1500/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001501/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001502/*------------------------------------------------------------*/
1503
njn43c799e2003-04-08 00:08:52 +00001504Bool MC_(clo_avoid_strlen_errors) = True;
1505Bool MC_(clo_cleanup) = True;
1506
njn25e49d8e72002-09-23 09:36:25 +00001507Bool SK_(process_cmd_line_option)(Char* arg)
1508{
njn43c799e2003-04-08 00:08:52 +00001509 if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=yes"))
njn5c004e42002-11-18 11:04:50 +00001510 MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00001511 else if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=no"))
njn5c004e42002-11-18 11:04:50 +00001512 MC_(clo_avoid_strlen_errors) = False;
sewardj8ec2cfc2002-10-13 00:57:26 +00001513
njn43c799e2003-04-08 00:08:52 +00001514 else if (VG_CLO_STREQ(arg, "--cleanup=yes"))
1515 MC_(clo_cleanup) = True;
1516 else if (VG_CLO_STREQ(arg, "--cleanup=no"))
1517 MC_(clo_cleanup) = False;
1518
njn25e49d8e72002-09-23 09:36:25 +00001519 else
njn43c799e2003-04-08 00:08:52 +00001520 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001521
1522 return True;
njn25e49d8e72002-09-23 09:36:25 +00001523}
1524
njn3e884182003-04-15 13:03:23 +00001525void SK_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001526{
njn3e884182003-04-15 13:03:23 +00001527 MAC_(print_common_usage)();
1528 VG_(printf)(
1529" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1530 );
1531}
1532
1533void SK_(print_debug_usage)(void)
1534{
1535 MAC_(print_common_debug_usage)();
1536 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00001537" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00001538 );
njn25e49d8e72002-09-23 09:36:25 +00001539}
1540
1541
1542/*------------------------------------------------------------*/
1543/*--- Setup ---*/
1544/*------------------------------------------------------------*/
1545
njn810086f2002-11-14 12:42:47 +00001546void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001547{
njn810086f2002-11-14 12:42:47 +00001548 VG_(details_name) ("Memcheck");
1549 VG_(details_version) (NULL);
1550 VG_(details_description) ("a.k.a. Valgrind, a memory error detector");
1551 VG_(details_copyright_author)(
njn0e1b5142003-04-15 14:58:06 +00001552 "Copyright (C) 2002-2003, and GNU GPL'd, by Julian Seward.");
njn810086f2002-11-14 12:42:47 +00001553 VG_(details_bug_reports_to) ("jseward@acm.org");
sewardj78210aa2002-12-01 02:55:46 +00001554 VG_(details_avg_translation_sizeB) ( 228 );
njn25e49d8e72002-09-23 09:36:25 +00001555
njn810086f2002-11-14 12:42:47 +00001556 VG_(needs_core_errors) ();
1557 VG_(needs_skin_errors) ();
1558 VG_(needs_libc_freeres) ();
njn810086f2002-11-14 12:42:47 +00001559 VG_(needs_shadow_regs) ();
1560 VG_(needs_command_line_options)();
1561 VG_(needs_client_requests) ();
1562 VG_(needs_extended_UCode) ();
1563 VG_(needs_syscall_wrapper) ();
njn810086f2002-11-14 12:42:47 +00001564 VG_(needs_sanity_checks) ();
njn25e49d8e72002-09-23 09:36:25 +00001565
njn3e884182003-04-15 13:03:23 +00001566 MAC_( new_mem_heap) = & mc_new_mem_heap;
1567 MAC_( ban_mem_heap) = & MC_(make_noaccess);
1568 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
1569 MAC_( die_mem_heap) = & MC_(make_noaccess);
1570
njn5c004e42002-11-18 11:04:50 +00001571 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
njn5c004e42002-11-18 11:04:50 +00001572 VG_(track_new_mem_stack_signal) ( & MC_(make_writable) );
1573 VG_(track_new_mem_brk) ( & MC_(make_writable) );
1574 VG_(track_new_mem_mmap) ( & mc_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001575
njn3e884182003-04-15 13:03:23 +00001576 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
1577 VG_(track_change_mem_mprotect) ( & mc_set_perms );
1578
1579 VG_(track_die_mem_stack_signal) ( & MC_(make_noaccess) );
1580 VG_(track_die_mem_brk) ( & MC_(make_noaccess) );
1581 VG_(track_die_mem_munmap) ( & MC_(make_noaccess) );
1582
njn43c799e2003-04-08 00:08:52 +00001583 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
1584 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
1585 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
1586 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
1587 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
1588 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001589
njn43c799e2003-04-08 00:08:52 +00001590 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
1591 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
1592 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
1593 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
1594 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
1595 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001596
njn3e884182003-04-15 13:03:23 +00001597 VG_(track_ban_mem_stack) ( & MC_(make_noaccess) );
njn25e49d8e72002-09-23 09:36:25 +00001598
njn5c004e42002-11-18 11:04:50 +00001599 VG_(track_pre_mem_read) ( & mc_check_is_readable );
1600 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
1601 VG_(track_pre_mem_write) ( & mc_check_is_writable );
1602 VG_(track_post_mem_write) ( & MC_(make_readable) );
njn25e49d8e72002-09-23 09:36:25 +00001603
njnd3040452003-05-19 15:04:06 +00001604 VG_(track_post_regs_write_init) ( & mc_post_regs_write_init );
1605 VG_(track_post_reg_write_syscall_return) ( & mc_post_reg_write );
1606 VG_(track_post_reg_write_deliver_signal) ( & mc_post_reg_write );
1607 VG_(track_post_reg_write_pthread_return) ( & mc_post_reg_write );
1608 VG_(track_post_reg_write_clientreq_return) ( & mc_post_reg_write );
1609 VG_(track_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
1610
njn9b007f62003-04-07 14:40:25 +00001611 /* Three compact slots taken up by stack memory helpers */
njn5c004e42002-11-18 11:04:50 +00001612 VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
1613 VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
1614 VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
1615 VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
njn5c004e42002-11-18 11:04:50 +00001616 VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));
njn25e49d8e72002-09-23 09:36:25 +00001617
njnd04b7c62002-10-03 14:05:52 +00001618 /* These two made non-compact because 2-byte transactions are rare. */
njn5c004e42002-11-18 11:04:50 +00001619 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
njn9b007f62003-04-07 14:40:25 +00001620 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
njn5c004e42002-11-18 11:04:50 +00001621 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
njn9b007f62003-04-07 14:40:25 +00001622 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
njn5c004e42002-11-18 11:04:50 +00001623 VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
1624 VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
1625 VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00001626
1627 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1628 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njn9b007f62003-04-07 14:40:25 +00001629 VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00001630
njn43c799e2003-04-08 00:08:52 +00001631 /* Additional block description for VG_(describe_addr)() */
1632 MAC_(describe_addr_supp) = MC_(client_perm_maybe_describe);
1633
njnd04b7c62002-10-03 14:05:52 +00001634 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00001635 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00001636}
1637
1638void SK_(post_clo_init) ( void )
1639{
1640}
1641
njn7d9f94d2003-04-22 21:41:40 +00001642void SK_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00001643{
njn3e884182003-04-15 13:03:23 +00001644 MAC_(common_fini)( MC_(detect_memory_leaks) );
1645
njn5c004e42002-11-18 11:04:50 +00001646 if (0) {
1647 VG_(message)(Vg_DebugMsg,
1648 "------ Valgrind's client block stats follow ---------------" );
1649 MC_(show_client_block_stats)();
1650 }
njn25e49d8e72002-09-23 09:36:25 +00001651}
1652
1653/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001654/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001655/*--------------------------------------------------------------------*/