blob: 149bb238153d4908eedf72eca15efdb499aeb558 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of MemCheck, a heavyweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn0e1b5142003-04-15 14:58:06 +000012 Copyright (C) 2000-2003 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn27f1a382002-11-08 15:48:16 +000037VG_DETERMINE_INTERFACE_VERSION
38
njn25e49d8e72002-09-23 09:36:25 +000039/* Define to debug the mem audit system. */
40/* #define VG_DEBUG_MEMORY */
41
njn25e49d8e72002-09-23 09:36:25 +000042#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
43
44/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000045/*--- Low-level support for memory checking. ---*/
46/*------------------------------------------------------------*/
47
48/* All reads and writes are checked against a memory map, which
49 records the state of all memory in the process. The memory map is
50 organised like this:
51
52 The top 16 bits of an address are used to index into a top-level
53 map table, containing 65536 entries. Each entry is a pointer to a
54 second-level map, which records the accesibililty and validity
55 permissions for the 65536 bytes indexed by the lower 16 bits of the
56 address. Each byte is represented by nine bits, one indicating
57 accessibility, the other eight validity. So each second-level map
58 contains 73728 bytes. This two-level arrangement conveniently
59 divides the 4G address space into 64k lumps, each size 64k bytes.
60
61 All entries in the primary (top-level) map must point to a valid
62 secondary (second-level) map. Since most of the 4G of address
63 space will not be in use -- ie, not mapped at all -- there is a
64 distinguished secondary map, which indicates `not addressible and
65 not valid' writeable for all bytes. Entries in the primary map for
66 which the entire 64k is not in use at all point at this
67 distinguished map.
68
69 [...] lots of stuff deleted due to out of date-ness
70
71 As a final optimisation, the alignment and address checks for
72 4-byte loads and stores are combined in a neat way. The primary
73 map is extended to have 262144 entries (2^18), rather than 2^16.
74 The top 3/4 of these entries are permanently set to the
75 distinguished secondary map. For a 4-byte load/store, the
76 top-level map is indexed not with (addr >> 16) but instead f(addr),
77 where
78
79 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
80 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
81 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
82
83 ie the lowest two bits are placed above the 16 high address bits.
84 If either of these two bits are nonzero, the address is misaligned;
85 this will select a secondary map from the upper 3/4 of the primary
86 map. Because this is always the distinguished secondary map, a
87 (bogus) address check failure will result. The failure handling
88 code can then figure out whether this is a genuine addr check
89 failure or whether it is a possibly-legitimate access at a
90 misaligned address.
91*/
92
93
94/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000095/*--- Function declarations. ---*/
96/*------------------------------------------------------------*/
97
/* Forward decls of the fallback ("slow") paths.  Each handles the
   fully general case -- unaligned and/or partially-mapped addresses --
   that the fast helpers below cannot. */
static UInt mc_rd_V4_SLOWLY ( Addr a );
static UInt mc_rd_V2_SLOWLY ( Addr a );
static UInt mc_rd_V1_SLOWLY ( Addr a );
static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000106
107/*------------------------------------------------------------*/
108/*--- Data defns. ---*/
109/*------------------------------------------------------------*/
110
/* One secondary map covers 64k of address space. */
typedef
   struct {
      UChar abits[8192];    /* 1 A (addressibility) bit per byte: 64k bits  */
      UChar vbyte[65536];   /* 1 V (validity) byte per byte                 */
   }
   SecMap;

/* 2^18 entries, not 2^16: the upper 3/4 serve the misaligned-access
   trick described in the big comment above. */
static SecMap* primary_map[ /*65536*/ 262144 ];
static SecMap  distinguished_secondary_map;
120
njn25e49d8e72002-09-23 09:36:25 +0000121
122static void init_shadow_memory ( void )
123{
124 Int i;
125
126 for (i = 0; i < 8192; i++) /* Invalid address */
127 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
128 for (i = 0; i < 65536; i++) /* Invalid Value */
129 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
130
131 /* These entries gradually get overwritten as the used address
132 space expands. */
133 for (i = 0; i < 65536; i++)
134 primary_map[i] = &distinguished_secondary_map;
135
136 /* These ones should never change; it's a bug in Valgrind if they do. */
137 for (i = 65536; i < 262144; i++)
138 primary_map[i] = &distinguished_secondary_map;
139}
140
njn25e49d8e72002-09-23 09:36:25 +0000141/*------------------------------------------------------------*/
142/*--- Basic bitmap management, reading and writing. ---*/
143/*------------------------------------------------------------*/
144
145/* Allocate and initialise a secondary map. */
146
147static SecMap* alloc_secondary_map ( __attribute__ ((unused))
148 Char* caller )
149{
150 SecMap* map;
151 UInt i;
152 PROF_EVENT(10);
153
154 /* Mark all bytes as invalid access and invalid value. */
155
156 /* It just happens that a SecMap occupies exactly 18 pages --
157 although this isn't important, so the following assert is
158 spurious. */
njne427a662002-10-02 11:08:25 +0000159 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000160 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
161
162 for (i = 0; i < 8192; i++)
163 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
164 for (i = 0; i < 65536; i++)
165 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
166
167 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
168 return map;
169}
170
171
172/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
173
174static __inline__ UChar get_abit ( Addr a )
175{
176 SecMap* sm = primary_map[a >> 16];
177 UInt sm_off = a & 0xFFFF;
178 PROF_EVENT(20);
179# if 0
180 if (IS_DISTINGUISHED_SM(sm))
181 VG_(message)(Vg_DebugMsg,
182 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
183# endif
184 return BITARR_TEST(sm->abits, sm_off)
185 ? VGM_BIT_INVALID : VGM_BIT_VALID;
186}
187
188static __inline__ UChar get_vbyte ( Addr a )
189{
190 SecMap* sm = primary_map[a >> 16];
191 UInt sm_off = a & 0xFFFF;
192 PROF_EVENT(21);
193# if 0
194 if (IS_DISTINGUISHED_SM(sm))
195 VG_(message)(Vg_DebugMsg,
196 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
197# endif
198 return sm->vbyte[sm_off];
199}
200
201static __inline__ void set_abit ( Addr a, UChar abit )
202{
203 SecMap* sm;
204 UInt sm_off;
205 PROF_EVENT(22);
206 ENSURE_MAPPABLE(a, "set_abit");
207 sm = primary_map[a >> 16];
208 sm_off = a & 0xFFFF;
209 if (abit)
210 BITARR_SET(sm->abits, sm_off);
211 else
212 BITARR_CLEAR(sm->abits, sm_off);
213}
214
215static __inline__ void set_vbyte ( Addr a, UChar vbyte )
216{
217 SecMap* sm;
218 UInt sm_off;
219 PROF_EVENT(23);
220 ENSURE_MAPPABLE(a, "set_vbyte");
221 sm = primary_map[a >> 16];
222 sm_off = a & 0xFFFF;
223 sm->vbyte[sm_off] = vbyte;
224}
225
226
227/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
228
229static __inline__ UChar get_abits4_ALIGNED ( Addr a )
230{
231 SecMap* sm;
232 UInt sm_off;
233 UChar abits8;
234 PROF_EVENT(24);
235# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000236 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000237# endif
238 sm = primary_map[a >> 16];
239 sm_off = a & 0xFFFF;
240 abits8 = sm->abits[sm_off >> 3];
241 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
242 abits8 &= 0x0F;
243 return abits8;
244}
245
246static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
247{
248 SecMap* sm = primary_map[a >> 16];
249 UInt sm_off = a & 0xFFFF;
250 PROF_EVENT(25);
251# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000252 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000253# endif
254 return ((UInt*)(sm->vbyte))[sm_off >> 2];
255}
256
257
258/*------------------------------------------------------------*/
259/*--- Setting permissions over address ranges. ---*/
260/*------------------------------------------------------------*/
261
/* Set the A bit of every byte in [a, a+len) to example_a_bit, and all
   eight V bits of every byte to copies of example_v_bit.  Strategy:
   byte-at-a-time until 'a' reaches 8-byte alignment, then 8 bytes per
   iteration through the shadow maps directly, then byte-at-a-time for
   the trailing fragment. */
static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   /* Not an error, but suspicious enough to tell the user about. */
   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d, v %d",
                   len, example_a_bit, example_v_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   /* Unaddressible memory may not hold valid data. */
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity byte to write (example_v_bit replicated 8 times). */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms: abyte8 is the A
      bit replicated across a whole abits byte, vword4 the V byte
      replicated across a word. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;

#  ifdef VG_DEBUG_MEMORY
   /* Debug build: do the whole range byte-at-a-time. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast: one abits byte + two V words per 8 bytes. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment, byte-at-a-time again. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}
380
381/* Set permissions for address ranges ... */
382
/* Mark [a, a+len) as neither addressible nor initialised. */
void MC_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}
389
/* Mark [a, a+len) as addressible but holding undefined data. */
void MC_(make_writable) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}
396
/* Mark [a, a+len) as addressible and holding defined data. */
void MC_(make_readable) ( Addr a, UInt len )
{
   PROF_EVENT(37);
   DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}
403
/* %esp-adjustment fast path: make the 4-aligned word at 'a' writable
   (addressible, contents undefined). */
static __inline__
void make_aligned_word_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_writable");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   /* Newly-writable data is undefined. */
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask has 1s at this word's four A bits; clearing them makes the
      bytes addressible (a cleared A bit means "valid"). */
   sm->abits[sm_off >> 3] &= ~mask;
   VGP_POPCC(VgpESPAdj);
}
422
/* %esp-adjustment fast path: make the 4-aligned word at 'a'
   inaccessible (not addressible, contents undefined). */
static __inline__
void make_aligned_word_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid (1s). */
   sm->abits[sm_off >> 3] |= mask;
   VGP_POPCC(VgpESPAdj);
}
441
442/* Nb: by "aligned" here we mean 8-byte aligned */
/* Make the 8-aligned doubleword at 'a' writable.  Covers a whole
   abits byte and two V words, so no bit-masking is needed. */
static __inline__
void make_aligned_doubleword_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   /* All 8 A bits valid, all 8 V bytes undefined. */
   sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}
458
/* Make the 8-aligned doubleword at 'a' inaccessible. */
static __inline__
void make_aligned_doubleword_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   /* All 8 A bits invalid, all 8 V bytes undefined. */
   sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}
474
475/* The %esp update handling functions */
/* Wire the %esp update handlers into the core: aligned word/doubleword
   specialisations first, then the generic range-based versions as the
   fallback for arbitrary adjustments. */
ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_writable,
                      make_aligned_doubleword_noaccess,
                      MC_(make_writable),
                      MC_(make_noaccess)
                    );
483
484/* Block-copy permissions (needed for implementing realloc()). */
njn5c004e42002-11-18 11:04:50 +0000485static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000486{
487 UInt i;
488
njn5c004e42002-11-18 11:04:50 +0000489 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000490
491 PROF_EVENT(40);
492 for (i = 0; i < len; i++) {
493 UChar abit = get_abit ( src+i );
494 UChar vbyte = get_vbyte ( src+i );
495 PROF_EVENT(41);
496 set_abit ( dst+i, abit );
497 set_vbyte ( dst+i, vbyte );
498 }
499}
500
501
502/* Check permissions for address range. If inadequate permissions
503 exist, *bad_addr is set to the offending address, so the caller can
504 know what it is. */
505
njn5c004e42002-11-18 11:04:50 +0000506Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000507{
508 UInt i;
509 UChar abit;
510 PROF_EVENT(42);
511 for (i = 0; i < len; i++) {
512 PROF_EVENT(43);
513 abit = get_abit(a);
514 if (abit == VGM_BIT_INVALID) {
515 if (bad_addr != NULL) *bad_addr = a;
516 return False;
517 }
518 a++;
519 }
520 return True;
521}
522
njn5c004e42002-11-18 11:04:50 +0000523Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000524{
525 UInt i;
526 UChar abit;
527 UChar vbyte;
528
529 PROF_EVENT(44);
njn5c004e42002-11-18 11:04:50 +0000530 DEBUG("MC_(check_readable)\n");
njn25e49d8e72002-09-23 09:36:25 +0000531 for (i = 0; i < len; i++) {
532 abit = get_abit(a);
533 vbyte = get_vbyte(a);
534 PROF_EVENT(45);
535 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
536 if (bad_addr != NULL) *bad_addr = a;
537 return False;
538 }
539 a++;
540 }
541 return True;
542}
543
544
545/* Check a zero-terminated ascii string. Tricky -- don't want to
546 examine the actual bytes, to find the end, until we're sure it is
547 safe to do so. */
548
njn9b007f62003-04-07 14:40:25 +0000549static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000550{
551 UChar abit;
552 UChar vbyte;
553 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000554 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000555 while (True) {
556 PROF_EVENT(47);
557 abit = get_abit(a);
558 vbyte = get_vbyte(a);
559 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
560 if (bad_addr != NULL) *bad_addr = a;
561 return False;
562 }
563 /* Ok, a is safe to read. */
564 if (* ((UChar*)a) == 0) return True;
565 a++;
566 }
567}
568
569
570/*------------------------------------------------------------*/
571/*--- Memory event handlers ---*/
572/*------------------------------------------------------------*/
573
/* Core callback: check that [base, base+size) is writable before the
   core writes to it on the client's behalf; records an appropriate
   error (param or core-mem) keyed on which part of the core asked. */
static
void mc_check_is_writable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = MC_(check_writable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tst, /*isWrite=*/True, s );
         break;

      default:
         VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}
604
/* Core callback: check that [base, base+size) is readable before the
   core reads it on the client's behalf.  A jump to a bad address is
   reported specially (Vg_CoreTranslate) before it can crash things. */
static
void mc_check_is_readable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   ok = MC_(check_readable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tst, /*isWrite=*/False, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tst, bad_addr );
         break;

      default:
         VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}
639
/* Core callback: check that the NUL-terminated string at 'str' is
   fully readable.  Only the syscall machinery hands us asciiz
   arguments, hence the assert. */
static
void mc_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
                                   Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (!ok) {
      MAC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}
658
659
/* Core callback for memory already mapped at client startup. */
static
void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   MC_(make_readable)(a, len);
}
667
668static
njn5c004e42002-11-18 11:04:50 +0000669void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000670{
671 if (is_inited) {
njn5c004e42002-11-18 11:04:50 +0000672 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000673 } else {
njn5c004e42002-11-18 11:04:50 +0000674 MC_(make_writable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000675 }
676}
677
/* Core callback for a permission change on [a, a+len): readable wins
   over writable; neither means no access.  The execute bit has no
   effect on A/V state. */
static
void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
{
   DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
   if      (rr) MC_(make_readable)(a, len);
   else if (ww) MC_(make_writable)(a, len);
   else         MC_(make_noaccess)(a, len);
}
686
687
688/*------------------------------------------------------------*/
689/*--- Functions called directly from generated code. ---*/
690/*------------------------------------------------------------*/
691
692static __inline__ UInt rotateRight16 ( UInt x )
693{
694 /* Amazingly, gcc turns this into a single rotate insn. */
695 return (x >> 16) | (x << 16);
696}
697
698
699static __inline__ UInt shiftRight16 ( UInt x )
700{
701 return x >> 16;
702}
703
704
705/* Read/write 1/2/4 sized V bytes, and emit an address error if
706 needed. */
707
708/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
709 Under all other circumstances, it defers to the relevant _SLOWLY
710 function, which can handle all situations.
711*/
712__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000713UInt MC_(helperc_LOADV4) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000714{
715# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000716 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000717# else
718 UInt sec_no = rotateRight16(a) & 0x3FFFF;
719 SecMap* sm = primary_map[sec_no];
720 UInt a_off = (a & 0xFFFF) >> 3;
721 UChar abits = sm->abits[a_off];
722 abits >>= (a & 4);
723 abits &= 15;
724 PROF_EVENT(60);
725 if (abits == VGM_NIBBLE_VALID) {
726 /* Handle common case quickly: a is suitably aligned, is mapped,
727 and is addressible. */
728 UInt v_off = a & 0xFFFF;
729 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
730 } else {
731 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000732 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000733 }
734# endif
735}
736
737__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000738void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000739{
740# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000741 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000742# else
743 UInt sec_no = rotateRight16(a) & 0x3FFFF;
744 SecMap* sm = primary_map[sec_no];
745 UInt a_off = (a & 0xFFFF) >> 3;
746 UChar abits = sm->abits[a_off];
747 abits >>= (a & 4);
748 abits &= 15;
749 PROF_EVENT(61);
750 if (abits == VGM_NIBBLE_VALID) {
751 /* Handle common case quickly: a is suitably aligned, is mapped,
752 and is addressible. */
753 UInt v_off = a & 0xFFFF;
754 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
755 } else {
756 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000757 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000758 }
759# endif
760}
761
762__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000763UInt MC_(helperc_LOADV2) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000764{
765# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000766 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000767# else
768 UInt sec_no = rotateRight16(a) & 0x1FFFF;
769 SecMap* sm = primary_map[sec_no];
770 UInt a_off = (a & 0xFFFF) >> 3;
771 PROF_EVENT(62);
772 if (sm->abits[a_off] == VGM_BYTE_VALID) {
773 /* Handle common case quickly. */
774 UInt v_off = a & 0xFFFF;
775 return 0xFFFF0000
776 |
777 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
778 } else {
779 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000780 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000781 }
782# endif
783}
784
785__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000786void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000787{
788# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000789 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000790# else
791 UInt sec_no = rotateRight16(a) & 0x1FFFF;
792 SecMap* sm = primary_map[sec_no];
793 UInt a_off = (a & 0xFFFF) >> 3;
794 PROF_EVENT(63);
795 if (sm->abits[a_off] == VGM_BYTE_VALID) {
796 /* Handle common case quickly. */
797 UInt v_off = a & 0xFFFF;
798 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
799 } else {
800 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000801 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000802 }
803# endif
804}
805
806__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000807UInt MC_(helperc_LOADV1) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000808{
809# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000810 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000811# else
812 UInt sec_no = shiftRight16(a);
813 SecMap* sm = primary_map[sec_no];
814 UInt a_off = (a & 0xFFFF) >> 3;
815 PROF_EVENT(64);
816 if (sm->abits[a_off] == VGM_BYTE_VALID) {
817 /* Handle common case quickly. */
818 UInt v_off = a & 0xFFFF;
819 return 0xFFFFFF00
820 |
821 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
822 } else {
823 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000824 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000825 }
826# endif
827}
828
829__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000830void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000831{
832# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000833 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000834# else
835 UInt sec_no = shiftRight16(a);
836 SecMap* sm = primary_map[sec_no];
837 UInt a_off = (a & 0xFFFF) >> 3;
838 PROF_EVENT(65);
839 if (sm->abits[a_off] == VGM_BYTE_VALID) {
840 /* Handle common case quickly. */
841 UInt v_off = a & 0xFFFF;
842 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
843 } else {
844 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000845 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000846 }
847# endif
848}
849
850
851/*------------------------------------------------------------*/
852/*--- Fallback functions to handle cases that the above ---*/
853/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
854/*------------------------------------------------------------*/
855
/* Fully general 4-byte V load: handles unaligned and partially-mapped
   addresses.  Distinguishes three cases (all-valid, all-invalid,
   partially-valid); see the per-case comments below. */
static UInt mc_rd_V4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;
   UInt vb0, vb1, vb2, vb3;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      UInt vw = VGM_WORD_INVALID;
      /* Assemble little-endian-ly: vb0 ends up in the low byte. */
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which confuses the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MAC_(record_address_error)( a, 4, False );
      return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
             | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MAC_(clo_partial_loads_ok));
   {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}
926
njn5c004e42002-11-18 11:04:50 +0000927static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000928{
929 /* Check the address for validity. */
930 Bool aerr = False;
931 PROF_EVENT(71);
932
933 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
934 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
935 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
936 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
937
938 /* Store the V bytes, remembering to do it little-endian-ly. */
939 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
940 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
941 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
942 set_vbyte( a+3, vbytes & 0x000000FF );
943
944 /* If an address error has happened, report it. */
945 if (aerr)
njn43c799e2003-04-08 00:08:52 +0000946 MAC_(record_address_error)( a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +0000947}
948
njn5c004e42002-11-18 11:04:50 +0000949static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000950{
951 /* Check the address for validity. */
952 UInt vw = VGM_WORD_INVALID;
953 Bool aerr = False;
954 PROF_EVENT(72);
955
956 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
957 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
958
959 /* Fetch the V bytes, remembering to do it little-endian-ly. */
960 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
961 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
962
963 /* If an address error has happened, report it. */
964 if (aerr) {
njn43c799e2003-04-08 00:08:52 +0000965 MAC_(record_address_error)( a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +0000966 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
967 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
968 }
969 return vw;
970}
971
njn5c004e42002-11-18 11:04:50 +0000972static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000973{
974 /* Check the address for validity. */
975 Bool aerr = False;
976 PROF_EVENT(73);
977
978 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
979 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
980
981 /* Store the V bytes, remembering to do it little-endian-ly. */
982 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
983 set_vbyte( a+1, vbytes & 0x000000FF );
984
985 /* If an address error has happened, report it. */
986 if (aerr)
njn43c799e2003-04-08 00:08:52 +0000987 MAC_(record_address_error)( a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +0000988}
989
njn5c004e42002-11-18 11:04:50 +0000990static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000991{
992 /* Check the address for validity. */
993 UInt vw = VGM_WORD_INVALID;
994 Bool aerr = False;
995 PROF_EVENT(74);
996
997 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
998
999 /* Fetch the V byte. */
1000 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1001
1002 /* If an address error has happened, report it. */
1003 if (aerr) {
njn43c799e2003-04-08 00:08:52 +00001004 MAC_(record_address_error)( a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001005 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1006 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1007 }
1008 return vw;
1009}
1010
njn5c004e42002-11-18 11:04:50 +00001011static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001012{
1013 /* Check the address for validity. */
1014 Bool aerr = False;
1015 PROF_EVENT(75);
1016 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1017
1018 /* Store the V bytes, remembering to do it little-endian-ly. */
1019 set_vbyte( a+0, vbytes & 0x000000FF );
1020
1021 /* If an address error has happened, report it. */
1022 if (aerr)
njn43c799e2003-04-08 00:08:52 +00001023 MAC_(record_address_error)( a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001024}
1025
1026
1027/* ---------------------------------------------------------------------
1028 Called from generated code, or from the assembly helpers.
1029 Handlers for value check failures.
1030 ------------------------------------------------------------------ */
1031
/* Report a use of an undefined value.  The argument to
   MC_(record_value_error) is the operand size in bytes; 0 is
   presumably the convention for "no meaningful size" (e.g. a
   conditional branch on undefined flags) -- confirm against the
   instrumenter. */
void MC_(helperc_value_check0_fail) ( void )
{
   MC_(record_value_error) ( 0 );
}
1036
/* Report a use of an undefined 1-byte value. */
void MC_(helperc_value_check1_fail) ( void )
{
   MC_(record_value_error) ( 1 );
}
1041
/* Report a use of an undefined 2-byte value. */
void MC_(helperc_value_check2_fail) ( void )
{
   MC_(record_value_error) ( 2 );
}
1046
/* Report a use of an undefined 4-byte value. */
void MC_(helperc_value_check4_fail) ( void )
{
   MC_(record_value_error) ( 4 );
}
1051
1052
1053/* ---------------------------------------------------------------------
1054 FPU load and store checks, called from generated code.
1055 ------------------------------------------------------------------ */
1056
/* Check an FPU load of `size` bytes from `addr`.  regparm(2): the
   two arguments arrive in registers, since this is called directly
   from generated code. */
__attribute__ ((regparm(2)))
void MC_(fpu_read_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to mc_fpu_read_check_SLOWLY for everything else.  */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   /* Debug build: always take the byte-at-a-time path. */
   mc_fpu_read_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned.  Look up the secondary map covering this
         64KB chunk. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      /* a_off selects the A-bit byte covering the 8-byte group this
         word lies in; requiring the whole byte to be VGM_BYTE_VALID
         is conservative -- any bad neighbour forces the slow path. */
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      mc_fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves, since each 4-byte
         half may fall in a different secondary map. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      mc_fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      mc_fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   /* 10/28/108 presumably correspond to x87 extended-real and
      FSTENV/FSAVE image sizes -- confirm against the instrumenter. */
   if (size == 10 || size == 28 || size == 108) {
      PROF_EVENT(84);
      mc_fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
#  endif
}
1146
1147
/* Check an FPU store of `size` bytes to `addr`.  regparm(2): the two
   arguments arrive in registers, since this is called directly from
   generated code. */
__attribute__ ((regparm(2)))
void MC_(fpu_write_check) ( Addr addr, Int size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, make it valid, otherwise invalid.
   */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   /* Debug build: always take the byte-at-a-time path. */
   mc_fpu_write_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned.  Look up the secondary map covering this
         64KB chunk. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      /* Conservative A check over the whole 8-byte group (see
         MC_(fpu_read_check)); any bad neighbour forces the slow
         path. */
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      mc_fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned.  Do it in two halves, since each 4-byte
         half may fall in a different secondary map. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      mc_fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      mc_fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   /* 10/28/108 presumably correspond to x87 extended-real and
      FSTENV/FSAVE image sizes -- confirm against the instrumenter. */
   if (size == 10 || size == 28 || size == 108) {
      PROF_EVENT(89);
      mc_fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
#  endif
}
1228
1229
1230/* ---------------------------------------------------------------------
1231 Slow, general cases for FPU load and store checks.
1232 ------------------------------------------------------------------ */
1233
1234/* Generic version. Test for both addr and value errors, but if
1235 there's an addr error, don't report a value error even if it
1236 exists. */
1237
njn5c004e42002-11-18 11:04:50 +00001238void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001239{
1240 Int i;
1241 Bool aerr = False;
1242 Bool verr = False;
1243 PROF_EVENT(90);
1244 for (i = 0; i < size; i++) {
1245 PROF_EVENT(91);
1246 if (get_abit(addr+i) != VGM_BIT_VALID)
1247 aerr = True;
1248 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1249 verr = True;
1250 }
1251
1252 if (aerr) {
njn43c799e2003-04-08 00:08:52 +00001253 MAC_(record_address_error)( addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001254 } else {
1255 if (verr)
njn5c004e42002-11-18 11:04:50 +00001256 MC_(record_value_error)( size );
njn25e49d8e72002-09-23 09:36:25 +00001257 }
1258}
1259
1260
1261/* Generic version. Test for addr errors. Valid addresses are
1262 given valid values, and invalid addresses invalid values. */
1263
njn5c004e42002-11-18 11:04:50 +00001264void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001265{
1266 Int i;
1267 Addr a_here;
1268 Bool a_ok;
1269 Bool aerr = False;
1270 PROF_EVENT(92);
1271 for (i = 0; i < size; i++) {
1272 PROF_EVENT(93);
1273 a_here = addr+i;
1274 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1275 if (a_ok) {
1276 set_vbyte(a_here, VGM_BYTE_VALID);
1277 } else {
1278 set_vbyte(a_here, VGM_BYTE_INVALID);
1279 aerr = True;
1280 }
1281 }
1282 if (aerr) {
njn43c799e2003-04-08 00:08:52 +00001283 MAC_(record_address_error)( addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001284 }
1285}
1286
njn25e49d8e72002-09-23 09:36:25 +00001287
1288/*------------------------------------------------------------*/
1289/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1290/*------------------------------------------------------------*/
1291
sewardja4495682002-10-21 07:29:59 +00001292/* For the memory leak detector, say whether an entire 64k chunk of
1293 address space is possibly in use, or not. If in doubt return
1294 True.
njn25e49d8e72002-09-23 09:36:25 +00001295*/
sewardja4495682002-10-21 07:29:59 +00001296static
1297Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001298{
sewardja4495682002-10-21 07:29:59 +00001299 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1300 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1301 /* Definitely not in use. */
1302 return False;
1303 } else {
1304 return True;
njn25e49d8e72002-09-23 09:36:25 +00001305 }
1306}
1307
1308
sewardja4495682002-10-21 07:29:59 +00001309/* For the memory leak detector, say whether or not a given word
1310 address is to be regarded as valid. */
1311static
1312Bool mc_is_valid_address ( Addr a )
1313{
1314 UInt vbytes;
1315 UChar abits;
1316 sk_assert(IS_ALIGNED4_ADDR(a));
1317 abits = get_abits4_ALIGNED(a);
1318 vbytes = get_vbytes4_ALIGNED(a);
1319 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1320 return True;
1321 } else {
1322 return False;
1323 }
1324}
1325
1326
1327/* Leak detector for this skin. We don't actually do anything, merely
1328 run the generic leak detector with suitable parameters for this
1329 skin. */
void MC_(detect_memory_leaks) ( void )
{
   /* Delegate entirely to the generic MAC leak detector, supplying
      the two memcheck-specific predicates defined above. */
   MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
}
1334
1335
1336/* ---------------------------------------------------------------------
1337 Sanity check machinery (permanently engaged).
1338 ------------------------------------------------------------------ */
1339
1340/* Check that nobody has spuriously claimed that the first or last 16
1341 pages (64 KB) of address space have become accessible. Failure of
1342 the following do not per se indicate an internal consistency
1343 problem, but they are so likely to that we really want to know
1344 about it if so. */
1345
1346Bool SK_(cheap_sanity_check) ( void )
1347{
sewardjd5815ec2003-04-06 12:23:27 +00001348 if (IS_DISTINGUISHED_SM(primary_map[0])
1349 /* kludge: kernel drops a page up at top of address range for
1350 magic "optimized syscalls", so we can no longer check the
1351 highest page */
1352 /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
1353 )
njn25e49d8e72002-09-23 09:36:25 +00001354 return True;
1355 else
1356 return False;
1357}
1358
1359Bool SK_(expensive_sanity_check) ( void )
1360{
1361 Int i;
1362
1363 /* Make sure nobody changed the distinguished secondary. */
1364 for (i = 0; i < 8192; i++)
1365 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1366 return False;
1367
1368 for (i = 0; i < 65536; i++)
1369 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1370 return False;
1371
1372 /* Make sure that the upper 3/4 of the primary map hasn't
1373 been messed with. */
1374 for (i = 65536; i < 262144; i++)
1375 if (primary_map[i] != & distinguished_secondary_map)
1376 return False;
1377
1378 return True;
1379}
1380
1381/* ---------------------------------------------------------------------
1382 Debugging machinery (turn on to debug). Something of a mess.
1383 ------------------------------------------------------------------ */
1384
#if 0
/* Dead debugging code: this whole region is compiled out.  Kept for
   reference only. */
/* Print the value tags on the 8 integer registers & flag reg. */

static void uint_to_bits ( UInt x, Char* str )
{
   Int i;
   Int w = 0;
   /* str must point to a space of at least 36 bytes. */
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   /* 32 digit chars + 3 separators + NUL = 36. */
   str[w++] = 0;
   sk_assert(w == 36);
}

/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressible words.  Useful for establishing where Valgrind's idea of
   addressibility has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif
1459
1460
1461
1462
#if 0
/* Dead debugging code: compiled out.  NOTE(review): the
   vg_show_reg_tags call below has an unbalanced parenthesis
   (`&VG_(m_shadow );`) -- this region will not compile if ever
   re-enabled. */
static Int zzz = 0;

void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   vg_show_reg_tags( &VG_(m_shadow );
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */
1473
njn25e49d8e72002-09-23 09:36:25 +00001474
1475/*------------------------------------------------------------*/
/*--- Command-line options and setup ---*/
1477/*------------------------------------------------------------*/
1478
/* Tell the core what shadow (V-bit) values freshly written general
   registers and %eflags should receive: fully defined. */
void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
{
   *gen_reg_value = VGM_WORD_VALID;
   *eflags_value  = VGM_EFLAGS_VALID;
}
1484
/* Command-line flags, set by SK_(process_cmd_line_option) below. */
Bool MC_(clo_avoid_strlen_errors)    = True;   /* suppress errs from inlined strlen */
Bool MC_(clo_cleanup)                = True;   /* improve code after instrumentation */
1487
njn25e49d8e72002-09-23 09:36:25 +00001488Bool SK_(process_cmd_line_option)(Char* arg)
1489{
njn43c799e2003-04-08 00:08:52 +00001490 if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=yes"))
njn5c004e42002-11-18 11:04:50 +00001491 MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00001492 else if (VG_CLO_STREQ(arg, "--avoid-strlen-errors=no"))
njn5c004e42002-11-18 11:04:50 +00001493 MC_(clo_avoid_strlen_errors) = False;
sewardj8ec2cfc2002-10-13 00:57:26 +00001494
njn43c799e2003-04-08 00:08:52 +00001495 else if (VG_CLO_STREQ(arg, "--cleanup=yes"))
1496 MC_(clo_cleanup) = True;
1497 else if (VG_CLO_STREQ(arg, "--cleanup=no"))
1498 MC_(clo_cleanup) = False;
1499
njn25e49d8e72002-09-23 09:36:25 +00001500 else
njn43c799e2003-04-08 00:08:52 +00001501 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001502
1503 return True;
njn25e49d8e72002-09-23 09:36:25 +00001504}
1505
/* Print user-visible usage: common MAC options first, then
   memcheck's own. */
void SK_(print_usage)(void)
{
   MAC_(print_common_usage)();
   VG_(printf)(
" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
   );
}
1513
/* Print debugging-only usage: common MAC options first, then
   memcheck's own. */
void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
   VG_(printf)(
" --cleanup=no|yes improve after instrumentation? [yes]\n"
   );
}
1521
1522
1523/*------------------------------------------------------------*/
1524/*--- Setup ---*/
1525/*------------------------------------------------------------*/
1526
/* One-time tool setup, run before command-line processing: declare
   tool details and needs, install memory-event callbacks, and
   register code-generation helpers.  NOTE(review): the order of the
   register_compact_helper calls appears significant (compact slots
   seem to be assigned in registration order) -- do not reorder. */
void SK_(pre_clo_init)(void)
{
   /* Identification shown in banners and error reports. */
   VG_(details_name)            ("Memcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a.k.a. Valgrind, a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2003, and GNU GPL'd, by Julian Seward.");
   VG_(details_bug_reports_to)  ("jseward@acm.org");
   VG_(details_avg_translation_sizeB) ( 228 );

   /* Core facilities this tool requires. */
   VG_(needs_core_errors)         ();
   VG_(needs_skin_errors)         ();
   VG_(needs_libc_freeres)        ();
   VG_(needs_shadow_regs)         ();
   VG_(needs_command_line_options)();
   VG_(needs_client_requests)     ();
   VG_(needs_extended_UCode)      ();
   VG_(needs_syscall_wrapper)     ();
   VG_(needs_sanity_checks)       ();

   /* Heap-block lifecycle hooks used by the common MAC machinery. */
   MAC_( new_mem_heap) = & mc_new_mem_heap;
   MAC_( ban_mem_heap) = & MC_(make_noaccess);
   MAC_(copy_mem_heap) = & mc_copy_address_range_state;
   MAC_( die_mem_heap) = & MC_(make_noaccess);

   /* Memory-event tracking callbacks: creation ... */
   VG_(track_new_mem_startup)      ( & mc_new_mem_startup );
   VG_(track_new_mem_stack_signal) ( & MC_(make_writable) );
   VG_(track_new_mem_brk)          ( & MC_(make_writable) );
   VG_(track_new_mem_mmap)         ( & mc_set_perms );

   /* ... remapping/reprotection ... */
   VG_(track_copy_mem_remap)       ( & mc_copy_address_range_state );
   VG_(track_change_mem_mprotect)  ( & mc_set_perms );

   /* ... and destruction. */
   VG_(track_die_mem_stack_signal) ( & MC_(make_noaccess) );
   VG_(track_die_mem_brk)          ( & MC_(make_noaccess) );
   VG_(track_die_mem_munmap)       ( & MC_(make_noaccess) );

   /* Specialised fast cases for common stack-adjustment sizes. */
   VG_(track_new_mem_stack_4)      ( & MAC_(new_mem_stack_4)  );
   VG_(track_new_mem_stack_8)      ( & MAC_(new_mem_stack_8)  );
   VG_(track_new_mem_stack_12)     ( & MAC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16)     ( & MAC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32)     ( & MAC_(new_mem_stack_32) );
   VG_(track_new_mem_stack)        ( & MAC_(new_mem_stack)    );

   VG_(track_die_mem_stack_4)      ( & MAC_(die_mem_stack_4)  );
   VG_(track_die_mem_stack_8)      ( & MAC_(die_mem_stack_8)  );
   VG_(track_die_mem_stack_12)     ( & MAC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16)     ( & MAC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32)     ( & MAC_(die_mem_stack_32) );
   VG_(track_die_mem_stack)        ( & MAC_(die_mem_stack)    );

   VG_(track_ban_mem_stack)        ( & MC_(make_noaccess) );

   /* Pre/post checks around core-initiated memory accesses. */
   VG_(track_pre_mem_read)         ( & mc_check_is_readable );
   VG_(track_pre_mem_read_asciiz)  ( & mc_check_is_readable_asciiz );
   VG_(track_pre_mem_write)        ( & mc_check_is_writable );
   VG_(track_post_mem_write)       ( & MC_(make_readable) );

   /* Three compact slots taken up by stack memory helpers */
   /* NOTE(review): the helper_* (no 'c') names presumably refer to
      assembly wrappers around the helperc_* C functions defined
      above -- confirm against mc_helpers.S / mc_include.h. */
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
   VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
   VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));

   /* These two made non-compact because 2-byte transactions are rare. */
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
   VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));

   /* Profiling event categories. */
   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VGP_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   /* Additional block description for VG_(describe_addr)() */
   MAC_(describe_addr_supp) = MC_(client_perm_maybe_describe);

   init_shadow_memory();
   MAC_(common_pre_clo_init)();
}
1611
/* Nothing to do after command-line option processing. */
void SK_(post_clo_init) ( void )
{
}
1615
/* Tool shutdown: run the common MAC finalisation, passing it this
   tool's leak detector to invoke as appropriate. */
void SK_(fini) ( Int exitcode )
{
   MAC_(common_fini)( MC_(detect_memory_leaks) );

   /* Disabled debug dump of client-block statistics; flip the 0 to
      enable during development. */
   if (0) {
      VG_(message)(Vg_DebugMsg,
        "------ Valgrind's client block stats follow ---------------" );
      MC_(show_client_block_stats)();
   }
}
1626
1627/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001628/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001629/*--------------------------------------------------------------------*/