blob: 78779bbeac9e453861db558656786bf6aaf6846b [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of MemCheck, a heavyweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn27f1a382002-11-08 15:48:16 +000037VG_DETERMINE_INTERFACE_VERSION
38
njn25e49d8e72002-09-23 09:36:25 +000039/* Define to debug the mem audit system. */
40/* #define VG_DEBUG_MEMORY */
41
njn25e49d8e72002-09-23 09:36:25 +000042/* Define to collect detailed performance info. */
43/* #define VG_PROFILE_MEMORY */
44
45#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
46
47/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000048/*--- Profiling events ---*/
49/*------------------------------------------------------------*/
50
51typedef
52 enum {
53 VgpCheckMem = VgpFini+1,
54 VgpSetMem
55 }
56 VgpSkinCC;
57
58/*------------------------------------------------------------*/
59/*--- Low-level support for memory checking. ---*/
60/*------------------------------------------------------------*/
61
62/* All reads and writes are checked against a memory map, which
63 records the state of all memory in the process. The memory map is
64 organised like this:
65
66 The top 16 bits of an address are used to index into a top-level
67 map table, containing 65536 entries. Each entry is a pointer to a
68 second-level map, which records the accesibililty and validity
69 permissions for the 65536 bytes indexed by the lower 16 bits of the
70 address. Each byte is represented by nine bits, one indicating
71 accessibility, the other eight validity. So each second-level map
72 contains 73728 bytes. This two-level arrangement conveniently
73 divides the 4G address space into 64k lumps, each size 64k bytes.
74
75 All entries in the primary (top-level) map must point to a valid
76 secondary (second-level) map. Since most of the 4G of address
77 space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible and
   not valid' for all bytes.  Entries in the primary map for
80 which the entire 64k is not in use at all point at this
81 distinguished map.
82
83 [...] lots of stuff deleted due to out of date-ness
84
85 As a final optimisation, the alignment and address checks for
86 4-byte loads and stores are combined in a neat way. The primary
87 map is extended to have 262144 entries (2^18), rather than 2^16.
88 The top 3/4 of these entries are permanently set to the
89 distinguished secondary map. For a 4-byte load/store, the
90 top-level map is indexed not with (addr >> 16) but instead f(addr),
91 where
92
93 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
94 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
95 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
96
97 ie the lowest two bits are placed above the 16 high address bits.
98 If either of these two bits are nonzero, the address is misaligned;
99 this will select a secondary map from the upper 3/4 of the primary
100 map. Because this is always the distinguished secondary map, a
101 (bogus) address check failure will result. The failure handling
102 code can then figure out whether this is a genuine addr check
103 failure or whether it is a possibly-legitimate access at a
104 misaligned address.
105*/
106
107
108/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000109/*--- Function declarations. ---*/
110/*------------------------------------------------------------*/
111
/* Slow-path handlers, defined towards the end of this file.  The
   fast-path helpers MC_(helperc_{LOADV,STOREV}{1,2,4}) below defer to
   these whenever an access is not the aligned, fully-addressible
   common case. */
static UInt mc_rd_V4_SLOWLY ( Addr a );
static UInt mc_rd_V2_SLOWLY ( Addr a );
static UInt mc_rd_V1_SLOWLY ( Addr a );
static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
static void mc_fpu_read_check_SLOWLY ( Addr addr, Int size );
static void mc_fpu_write_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000120
121/*------------------------------------------------------------*/
122/*--- Data defns. ---*/
123/*------------------------------------------------------------*/
124
/* Shadow memory for one 64k chunk of address space:
   - abits: one A (addressibility) bit per byte, packed 8 per UChar;
   - vbyte: one V (validity) byte per byte.
   Total 8192 + 65536 = 73728 bytes per SecMap. */
typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

/* Top-level map.  2^18 entries rather than 2^16: the upper 3/4 are
   part of the misaligned-4-byte-access trick described above and must
   always point at the distinguished map. */
static SecMap* primary_map[ /*65536*/ 262144 ];
/* The single shared secondary map meaning `not addressible, not
   valid' for every byte it covers.  Must never be written to. */
static SecMap distinguished_secondary_map;
134
njn25e49d8e72002-09-23 09:36:25 +0000135
136static void init_shadow_memory ( void )
137{
138 Int i;
139
140 for (i = 0; i < 8192; i++) /* Invalid address */
141 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
142 for (i = 0; i < 65536; i++) /* Invalid Value */
143 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
144
145 /* These entries gradually get overwritten as the used address
146 space expands. */
147 for (i = 0; i < 65536; i++)
148 primary_map[i] = &distinguished_secondary_map;
149
150 /* These ones should never change; it's a bug in Valgrind if they do. */
151 for (i = 65536; i < 262144; i++)
152 primary_map[i] = &distinguished_secondary_map;
153}
154
njn25e49d8e72002-09-23 09:36:25 +0000155/*------------------------------------------------------------*/
156/*--- Basic bitmap management, reading and writing. ---*/
157/*------------------------------------------------------------*/
158
159/* Allocate and initialise a secondary map. */
160
161static SecMap* alloc_secondary_map ( __attribute__ ((unused))
162 Char* caller )
163{
164 SecMap* map;
165 UInt i;
166 PROF_EVENT(10);
167
168 /* Mark all bytes as invalid access and invalid value. */
169
170 /* It just happens that a SecMap occupies exactly 18 pages --
171 although this isn't important, so the following assert is
172 spurious. */
njne427a662002-10-02 11:08:25 +0000173 sk_assert(0 == (sizeof(SecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000174 map = VG_(get_memory_from_mmap)( sizeof(SecMap), caller );
175
176 for (i = 0; i < 8192; i++)
177 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
178 for (i = 0; i < 65536; i++)
179 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
180
181 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
182 return map;
183}
184
185
186/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
187
188static __inline__ UChar get_abit ( Addr a )
189{
190 SecMap* sm = primary_map[a >> 16];
191 UInt sm_off = a & 0xFFFF;
192 PROF_EVENT(20);
193# if 0
194 if (IS_DISTINGUISHED_SM(sm))
195 VG_(message)(Vg_DebugMsg,
196 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
197# endif
198 return BITARR_TEST(sm->abits, sm_off)
199 ? VGM_BIT_INVALID : VGM_BIT_VALID;
200}
201
/* Read the V (validity) byte for a single byte address. */
static __inline__ UChar get_vbyte ( Addr a )
{
   SecMap* sm = primary_map[a >> 16];
   UInt sm_off = a & 0xFFFF;
   PROF_EVENT(21);
#  if 0
   if (IS_DISTINGUISHED_SM(sm))
      VG_(message)(Vg_DebugMsg,
                   "accessed distinguished 2ndary (V)map! 0x%x\n", a);
#  endif
   return sm->vbyte[sm_off];
}
214
215static __inline__ void set_abit ( Addr a, UChar abit )
216{
217 SecMap* sm;
218 UInt sm_off;
219 PROF_EVENT(22);
220 ENSURE_MAPPABLE(a, "set_abit");
221 sm = primary_map[a >> 16];
222 sm_off = a & 0xFFFF;
223 if (abit)
224 BITARR_SET(sm->abits, sm_off);
225 else
226 BITARR_CLEAR(sm->abits, sm_off);
227}
228
/* Write the V byte for a single byte address.  May allocate a fresh
   secondary map via ENSURE_MAPPABLE. */
static __inline__ void set_vbyte ( Addr a, UChar vbyte )
{
   SecMap* sm;
   UInt sm_off;
   PROF_EVENT(23);
   ENSURE_MAPPABLE(a, "set_vbyte");
   sm = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->vbyte[sm_off] = vbyte;
}
239
240
241/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
242
243static __inline__ UChar get_abits4_ALIGNED ( Addr a )
244{
245 SecMap* sm;
246 UInt sm_off;
247 UChar abits8;
248 PROF_EVENT(24);
249# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000250 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000251# endif
252 sm = primary_map[a >> 16];
253 sm_off = a & 0xFFFF;
254 abits8 = sm->abits[sm_off >> 3];
255 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
256 abits8 &= 0x0F;
257 return abits8;
258}
259
/* Fetch the 4 V bytes covering the 4-aligned word at a, as one UInt
   read directly out of the vbyte array. */
static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
{
   SecMap* sm = primary_map[a >> 16];
   UInt sm_off = a & 0xFFFF;
   PROF_EVENT(25);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   return ((UInt*)(sm->vbyte))[sm_off >> 2];
}
270
271
272/*------------------------------------------------------------*/
273/*--- Setting permissions over address ranges. ---*/
274/*------------------------------------------------------------*/
275
/* Set the A bit and V bits of every byte in [a, a+len) to the given
   example bits.  This is the workhorse behind MC_(make_noaccess),
   MC_(make_writable) and MC_(make_readable).  For speed it processes
   the bulk of the range 8 bytes at a time, writing whole abits bytes
   and whole V words, with slow per-byte loops for the unaligned head
   and tail. */
static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   /* Huge ranges are suspicious (possibly a bad syscall wrapper), so
      warn but carry on. */
   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d, v %d",
                   len, example_a_bit, example_v_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense.  Note the invariant: a byte
      may not be valid (initialised) unless it is also addressible. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   sk_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      sk_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity bits to write. */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms: abyte8 is the
      example A bit replicated into all 8 bit positions, vword4 is the
      example V byte replicated into all 4 byte positions. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;

#  ifdef VG_DEBUG_MEMORY
   /* Debug build: do the whole range one byte at a time. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast: one abits byte and two V words per 8
      target bytes. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
      ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment, again one byte at a time. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}
394
395/* Set permissions for address ranges ... */
396
/* Mark [a, a+len) as neither addressible nor valid. */
void MC_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("MC_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}
403
/* Mark [a, a+len) as addressible but with undefined (invalid) contents. */
void MC_(make_writable) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("MC_(make_writable)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}
410
/* Mark [a, a+len) as addressible and with defined (valid) contents. */
void MC_(make_readable) ( Addr a, UInt len )
{
   PROF_EVENT(37);
   DEBUG("MC_(make_readable)(%p, 0x%x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}
417
418/* Block-copy permissions (needed for implementing realloc()). */
419
njn5c004e42002-11-18 11:04:50 +0000420static void mc_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000421{
422 UInt i;
423
njn5c004e42002-11-18 11:04:50 +0000424 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000425
426 PROF_EVENT(40);
427 for (i = 0; i < len; i++) {
428 UChar abit = get_abit ( src+i );
429 UChar vbyte = get_vbyte ( src+i );
430 PROF_EVENT(41);
431 set_abit ( dst+i, abit );
432 set_vbyte ( dst+i, vbyte );
433 }
434}
435
436
437/* Check permissions for address range. If inadequate permissions
438 exist, *bad_addr is set to the offending address, so the caller can
439 know what it is. */
440
njn5c004e42002-11-18 11:04:50 +0000441Bool MC_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000442{
443 UInt i;
444 UChar abit;
445 PROF_EVENT(42);
446 for (i = 0; i < len; i++) {
447 PROF_EVENT(43);
448 abit = get_abit(a);
449 if (abit == VGM_BIT_INVALID) {
450 if (bad_addr != NULL) *bad_addr = a;
451 return False;
452 }
453 a++;
454 }
455 return True;
456}
457
njn5c004e42002-11-18 11:04:50 +0000458Bool MC_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000459{
460 UInt i;
461 UChar abit;
462 UChar vbyte;
463
464 PROF_EVENT(44);
njn5c004e42002-11-18 11:04:50 +0000465 DEBUG("MC_(check_readable)\n");
njn25e49d8e72002-09-23 09:36:25 +0000466 for (i = 0; i < len; i++) {
467 abit = get_abit(a);
468 vbyte = get_vbyte(a);
469 PROF_EVENT(45);
470 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
471 if (bad_addr != NULL) *bad_addr = a;
472 return False;
473 }
474 a++;
475 }
476 return True;
477}
478
479
480/* Check a zero-terminated ascii string. Tricky -- don't want to
481 examine the actual bytes, to find the end, until we're sure it is
482 safe to do so. */
483
njn5c004e42002-11-18 11:04:50 +0000484Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000485{
486 UChar abit;
487 UChar vbyte;
488 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000489 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000490 while (True) {
491 PROF_EVENT(47);
492 abit = get_abit(a);
493 vbyte = get_vbyte(a);
494 if (abit != VGM_BIT_VALID || vbyte != VGM_BYTE_VALID) {
495 if (bad_addr != NULL) *bad_addr = a;
496 return False;
497 }
498 /* Ok, a is safe to read. */
499 if (* ((UChar*)a) == 0) return True;
500 a++;
501 }
502}
503
504
505/*------------------------------------------------------------*/
506/*--- Memory event handlers ---*/
507/*------------------------------------------------------------*/
508
509/* Setting permissions for aligned words. This supports fast stack
510 operations. */
511
/* Fast path of MC_(make_noaccess) for 4-aligned address and length:
   supports quick stack-frame teardown.  Writes a whole invalid V word
   and sets (invalidates) a nibble of A bits per 4-byte step. */
static void mc_make_noaccess_aligned ( Addr a, UInt len )
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;
   Addr    a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(50);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "mc_make_noaccess_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         invalid (set). */
      sm->abits[sm_off >> 3] |= mask;
   }
   VGP_POPCC(VgpSetMem);
}
540
/* Fast path of MC_(make_writable) for 4-aligned address and length:
   supports quick stack-frame setup.  Writes a whole invalid V word
   (contents undefined) and clears (validates) a nibble of A bits per
   4-byte step. */
static void mc_make_writable_aligned ( Addr a, UInt len )
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;
   Addr    a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(51);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "mc_make_writable_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make the address bits
         valid (cleared) -- note &= ~mask below, unlike the |= in
         mc_make_noaccess_aligned. */
      sm->abits[sm_off >> 3] &= ~mask;
   }
   VGP_POPCC(VgpSetMem);
}
569
570
571static
njn5c004e42002-11-18 11:04:50 +0000572void mc_check_is_writable ( CorePart part, ThreadState* tst,
573 Char* s, Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000574{
575 Bool ok;
576 Addr bad_addr;
577
578 VGP_PUSHCC(VgpCheckMem);
579
580 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
581 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000582 ok = MC_(check_writable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000583 if (!ok) {
584 switch (part) {
585 case Vg_CoreSysCall:
njn5c004e42002-11-18 11:04:50 +0000586 MC_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000587 break;
588
589 case Vg_CorePThread:
590 case Vg_CoreSignal:
njn5c004e42002-11-18 11:04:50 +0000591 MC_(record_core_mem_error)( tst, /*isWrite=*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000592 break;
593
594 default:
njn5c004e42002-11-18 11:04:50 +0000595 VG_(skin_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000596 }
597 }
598
599 VGP_POPCC(VgpCheckMem);
600}
601
602static
njn5c004e42002-11-18 11:04:50 +0000603void mc_check_is_readable ( CorePart part, ThreadState* tst,
604 Char* s, Addr base, UInt size )
njn25e49d8e72002-09-23 09:36:25 +0000605{
606 Bool ok;
607 Addr bad_addr;
608
609 VGP_PUSHCC(VgpCheckMem);
610
611 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
612 base,base+size-1); */
njn5c004e42002-11-18 11:04:50 +0000613 ok = MC_(check_readable) ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000614 if (!ok) {
615 switch (part) {
616 case Vg_CoreSysCall:
njn5c004e42002-11-18 11:04:50 +0000617 MC_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000618 break;
619
620 case Vg_CorePThread:
njn5c004e42002-11-18 11:04:50 +0000621 MC_(record_core_mem_error)( tst, /*isWrite=*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000622 break;
623
624 /* If we're being asked to jump to a silly address, record an error
625 message before potentially crashing the entire system. */
626 case Vg_CoreTranslate:
njn5c004e42002-11-18 11:04:50 +0000627 MC_(record_jump_error)( tst, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000628 break;
629
630 default:
njn5c004e42002-11-18 11:04:50 +0000631 VG_(skin_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000632 }
633 }
634 VGP_POPCC(VgpCheckMem);
635}
636
637static
njn5c004e42002-11-18 11:04:50 +0000638void mc_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
639 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000640{
641 Bool ok = True;
642 Addr bad_addr;
643 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
644
645 VGP_PUSHCC(VgpCheckMem);
646
njne427a662002-10-02 11:08:25 +0000647 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000648 ok = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000649 if (!ok) {
njn5c004e42002-11-18 11:04:50 +0000650 MC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000651 }
652
653 VGP_POPCC(VgpCheckMem);
654}
655
656
/* Core callback for memory already mapped when tracking starts. */
static
void mc_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   MC_(make_readable)(a, len);
}
664
665static
njn5c004e42002-11-18 11:04:50 +0000666void mc_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000667{
668 if (is_inited) {
njn5c004e42002-11-18 11:04:50 +0000669 MC_(make_readable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000670 } else {
njn5c004e42002-11-18 11:04:50 +0000671 MC_(make_writable)(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000672 }
673}
674
/* Core callback for mprotect-style permission changes.  Readability
   wins over writability; neither means no-access. */
static
void mc_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
{
   DEBUG("mc_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n", a, len, rr, ww, xx);
   if      (rr) MC_(make_readable)(a, len);
   else if (ww) MC_(make_writable)(a, len);
   else         MC_(make_noaccess)(a, len);
}
683
684
685/*------------------------------------------------------------*/
686/*--- Functions called directly from generated code. ---*/
687/*------------------------------------------------------------*/
688
689static __inline__ UInt rotateRight16 ( UInt x )
690{
691 /* Amazingly, gcc turns this into a single rotate insn. */
692 return (x >> 16) | (x << 16);
693}
694
695
/* Plain primary-map index: top 16 address bits, no rotation trick. */
static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}
700
701
702/* Read/write 1/2/4 sized V bytes, and emit an address error if
703 needed. */
704
705/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
706 Under all other circumstances, it defers to the relevant _SLOWLY
707 function, which can handle all situations.
708*/
/* Fast-path 4-byte load helper, called from generated code.  The
   primary map is indexed with rotateRight16(a): if a is not 4-aligned
   its two low bits land above bit 16, selecting the distinguished map
   in the upper 3/4 of primary_map, which forces the slow path. */
__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V4_SLOWLY(a);
#  else
   UInt sec_no = rotateRight16(a) & 0x3FFFF;   /* 2^18 entries */
   SecMap* sm = primary_map[sec_no];
   UInt a_off = (a & 0xFFFF) >> 3;
   UChar abits = sm->abits[a_off];
   abits >>= (a & 4);   /* select the nibble for this word */
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
   } else {
      /* Slow but general case. */
      return mc_rd_V4_SLOWLY(a);
   }
#  endif
}
733
/* Fast-path 4-byte store helper; same rotated-index misalignment
   trick as MC_(helperc_LOADV4). */
__attribute__ ((regparm(2)))
void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V4_SLOWLY(a, vbytes);
#  else
   UInt sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm = primary_map[sec_no];
   UInt a_off = (a & 0xFFFF) >> 3;
   UChar abits = sm->abits[a_off];
   abits >>= (a & 4);   /* select the nibble for this word */
   abits &= 15;
   PROF_EVENT(61);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = a & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
   } else {
      /* Slow but general case. */
      mc_wr_V4_SLOWLY(a, vbytes);
   }
#  endif
}
758
/* Fast-path 2-byte load helper.  Mask is 0x1FFFF: only the lowest
   address bit is rotated above bit 16, so a misaligned (odd) address
   selects the distinguished map and falls to the slow path.  The
   upper 16 result bits are marked valid (0xFFFF0000 -- 1s mean
   invalid in V-bit encoding... no: VGM_BYTE_INVALID is 0xFF, so 1s
   here conservatively pad the unused half as invalid). */
__attribute__ ((regparm(1)))
UInt MC_(helperc_LOADV2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V2_SLOWLY(a);
#  else
   UInt sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm = primary_map[sec_no];
   UInt a_off = (a & 0xFFFF) >> 3;
   PROF_EVENT(62);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = a & 0xFFFF;
      return 0xFFFF0000
             |
             (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
   } else {
      /* Slow but general case. */
      return mc_rd_V2_SLOWLY(a);
   }
#  endif
}
781
/* Fast-path 2-byte store helper; see MC_(helperc_LOADV2) for the
   0x1FFFF index trick. */
__attribute__ ((regparm(2)))
void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V2_SLOWLY(a, vbytes);
#  else
   UInt sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm = primary_map[sec_no];
   UInt a_off = (a & 0xFFFF) >> 3;
   PROF_EVENT(63);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly: the covering 8-byte group is
         fully addressible. */
      UInt v_off = a & 0xFFFF;
      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
   } else {
      /* Slow but general case. */
      mc_wr_V2_SLOWLY(a, vbytes);
   }
#  endif
}
802
803__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000804UInt MC_(helperc_LOADV1) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000805{
806# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000807 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000808# else
809 UInt sec_no = shiftRight16(a);
810 SecMap* sm = primary_map[sec_no];
811 UInt a_off = (a & 0xFFFF) >> 3;
812 PROF_EVENT(64);
813 if (sm->abits[a_off] == VGM_BYTE_VALID) {
814 /* Handle common case quickly. */
815 UInt v_off = a & 0xFFFF;
816 return 0xFFFFFF00
817 |
818 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
819 } else {
820 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000821 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000822 }
823# endif
824}
825
826__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000827void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000828{
829# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000830 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000831# else
832 UInt sec_no = shiftRight16(a);
833 SecMap* sm = primary_map[sec_no];
834 UInt a_off = (a & 0xFFFF) >> 3;
835 PROF_EVENT(65);
836 if (sm->abits[a_off] == VGM_BYTE_VALID) {
837 /* Handle common case quickly. */
838 UInt v_off = a & 0xFFFF;
839 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
840 } else {
841 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000842 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000843 }
844# endif
845}
846
847
848/*------------------------------------------------------------*/
849/*--- Fallback functions to handle cases that the above ---*/
850/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
851/*------------------------------------------------------------*/
852
/* General-case 4-byte load: handles misaligned and partially or
   wholly unaddressible addresses, reporting address errors as a side
   effect.  Returns the V word for the load. */
static UInt mc_rd_V4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;
   UInt vb0, vb1, vb2, vb3;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory (assembled little-endian)
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which confuses the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MC_(record_address_error)( a, 4, False );
      return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
             | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MC_(clo_partial_loads_ok));
   {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}
923
njn5c004e42002-11-18 11:04:50 +0000924static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000925{
926 /* Check the address for validity. */
927 Bool aerr = False;
928 PROF_EVENT(71);
929
930 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
931 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
932 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
933 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
934
935 /* Store the V bytes, remembering to do it little-endian-ly. */
936 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
937 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
938 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
939 set_vbyte( a+3, vbytes & 0x000000FF );
940
941 /* If an address error has happened, report it. */
942 if (aerr)
njn5c004e42002-11-18 11:04:50 +0000943 MC_(record_address_error)( a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +0000944}
945
njn5c004e42002-11-18 11:04:50 +0000946static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000947{
948 /* Check the address for validity. */
949 UInt vw = VGM_WORD_INVALID;
950 Bool aerr = False;
951 PROF_EVENT(72);
952
953 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
954 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
955
956 /* Fetch the V bytes, remembering to do it little-endian-ly. */
957 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
958 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
959
960 /* If an address error has happened, report it. */
961 if (aerr) {
njn5c004e42002-11-18 11:04:50 +0000962 MC_(record_address_error)( a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +0000963 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
964 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
965 }
966 return vw;
967}
968
njn5c004e42002-11-18 11:04:50 +0000969static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000970{
971 /* Check the address for validity. */
972 Bool aerr = False;
973 PROF_EVENT(73);
974
975 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
976 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
977
978 /* Store the V bytes, remembering to do it little-endian-ly. */
979 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
980 set_vbyte( a+1, vbytes & 0x000000FF );
981
982 /* If an address error has happened, report it. */
983 if (aerr)
njn5c004e42002-11-18 11:04:50 +0000984 MC_(record_address_error)( a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +0000985}
986
njn5c004e42002-11-18 11:04:50 +0000987static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000988{
989 /* Check the address for validity. */
990 UInt vw = VGM_WORD_INVALID;
991 Bool aerr = False;
992 PROF_EVENT(74);
993
994 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
995
996 /* Fetch the V byte. */
997 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
998
999 /* If an address error has happened, report it. */
1000 if (aerr) {
njn5c004e42002-11-18 11:04:50 +00001001 MC_(record_address_error)( a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001002 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1003 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1004 }
1005 return vw;
1006}
1007
njn5c004e42002-11-18 11:04:50 +00001008static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001009{
1010 /* Check the address for validity. */
1011 Bool aerr = False;
1012 PROF_EVENT(75);
1013 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1014
1015 /* Store the V bytes, remembering to do it little-endian-ly. */
1016 set_vbyte( a+0, vbytes & 0x000000FF );
1017
1018 /* If an address error has happened, report it. */
1019 if (aerr)
njn5c004e42002-11-18 11:04:50 +00001020 MC_(record_address_error)( a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001021}
1022
1023
1024/* ---------------------------------------------------------------------
1025 Called from generated code, or from the assembly helpers.
1026 Handlers for value check failures.
1027 ------------------------------------------------------------------ */
1028
/* Called from generated code (or the assembly helpers) when a use of
   uninitialised data of the given size is detected.  Each simply
   records a value error of that size; size 0 is the variant used
   where no particular operand width applies (presumably conditional
   jumps / flag uses -- TODO confirm against the instrumentation
   pass). */
void MC_(helperc_value_check0_fail) ( void )
{
   MC_(record_value_error) ( 0 );
}

/* Uninitialised-value use, 1-byte operand. */
void MC_(helperc_value_check1_fail) ( void )
{
   MC_(record_value_error) ( 1 );
}

/* Uninitialised-value use, 2-byte operand. */
void MC_(helperc_value_check2_fail) ( void )
{
   MC_(record_value_error) ( 2 );
}

/* Uninitialised-value use, 4-byte operand. */
void MC_(helperc_value_check4_fail) ( void )
{
   MC_(record_value_error) ( 4 );
}
1048
1049
1050/* ---------------------------------------------------------------------
1051 FPU load and store checks, called from generated code.
1052 ------------------------------------------------------------------ */
1053
__attribute__ ((regparm(2)))
void MC_(fpu_read_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to mc_fpu_read_check_SLOWLY for everything else.  */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_read_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned.  Locate the secondary map for this 64KB
         chunk and the 16-bit offset within it. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      /* One A bit per byte => 8 A bits per abits[] byte.  Requiring
         the whole group of 8 to be valid is conservative: if any
         neighbour is unaddressible we fall back to the slow path,
         which re-checks per byte. */
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Check all 4 V bytes in
         one go by reading them as a single aligned word. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow4;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow4:
      mc_fpu_read_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two 4-byte halves, each checked
         exactly as in the size==4 case above. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      v_off = addr & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Second half.  Note: looked up separately since it may fall
         in a different 64KB chunk. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
         goto slow8;
      /* Both halves properly aligned, addressible and with valid
         data. */
      return;
     slow8:
      mc_fpu_read_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      mc_fpu_read_check_SLOWLY ( addr, 2 );
      return;
   }

   /* Presumably the x87 extended-real (10) and FSTENV/FSAVE image
      (28/108) sizes -- TODO confirm against the instrumentation of
      FPU insns. */
   if (size == 10 || size == 28 || size == 108) {
      PROF_EVENT(84);
      mc_fpu_read_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_read_check): unhandled size");
#  endif
}
1143
1144
__attribute__ ((regparm(2)))
void MC_(fpu_write_check) ( Addr addr, Int size )
{
   /* Ensure the written area is addressible, and moan if otherwise.
      If it is addressible, make it valid, otherwise invalid.
   */

   SecMap* sm;
   UInt    sm_off, v_off, a_off;
   Addr    addr4;

   PROF_EVENT(85);

#  ifdef VG_DEBUG_MEMORY
   mc_fpu_write_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(86);
      /* Properly aligned.  Locate the secondary map for this 64KB
         chunk and the 16-bit offset within it. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      /* 8 A bits per abits[] byte; insist the whole group of 8 is
         addressible, else defer to the per-byte slow path. */
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible.  Make valid: set all 4
         V bytes at once with one aligned word store. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      return;
     slow4:
      mc_fpu_write_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(87);
      /* Properly aligned.  Do it in two 4-byte halves, each handled
         exactly as in the size==4 case above. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible.  Make valid. */
      v_off = addr & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Second half -- may live in a different 64KB chunk, hence the
         fresh primary_map lookup. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      v_off = addr4 & 0xFFFF;
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
      /* Properly aligned, addressible and with valid data. */
      return;
     slow8:
      mc_fpu_write_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(88);
      mc_fpu_write_check_SLOWLY ( addr, 2 );
      return;
   }

   /* Presumably the x87 extended-real (10) and FSTENV/FSAVE image
      (28/108) sizes -- TODO confirm against the instrumentation of
      FPU insns. */
   if (size == 10 || size == 28 || size == 108) {
      PROF_EVENT(89);
      mc_fpu_write_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("MC_(fpu_write_check): unhandled size");
#  endif
}
1225
1226
1227/* ---------------------------------------------------------------------
1228 Slow, general cases for FPU load and store checks.
1229 ------------------------------------------------------------------ */
1230
1231/* Generic version. Test for both addr and value errors, but if
1232 there's an addr error, don't report a value error even if it
1233 exists. */
1234
njn5c004e42002-11-18 11:04:50 +00001235void mc_fpu_read_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001236{
1237 Int i;
1238 Bool aerr = False;
1239 Bool verr = False;
1240 PROF_EVENT(90);
1241 for (i = 0; i < size; i++) {
1242 PROF_EVENT(91);
1243 if (get_abit(addr+i) != VGM_BIT_VALID)
1244 aerr = True;
1245 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1246 verr = True;
1247 }
1248
1249 if (aerr) {
njn5c004e42002-11-18 11:04:50 +00001250 MC_(record_address_error)( addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001251 } else {
1252 if (verr)
njn5c004e42002-11-18 11:04:50 +00001253 MC_(record_value_error)( size );
njn25e49d8e72002-09-23 09:36:25 +00001254 }
1255}
1256
1257
1258/* Generic version. Test for addr errors. Valid addresses are
1259 given valid values, and invalid addresses invalid values. */
1260
njn5c004e42002-11-18 11:04:50 +00001261void mc_fpu_write_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001262{
1263 Int i;
1264 Addr a_here;
1265 Bool a_ok;
1266 Bool aerr = False;
1267 PROF_EVENT(92);
1268 for (i = 0; i < size; i++) {
1269 PROF_EVENT(93);
1270 a_here = addr+i;
1271 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1272 if (a_ok) {
1273 set_vbyte(a_here, VGM_BYTE_VALID);
1274 } else {
1275 set_vbyte(a_here, VGM_BYTE_INVALID);
1276 aerr = True;
1277 }
1278 }
1279 if (aerr) {
njn5c004e42002-11-18 11:04:50 +00001280 MC_(record_address_error)( addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001281 }
1282}
1283
njn25e49d8e72002-09-23 09:36:25 +00001284
1285/*------------------------------------------------------------*/
1286/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1287/*------------------------------------------------------------*/
1288
sewardja4495682002-10-21 07:29:59 +00001289/* For the memory leak detector, say whether an entire 64k chunk of
1290 address space is possibly in use, or not. If in doubt return
1291 True.
njn25e49d8e72002-09-23 09:36:25 +00001292*/
sewardja4495682002-10-21 07:29:59 +00001293static
1294Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001295{
sewardja4495682002-10-21 07:29:59 +00001296 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1297 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1298 /* Definitely not in use. */
1299 return False;
1300 } else {
1301 return True;
njn25e49d8e72002-09-23 09:36:25 +00001302 }
1303}
1304
1305
sewardja4495682002-10-21 07:29:59 +00001306/* For the memory leak detector, say whether or not a given word
1307 address is to be regarded as valid. */
1308static
1309Bool mc_is_valid_address ( Addr a )
1310{
1311 UInt vbytes;
1312 UChar abits;
1313 sk_assert(IS_ALIGNED4_ADDR(a));
1314 abits = get_abits4_ALIGNED(a);
1315 vbytes = get_vbytes4_ALIGNED(a);
1316 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1317 return True;
1318 } else {
1319 return False;
1320 }
1321}
1322
1323
1324/* Leak detector for this skin. We don't actually do anything, merely
1325 run the generic leak detector with suitable parameters for this
1326 skin. */
/* Leak detector entry point for this skin.  Does no work itself;
   merely runs the core's generic leak detector, parameterised with
   memcheck's notions of which 64k chunks may be in use, which
   word addresses hold valid pointers, where a block was allocated,
   and the user's leak-reporting options. */
void MC_(detect_memory_leaks) ( void )
{
   VG_(generic_detect_memory_leaks) (
      mc_is_valid_64k_chunk,
      mc_is_valid_address,
      MC_(get_where),
      MC_(clo_leak_resolution),
      MC_(clo_show_reachable),
      (UInt)LeakSupp
   );
}
1338
1339
1340/* ---------------------------------------------------------------------
1341 Sanity check machinery (permanently engaged).
1342 ------------------------------------------------------------------ */
1343
1344/* Check that nobody has spuriously claimed that the first or last 16
1345 pages (64 KB) of address space have become accessible. Failure of
1346 the following do not per se indicate an internal consistency
1347 problem, but they are so likely to that we really want to know
1348 about it if so. */
1349
1350Bool SK_(cheap_sanity_check) ( void )
1351{
1352 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
1353 IS_DISTINGUISHED_SM(primary_map[65535]))
1354 return True;
1355 else
1356 return False;
1357}
1358
Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary: every
      A bit and every V byte in it must still read "invalid".
      (8192 abits bytes and 65536 vbytes -- presumably the SecMap
      layout for a 64KB chunk; confirm against mc_include.h.) */
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   for (i = 0; i < 65536; i++)
      if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map hasn't
      been messed with: those entries must all still point at the
      shared distinguished secondary. */
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != & distinguished_secondary_map)
         return False;

   return True;
}
1380
1381/* ---------------------------------------------------------------------
1382 Debugging machinery (turn on to debug). Something of a mess.
1383 ------------------------------------------------------------------ */
1384
#if 0
/* NOTE(review): everything to the matching #endif is compiled out --
   ad-hoc debugging machinery kept for reference only. */
/* Print the value tags on the 8 integer registers & flag reg. */

/* Render the 32 bits of x into str as '0'/'1' characters, with a
   space after every 8 bits; str must hold at least 36 bytes. */
static void uint_to_bits ( UInt x, Char* str )
{
   Int i;
   Int w = 0;
   /* str must point to a space of at least 36 bytes. */
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   str[w++] = 0;
   sk_assert(w == 36);
}

/* Caution!  Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   /* Snapshot the shadow (V-bit) registers from the baseBlock. */
   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressible words.  Useful for establishing where Valgrind's idea of
   addressibility has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif
1459
1460
1461
1462
#if 0
/* NOTE(review): compiled out.  Debug aid to dump register tags and a
   translation for each basic block. */
static Int zzz = 0;

void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   /* NOTE(review): the next line has unbalanced parentheses and takes
      an argument although vg_show_reg_tags() above is declared with
      none -- this disabled code would not compile if re-enabled. */
   vg_show_reg_tags( &VG_(m_shadow );
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */
1473
njn25e49d8e72002-09-23 09:36:25 +00001474
1475/*------------------------------------------------------------*/
1476/*--- Setup ---*/
1477/*------------------------------------------------------------*/
1478
/* Tell the core what shadow (V-bit) values freshly-written registers
   should get: all-defined for the general registers, and the
   "defined" pattern for %eflags. */
void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
{
   *gen_reg_value = VGM_WORD_VALID;
   *eflags_value  = VGM_EFLAGS_VALID;
}
1484
1485Bool SK_(process_cmd_line_option)(Char* arg)
1486{
1487# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
1488# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
1489
sewardj51647482002-12-15 01:42:22 +00001490 if (STREQ(arg, "--avoid-strlen-errors=yes"))
njn5c004e42002-11-18 11:04:50 +00001491 MC_(clo_avoid_strlen_errors) = True;
sewardj8ec2cfc2002-10-13 00:57:26 +00001492 else if (STREQ(arg, "--avoid-strlen-errors=no"))
njn5c004e42002-11-18 11:04:50 +00001493 MC_(clo_avoid_strlen_errors) = False;
sewardj8ec2cfc2002-10-13 00:57:26 +00001494
njn25e49d8e72002-09-23 09:36:25 +00001495 else
njn5c004e42002-11-18 11:04:50 +00001496 return MC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001497
1498 return True;
1499
1500#undef STREQ
1501#undef STREQN
1502}
1503
/* Usage text for memcheck's options, returned to the core for
   display with --help.  Note the first group documents options
   handled by MC_(process_common_cmd_line_option); only
   --avoid-strlen-errors is handled directly in this file. */
Char* SK_(usage)(void)
{
   return
"    --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
"    --freelist-vol=<number>   volume of freed blocks queue [1000000]\n"
"    --leak-check=no|yes       search for memory leaks at exit? [no]\n"
"    --leak-resolution=low|med|high\n"
"                              amount of bt merging in leak check [low]\n"
"    --show-reachable=no|yes   show reachable blocks in leak check? [no]\n"
"    --workaround-gcc296-bugs=no|yes  self explanatory [no]\n"
"\n"
"    --cleanup=no|yes          improve after instrumentation? [yes]\n"
"    --avoid-strlen-errors=no|yes  suppress errs from inlined strlen [yes]\n";
}
1518
1519
1520/*------------------------------------------------------------*/
1521/*--- Setup ---*/
1522/*------------------------------------------------------------*/
1523
/* Skin initialisation, run before command-line options are read:
   declare memcheck's identity, the core services it needs, the
   memory-event callbacks it tracks, and the helpers called from
   generated code; finally set up shadow memory and profiling. */
void SK_(pre_clo_init)(void)
{
   /* Identity / banner details. */
   VG_(details_name)            ("Memcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a.k.a. Valgrind, a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.");
   VG_(details_bug_reports_to)  ("jseward@acm.org");
   VG_(details_avg_translation_sizeB) ( 228 );

   /* Core services this skin requires. */
   VG_(needs_core_errors)         ();
   VG_(needs_skin_errors)         ();
   VG_(needs_libc_freeres)        ();
   VG_(needs_sizeof_shadow_block) ( 1 );
   VG_(needs_shadow_regs)         ();
   VG_(needs_command_line_options)();
   VG_(needs_client_requests)     ();
   VG_(needs_extended_UCode)      ();
   VG_(needs_syscall_wrapper)     ();
   VG_(needs_alternative_free)    ();
   VG_(needs_sanity_checks)       ();

   /* Memory life-cycle events: creation of new memory. */
   VG_(track_new_mem_startup)      ( & mc_new_mem_startup );
   VG_(track_new_mem_heap)         ( & mc_new_mem_heap );
   VG_(track_new_mem_stack)        ( & MC_(make_writable) );
   VG_(track_new_mem_stack_aligned)( & mc_make_writable_aligned );
   VG_(track_new_mem_stack_signal) ( & MC_(make_writable) );
   VG_(track_new_mem_brk)          ( & MC_(make_writable) );
   VG_(track_new_mem_mmap)         ( & mc_set_perms );

   /* Copying / permission changes. */
   VG_(track_copy_mem_heap)        ( & mc_copy_address_range_state );
   VG_(track_copy_mem_remap)       ( & mc_copy_address_range_state );
   VG_(track_change_mem_mprotect)  ( & mc_set_perms );

   /* Red zones and other banned areas. */
   VG_(track_ban_mem_heap)         ( & MC_(make_noaccess) );
   VG_(track_ban_mem_stack)        ( & MC_(make_noaccess) );

   /* Death of memory. */
   VG_(track_die_mem_heap)         ( & MC_(make_noaccess) );
   VG_(track_die_mem_stack)        ( & MC_(make_noaccess) );
   VG_(track_die_mem_stack_aligned)( & mc_make_noaccess_aligned );
   VG_(track_die_mem_stack_signal) ( & MC_(make_noaccess) );
   VG_(track_die_mem_brk)          ( & MC_(make_noaccess) );
   VG_(track_die_mem_munmap)       ( & MC_(make_noaccess) );

   /* Heap misuse. */
   VG_(track_bad_free)             ( & MC_(record_free_error) );
   VG_(track_mismatched_free)      ( & MC_(record_freemismatch_error) );

   /* Pre/post checks around core-initiated memory accesses. */
   VG_(track_pre_mem_read)         ( & mc_check_is_readable );
   VG_(track_pre_mem_read_asciiz)  ( & mc_check_is_readable_asciiz );
   VG_(track_pre_mem_write)        ( & mc_check_is_writable );
   VG_(track_post_mem_write)       ( & MC_(make_readable) );

   /* Helpers callable from generated code.  NOTE(review): the
      MC_(helper_value_check*_fail) names registered here differ from
      the MC_(helperc_value_check*_fail) C functions defined earlier
      in this file -- presumably the helper_ variants are assembly
      wrappers (defined elsewhere, e.g. mc_helpers.S) that save state
      and call the helperc_ versions.  Confirm before "fixing". */
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
   VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
   VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
   VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV1));
   VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));
   VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV1));

   /* These two made non-compact because 2-byte transactions are rare. */
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
   VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
   VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
   VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));

   /* Profiling event categories. */
   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );

   init_shadow_memory();
   MC_(init_prof_mem)();
}
1597
/* Run after command-line options have been processed; memcheck has
   nothing to do at that point. */
void SK_(post_clo_init) ( void )
{
}
1601
/* Skin finalisation: print allocation stats, hint at useful re-run
   flags at the default verbosity, run the leak check if requested,
   and shut down memory profiling. */
void SK_(fini) ( void )
{
   VG_(print_malloc_stats)();

   /* Only at the default verbosity (1): suggest flags the user might
      want on a re-run. */
   if (VG_(clo_verbosity) == 1) {
      if (!MC_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
             "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   /* Leak check runs after the hints so its report comes last. */
   if (MC_(clo_leak_check)) MC_(detect_memory_leaks)();

   MC_(done_prof_mem)();

   /* Disabled diagnostic dump of client-block statistics; flip the
      condition to re-enable. */
   if (0) {
      VG_(message)(Vg_DebugMsg,
        "------ Valgrind's client block stats follow ---------------" );
      MC_(show_client_block_stats)();
   }
}
1624
1625/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001626/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001627/*--------------------------------------------------------------------*/