blob: f849875efbb12f0e1fc7fca1a8bf1af7573ddde7 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
nethercotebb1c9912004-01-04 16:43:23 +000012 Copyright (C) 2000-2004 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn25cac76cb2002-09-23 11:21:57 +000033#include "mc_include.h"
34#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
37/* Define to debug the mem audit system. */
38/* #define VG_DEBUG_MEMORY */
39
njn25e49d8e72002-09-23 09:36:25 +000040#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
41
42/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000043/*--- Low-level support for memory checking. ---*/
44/*------------------------------------------------------------*/
45
46/* All reads and writes are checked against a memory map, which
47 records the state of all memory in the process. The memory map is
48 organised like this:
49
50 The top 16 bits of an address are used to index into a top-level
51 map table, containing 65536 entries. Each entry is a pointer to a
52 second-level map, which records the accesibililty and validity
53 permissions for the 65536 bytes indexed by the lower 16 bits of the
54 address. Each byte is represented by nine bits, one indicating
55 accessibility, the other eight validity. So each second-level map
56 contains 73728 bytes. This two-level arrangement conveniently
57 divides the 4G address space into 64k lumps, each size 64k bytes.
58
59 All entries in the primary (top-level) map must point to a valid
60 secondary (second-level) map. Since most of the 4G of address
61 space will not be in use -- ie, not mapped at all -- there is a
62 distinguished secondary map, which indicates `not addressible and
63 not valid' writeable for all bytes. Entries in the primary map for
64 which the entire 64k is not in use at all point at this
65 distinguished map.
66
67 [...] lots of stuff deleted due to out of date-ness
68
69 As a final optimisation, the alignment and address checks for
70 4-byte loads and stores are combined in a neat way. The primary
71 map is extended to have 262144 entries (2^18), rather than 2^16.
72 The top 3/4 of these entries are permanently set to the
73 distinguished secondary map. For a 4-byte load/store, the
74 top-level map is indexed not with (addr >> 16) but instead f(addr),
75 where
76
77 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
78 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
79 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
80
81 ie the lowest two bits are placed above the 16 high address bits.
82 If either of these two bits are nonzero, the address is misaligned;
83 this will select a secondary map from the upper 3/4 of the primary
84 map. Because this is always the distinguished secondary map, a
85 (bogus) address check failure will result. The failure handling
86 code can then figure out whether this is a genuine addr check
87 failure or whether it is a possibly-legitimate access at a
88 misaligned address.
89*/
90
91
92/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000093/*--- Function declarations. ---*/
94/*------------------------------------------------------------*/
95
njn5c004e42002-11-18 11:04:50 +000096static UInt mc_rd_V4_SLOWLY ( Addr a );
97static UInt mc_rd_V2_SLOWLY ( Addr a );
98static UInt mc_rd_V1_SLOWLY ( Addr a );
99static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
100static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
101static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
nethercote451eae92004-11-02 13:06:32 +0000102static void mc_fpu_read_check_SLOWLY ( Addr addr, SizeT size );
103static void mc_fpu_write_check_SLOWLY ( Addr addr, SizeT size );
njn25e49d8e72002-09-23 09:36:25 +0000104
105/*------------------------------------------------------------*/
106/*--- Data defns. ---*/
107/*------------------------------------------------------------*/
108
109typedef
110 struct {
111 UChar abits[8192];
112 UChar vbyte[65536];
113 }
114 SecMap;
115
116static SecMap* primary_map[ /*65536*/ 262144 ];
117static SecMap distinguished_secondary_map;
118
njn25e49d8e72002-09-23 09:36:25 +0000119static void init_shadow_memory ( void )
120{
121 Int i;
122
123 for (i = 0; i < 8192; i++) /* Invalid address */
124 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
125 for (i = 0; i < 65536; i++) /* Invalid Value */
126 distinguished_secondary_map.vbyte[i] = VGM_BYTE_INVALID;
127
128 /* These entries gradually get overwritten as the used address
129 space expands. */
130 for (i = 0; i < 65536; i++)
131 primary_map[i] = &distinguished_secondary_map;
132
133 /* These ones should never change; it's a bug in Valgrind if they do. */
134 for (i = 65536; i < 262144; i++)
135 primary_map[i] = &distinguished_secondary_map;
136}
137
njn25e49d8e72002-09-23 09:36:25 +0000138/*------------------------------------------------------------*/
139/*--- Basic bitmap management, reading and writing. ---*/
140/*------------------------------------------------------------*/
141
142/* Allocate and initialise a secondary map. */
143
144static SecMap* alloc_secondary_map ( __attribute__ ((unused))
145 Char* caller )
146{
147 SecMap* map;
148 UInt i;
149 PROF_EVENT(10);
150
151 /* Mark all bytes as invalid access and invalid value. */
fitzhardinge98abfc72003-12-16 02:05:15 +0000152 map = (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));
njn25e49d8e72002-09-23 09:36:25 +0000153
154 for (i = 0; i < 8192; i++)
155 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
156 for (i = 0; i < 65536; i++)
157 map->vbyte[i] = VGM_BYTE_INVALID; /* Invalid Value */
158
159 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
160 return map;
161}
162
163
164/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
165
166static __inline__ UChar get_abit ( Addr a )
167{
168 SecMap* sm = primary_map[a >> 16];
169 UInt sm_off = a & 0xFFFF;
170 PROF_EVENT(20);
171# if 0
172 if (IS_DISTINGUISHED_SM(sm))
173 VG_(message)(Vg_DebugMsg,
174 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
175# endif
176 return BITARR_TEST(sm->abits, sm_off)
177 ? VGM_BIT_INVALID : VGM_BIT_VALID;
178}
179
180static __inline__ UChar get_vbyte ( Addr a )
181{
182 SecMap* sm = primary_map[a >> 16];
183 UInt sm_off = a & 0xFFFF;
184 PROF_EVENT(21);
185# if 0
186 if (IS_DISTINGUISHED_SM(sm))
187 VG_(message)(Vg_DebugMsg,
188 "accessed distinguished 2ndary (V)map! 0x%x\n", a);
189# endif
190 return sm->vbyte[sm_off];
191}
192
sewardj56867352003-10-12 10:27:06 +0000193static /* __inline__ */ void set_abit ( Addr a, UChar abit )
njn25e49d8e72002-09-23 09:36:25 +0000194{
195 SecMap* sm;
196 UInt sm_off;
197 PROF_EVENT(22);
198 ENSURE_MAPPABLE(a, "set_abit");
199 sm = primary_map[a >> 16];
200 sm_off = a & 0xFFFF;
201 if (abit)
202 BITARR_SET(sm->abits, sm_off);
203 else
204 BITARR_CLEAR(sm->abits, sm_off);
205}
206
207static __inline__ void set_vbyte ( Addr a, UChar vbyte )
208{
209 SecMap* sm;
210 UInt sm_off;
211 PROF_EVENT(23);
212 ENSURE_MAPPABLE(a, "set_vbyte");
213 sm = primary_map[a >> 16];
214 sm_off = a & 0xFFFF;
215 sm->vbyte[sm_off] = vbyte;
216}
217
218
219/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
220
221static __inline__ UChar get_abits4_ALIGNED ( Addr a )
222{
223 SecMap* sm;
224 UInt sm_off;
225 UChar abits8;
226 PROF_EVENT(24);
227# ifdef VG_DEBUG_MEMORY
njnca82cc02004-11-22 17:18:48 +0000228 tl_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000229# endif
230 sm = primary_map[a >> 16];
231 sm_off = a & 0xFFFF;
232 abits8 = sm->abits[sm_off >> 3];
233 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
234 abits8 &= 0x0F;
235 return abits8;
236}
237
238static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
239{
240 SecMap* sm = primary_map[a >> 16];
241 UInt sm_off = a & 0xFFFF;
242 PROF_EVENT(25);
243# ifdef VG_DEBUG_MEMORY
njnca82cc02004-11-22 17:18:48 +0000244 tl_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000245# endif
246 return ((UInt*)(sm->vbyte))[sm_off >> 2];
247}
248
249
sewardjee070842003-07-05 17:53:55 +0000250static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
251{
252 SecMap* sm;
253 UInt sm_off;
254 ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
255 sm = primary_map[a >> 16];
256 sm_off = a & 0xFFFF;
257 PROF_EVENT(23);
258# ifdef VG_DEBUG_MEMORY
njnca82cc02004-11-22 17:18:48 +0000259 tl_assert(IS_ALIGNED4_ADDR(a));
sewardjee070842003-07-05 17:53:55 +0000260# endif
261 ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
262}
263
264
njn25e49d8e72002-09-23 09:36:25 +0000265/*------------------------------------------------------------*/
266/*--- Setting permissions over address ranges. ---*/
267/*------------------------------------------------------------*/
268
nethercote451eae92004-11-02 13:06:32 +0000269static void set_address_range_perms ( Addr a, SizeT len,
njn25e49d8e72002-09-23 09:36:25 +0000270 UInt example_a_bit,
271 UInt example_v_bit )
272{
273 UChar vbyte, abyte8;
274 UInt vword4, sm_off;
275 SecMap* sm;
276
277 PROF_EVENT(30);
278
279 if (len == 0)
280 return;
281
nethercotea66033c2004-03-08 15:37:58 +0000282 if (VG_(clo_verbosity) > 0) {
283 if (len > 100 * 1000 * 1000) {
284 VG_(message)(Vg_UserMsg,
285 "Warning: set address range perms: "
286 "large range %u, a %d, v %d",
287 len, example_a_bit, example_v_bit );
288 }
njn25e49d8e72002-09-23 09:36:25 +0000289 }
290
291 VGP_PUSHCC(VgpSetMem);
292
293 /* Requests to change permissions of huge address ranges may
294 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
295 far all legitimate requests have fallen beneath that size. */
296 /* 4 Mar 02: this is just stupid; get rid of it. */
njnca82cc02004-11-22 17:18:48 +0000297 /* tl_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000298
299 /* Check the permissions make sense. */
njnca82cc02004-11-22 17:18:48 +0000300 tl_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000301 || example_a_bit == VGM_BIT_INVALID);
njnca82cc02004-11-22 17:18:48 +0000302 tl_assert(example_v_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000303 || example_v_bit == VGM_BIT_INVALID);
304 if (example_a_bit == VGM_BIT_INVALID)
njnca82cc02004-11-22 17:18:48 +0000305 tl_assert(example_v_bit == VGM_BIT_INVALID);
njn25e49d8e72002-09-23 09:36:25 +0000306
307 /* The validity bits to write. */
308 vbyte = example_v_bit==VGM_BIT_VALID
309 ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
310
311 /* In order that we can charge through the address space at 8
312 bytes/main-loop iteration, make up some perms. */
313 abyte8 = (example_a_bit << 7)
314 | (example_a_bit << 6)
315 | (example_a_bit << 5)
316 | (example_a_bit << 4)
317 | (example_a_bit << 3)
318 | (example_a_bit << 2)
319 | (example_a_bit << 1)
320 | (example_a_bit << 0);
321 vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
322
323# ifdef VG_DEBUG_MEMORY
324 /* Do it ... */
325 while (True) {
326 PROF_EVENT(31);
327 if (len == 0) break;
328 set_abit ( a, example_a_bit );
329 set_vbyte ( a, vbyte );
330 a++;
331 len--;
332 }
333
334# else
335 /* Slowly do parts preceding 8-byte alignment. */
336 while (True) {
337 PROF_EVENT(31);
338 if (len == 0) break;
339 if ((a % 8) == 0) break;
340 set_abit ( a, example_a_bit );
341 set_vbyte ( a, vbyte );
342 a++;
343 len--;
344 }
345
346 if (len == 0) {
347 VGP_POPCC(VgpSetMem);
348 return;
349 }
njnca82cc02004-11-22 17:18:48 +0000350 tl_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000351
352 /* Once aligned, go fast. */
353 while (True) {
354 PROF_EVENT(32);
355 if (len < 8) break;
356 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
357 sm = primary_map[a >> 16];
358 sm_off = a & 0xFFFF;
359 sm->abits[sm_off >> 3] = abyte8;
360 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
361 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
362 a += 8;
363 len -= 8;
364 }
365
366 if (len == 0) {
367 VGP_POPCC(VgpSetMem);
368 return;
369 }
njnca82cc02004-11-22 17:18:48 +0000370 tl_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000371
372 /* Finish the upper fragment. */
373 while (True) {
374 PROF_EVENT(33);
375 if (len == 0) break;
376 set_abit ( a, example_a_bit );
377 set_vbyte ( a, vbyte );
378 a++;
379 len--;
380 }
381# endif
382
383 /* Check that zero page and highest page have not been written to
384 -- this could happen with buggy syscall wrappers. Today
385 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njn26f02512004-11-22 18:33:15 +0000386 tl_assert(TL_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000387 VGP_POPCC(VgpSetMem);
388}
389
390/* Set permissions for address ranges ... */
391
nethercote8b76fe52004-11-08 19:20:09 +0000392static void mc_make_noaccess ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000393{
394 PROF_EVENT(35);
nethercote8b76fe52004-11-08 19:20:09 +0000395 DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000396 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
397}
398
nethercote8b76fe52004-11-08 19:20:09 +0000399static void mc_make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000400{
401 PROF_EVENT(36);
nethercote8b76fe52004-11-08 19:20:09 +0000402 DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000403 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
404}
405
nethercote8b76fe52004-11-08 19:20:09 +0000406static void mc_make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000407{
408 PROF_EVENT(37);
nethercote8b76fe52004-11-08 19:20:09 +0000409 DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000410 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
411}
412
njn9b007f62003-04-07 14:40:25 +0000413static __inline__
414void make_aligned_word_writable(Addr a)
415{
416 SecMap* sm;
417 UInt sm_off;
418 UChar mask;
njn25e49d8e72002-09-23 09:36:25 +0000419
njn9b007f62003-04-07 14:40:25 +0000420 VGP_PUSHCC(VgpESPAdj);
421 ENSURE_MAPPABLE(a, "make_aligned_word_writable");
422 sm = primary_map[a >> 16];
423 sm_off = a & 0xFFFF;
424 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
425 mask = 0x0F;
426 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
427 /* mask now contains 1s where we wish to make address bits invalid (0s). */
428 sm->abits[sm_off >> 3] &= ~mask;
429 VGP_POPCC(VgpESPAdj);
430}
431
432static __inline__
433void make_aligned_word_noaccess(Addr a)
434{
435 SecMap* sm;
436 UInt sm_off;
437 UChar mask;
438
439 VGP_PUSHCC(VgpESPAdj);
440 ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
441 sm = primary_map[a >> 16];
442 sm_off = a & 0xFFFF;
443 ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
444 mask = 0x0F;
445 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
446 /* mask now contains 1s where we wish to make address bits invalid (1s). */
447 sm->abits[sm_off >> 3] |= mask;
448 VGP_POPCC(VgpESPAdj);
449}
450
451/* Nb: by "aligned" here we mean 8-byte aligned */
452static __inline__
453void make_aligned_doubleword_writable(Addr a)
454{
455 SecMap* sm;
456 UInt sm_off;
457
458 VGP_PUSHCC(VgpESPAdj);
459 ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
460 sm = primary_map[a >> 16];
461 sm_off = a & 0xFFFF;
462 sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
463 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
464 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
465 VGP_POPCC(VgpESPAdj);
466}
467
468static __inline__
469void make_aligned_doubleword_noaccess(Addr a)
470{
471 SecMap* sm;
472 UInt sm_off;
473
474 VGP_PUSHCC(VgpESPAdj);
475 ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
476 sm = primary_map[a >> 16];
477 sm_off = a & 0xFFFF;
478 sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
479 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
480 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
481 VGP_POPCC(VgpESPAdj);
482}
483
484/* The %esp update handling functions */
485ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
486 make_aligned_word_noaccess,
487 make_aligned_doubleword_writable,
488 make_aligned_doubleword_noaccess,
nethercote8b76fe52004-11-08 19:20:09 +0000489 mc_make_writable,
490 mc_make_noaccess
njn9b007f62003-04-07 14:40:25 +0000491 );
492
493/* Block-copy permissions (needed for implementing realloc()). */
nethercote451eae92004-11-02 13:06:32 +0000494static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000495{
nethercote451eae92004-11-02 13:06:32 +0000496 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000497
njn5c004e42002-11-18 11:04:50 +0000498 DEBUG("mc_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000499
500 PROF_EVENT(40);
501 for (i = 0; i < len; i++) {
502 UChar abit = get_abit ( src+i );
503 UChar vbyte = get_vbyte ( src+i );
504 PROF_EVENT(41);
505 set_abit ( dst+i, abit );
506 set_vbyte ( dst+i, vbyte );
507 }
508}
509
nethercote8b76fe52004-11-08 19:20:09 +0000510/*------------------------------------------------------------*/
511/*--- Checking memory ---*/
512/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000513
514/* Check permissions for address range. If inadequate permissions
515 exist, *bad_addr is set to the offending address, so the caller can
516 know what it is. */
517
sewardjecf8e102003-07-12 12:11:39 +0000518/* Returns True if [a .. a+len) is not addressible. Otherwise,
519 returns False, and if bad_addr is non-NULL, sets *bad_addr to
520 indicate the lowest failing address. Functions below are
521 similar. */
nethercote8b76fe52004-11-08 19:20:09 +0000522static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +0000523{
nethercote451eae92004-11-02 13:06:32 +0000524 SizeT i;
sewardjecf8e102003-07-12 12:11:39 +0000525 UChar abit;
526 PROF_EVENT(42);
527 for (i = 0; i < len; i++) {
528 PROF_EVENT(43);
529 abit = get_abit(a);
530 if (abit == VGM_BIT_VALID) {
531 if (bad_addr != NULL) *bad_addr = a;
532 return False;
533 }
534 a++;
535 }
536 return True;
537}
538
nethercote8b76fe52004-11-08 19:20:09 +0000539static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000540{
nethercote451eae92004-11-02 13:06:32 +0000541 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000542 UChar abit;
543 PROF_EVENT(42);
544 for (i = 0; i < len; i++) {
545 PROF_EVENT(43);
546 abit = get_abit(a);
547 if (abit == VGM_BIT_INVALID) {
548 if (bad_addr != NULL) *bad_addr = a;
549 return False;
550 }
551 a++;
552 }
553 return True;
554}
555
nethercote8b76fe52004-11-08 19:20:09 +0000556typedef enum {
557 MC_Ok = 5, MC_AddrErr = 6, MC_ValueErr = 7
558} MC_ReadResult;
559
560static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000561{
nethercote451eae92004-11-02 13:06:32 +0000562 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000563 UChar abit;
564 UChar vbyte;
565
566 PROF_EVENT(44);
nethercote8b76fe52004-11-08 19:20:09 +0000567 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +0000568 for (i = 0; i < len; i++) {
569 abit = get_abit(a);
570 vbyte = get_vbyte(a);
571 PROF_EVENT(45);
nethercote8b76fe52004-11-08 19:20:09 +0000572 // Report addressability errors in preference to definedness errors
573 // by checking the A bits first.
574 if (abit != VGM_BIT_VALID) {
njn25e49d8e72002-09-23 09:36:25 +0000575 if (bad_addr != NULL) *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000576 return MC_AddrErr;
577 }
578 if (vbyte != VGM_BYTE_VALID) {
579 if (bad_addr != NULL) *bad_addr = a;
580 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +0000581 }
582 a++;
583 }
nethercote8b76fe52004-11-08 19:20:09 +0000584 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +0000585}
586
587
588/* Check a zero-terminated ascii string. Tricky -- don't want to
589 examine the actual bytes, to find the end, until we're sure it is
590 safe to do so. */
591
njn9b007f62003-04-07 14:40:25 +0000592static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000593{
594 UChar abit;
595 UChar vbyte;
596 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000597 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000598 while (True) {
599 PROF_EVENT(47);
600 abit = get_abit(a);
601 vbyte = get_vbyte(a);
nethercote8b76fe52004-11-08 19:20:09 +0000602 // As in mc_check_readable(), check A bits first
603 if (abit != VGM_BIT_VALID) {
njn25e49d8e72002-09-23 09:36:25 +0000604 if (bad_addr != NULL) *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000605 return MC_AddrErr;
606 }
607 if (vbyte != VGM_BYTE_VALID) {
608 if (bad_addr != NULL) *bad_addr = a;
609 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +0000610 }
611 /* Ok, a is safe to read. */
nethercote8b76fe52004-11-08 19:20:09 +0000612 if (* ((UChar*)a) == 0) return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +0000613 a++;
614 }
615}
616
617
618/*------------------------------------------------------------*/
619/*--- Memory event handlers ---*/
620/*------------------------------------------------------------*/
621
njn25e49d8e72002-09-23 09:36:25 +0000622static
njn72718642003-07-24 08:45:32 +0000623void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +0000624 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +0000625{
626 Bool ok;
627 Addr bad_addr;
628
629 VGP_PUSHCC(VgpCheckMem);
630
631 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
632 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +0000633 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000634 if (!ok) {
635 switch (part) {
636 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +0000637 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
638 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000639 break;
640
641 case Vg_CorePThread:
642 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +0000643 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +0000644 break;
645
646 default:
njn67993252004-11-22 18:02:32 +0000647 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000648 }
649 }
650
651 VGP_POPCC(VgpCheckMem);
652}
653
654static
njn72718642003-07-24 08:45:32 +0000655void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +0000656 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +0000657{
njn25e49d8e72002-09-23 09:36:25 +0000658 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +0000659 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +0000660
661 VGP_PUSHCC(VgpCheckMem);
662
663 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
664 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +0000665 res = mc_check_readable ( base, size, &bad_addr );
666 if (MC_Ok != res) {
667 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
668
njn25e49d8e72002-09-23 09:36:25 +0000669 switch (part) {
670 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +0000671 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
672 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +0000673 break;
674
675 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +0000676 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +0000677 break;
678
679 /* If we're being asked to jump to a silly address, record an error
680 message before potentially crashing the entire system. */
681 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +0000682 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000683 break;
684
685 default:
njn67993252004-11-22 18:02:32 +0000686 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000687 }
688 }
689 VGP_POPCC(VgpCheckMem);
690}
691
692static
njn72718642003-07-24 08:45:32 +0000693void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000694 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000695{
nethercote8b76fe52004-11-08 19:20:09 +0000696 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +0000697 Addr bad_addr;
698 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
699
700 VGP_PUSHCC(VgpCheckMem);
701
njnca82cc02004-11-22 17:18:48 +0000702 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +0000703 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
704 if (MC_Ok != res) {
705 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
706 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +0000707 }
708
709 VGP_POPCC(VgpCheckMem);
710}
711
712
713static
nethercote451eae92004-11-02 13:06:32 +0000714void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +0000715{
njn1f3a9092002-10-04 09:22:30 +0000716 /* Ignore the permissions, just make it readable. Seems to work... */
nethercote451eae92004-11-02 13:06:32 +0000717 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
718 a,(ULong)len,rr,ww,xx);
nethercote8b76fe52004-11-08 19:20:09 +0000719 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000720}
721
722static
nethercote451eae92004-11-02 13:06:32 +0000723void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000724{
725 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +0000726 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000727 } else {
nethercote8b76fe52004-11-08 19:20:09 +0000728 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000729 }
730}
731
732static
nethercote451eae92004-11-02 13:06:32 +0000733void mc_set_perms (Addr a, SizeT len, Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000734{
nethercote451eae92004-11-02 13:06:32 +0000735 DEBUG("mc_set_perms(%p, %llu, rr=%u ww=%u, xx=%u)\n",
736 a, (ULong)len, rr, ww, xx);
nethercote8b76fe52004-11-08 19:20:09 +0000737 if (rr) mc_make_readable(a, len);
738 else if (ww) mc_make_writable(a, len);
739 else mc_make_noaccess(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000740}
741
742
743/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +0000744/*--- Register event handlers ---*/
745/*------------------------------------------------------------*/
746
747static void mc_post_regs_write_init ( void )
748{
749 UInt i;
nethercotec06e2132004-09-03 13:45:29 +0000750 for (i = FIRST_ARCH_REG; i <= LAST_ARCH_REG; i++)
njnd3040452003-05-19 15:04:06 +0000751 VG_(set_shadow_archreg)( i, VGM_WORD_VALID );
752 VG_(set_shadow_eflags)( VGM_EFLAGS_VALID );
753}
754
755static void mc_post_reg_write(ThreadId tid, UInt reg)
756{
757 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
758}
759
760static void mc_post_reg_write_clientcall(ThreadId tid, UInt reg, Addr f )
761{
762 VG_(set_thread_shadow_archreg)( tid, reg, VGM_WORD_VALID );
763}
764
nethercote8b76fe52004-11-08 19:20:09 +0000765static void mc_pre_reg_read(CorePart part, ThreadId tid, Char* s, UInt reg,
766 SizeT size)
767{
768 UWord mask;
769
770 // XXX: the only one at the moment
njnca82cc02004-11-22 17:18:48 +0000771 tl_assert(Vg_CoreSysCall == part);
nethercote8b76fe52004-11-08 19:20:09 +0000772
773 switch (size) {
774 case 4: mask = 0xffffffff; break;
775 case 2: mask = 0xffff; break;
776 case 1: mask = 0xff; break;
njn67993252004-11-22 18:02:32 +0000777 default: VG_(tool_panic)("Unhandled size in mc_pre_reg_read");
nethercote8b76fe52004-11-08 19:20:09 +0000778 }
779
780 if (VGM_WORD_VALID != (mask & VG_(get_thread_shadow_archreg)( tid, reg )) )
781 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
782}
njnd3040452003-05-19 15:04:06 +0000783
784/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000785/*--- Functions called directly from generated code. ---*/
786/*------------------------------------------------------------*/
787
788static __inline__ UInt rotateRight16 ( UInt x )
789{
790 /* Amazingly, gcc turns this into a single rotate insn. */
791 return (x >> 16) | (x << 16);
792}
793
794
795static __inline__ UInt shiftRight16 ( UInt x )
796{
797 return x >> 16;
798}
799
800
801/* Read/write 1/2/4 sized V bytes, and emit an address error if
802 needed. */
803
804/* VG_(helperc_{LD,ST}V{1,2,4}) handle the common case fast.
805 Under all other circumstances, it defers to the relevant _SLOWLY
806 function, which can handle all situations.
807*/
nethercoteeec46302004-08-23 15:06:23 +0000808REGPARM(1)
njn5c004e42002-11-18 11:04:50 +0000809UInt MC_(helperc_LOADV4) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000810{
811# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000812 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000813# else
814 UInt sec_no = rotateRight16(a) & 0x3FFFF;
815 SecMap* sm = primary_map[sec_no];
816 UInt a_off = (a & 0xFFFF) >> 3;
817 UChar abits = sm->abits[a_off];
818 abits >>= (a & 4);
819 abits &= 15;
820 PROF_EVENT(60);
821 if (abits == VGM_NIBBLE_VALID) {
822 /* Handle common case quickly: a is suitably aligned, is mapped,
823 and is addressible. */
824 UInt v_off = a & 0xFFFF;
825 return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
826 } else {
827 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000828 return mc_rd_V4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000829 }
830# endif
831}
832
nethercoteeec46302004-08-23 15:06:23 +0000833REGPARM(2)
njn5c004e42002-11-18 11:04:50 +0000834void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000835{
836# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000837 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000838# else
839 UInt sec_no = rotateRight16(a) & 0x3FFFF;
840 SecMap* sm = primary_map[sec_no];
841 UInt a_off = (a & 0xFFFF) >> 3;
842 UChar abits = sm->abits[a_off];
843 abits >>= (a & 4);
844 abits &= 15;
845 PROF_EVENT(61);
846 if (abits == VGM_NIBBLE_VALID) {
847 /* Handle common case quickly: a is suitably aligned, is mapped,
848 and is addressible. */
849 UInt v_off = a & 0xFFFF;
850 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
851 } else {
852 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000853 mc_wr_V4_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000854 }
855# endif
856}
857
nethercoteeec46302004-08-23 15:06:23 +0000858REGPARM(1)
njn5c004e42002-11-18 11:04:50 +0000859UInt MC_(helperc_LOADV2) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000860{
861# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000862 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000863# else
864 UInt sec_no = rotateRight16(a) & 0x1FFFF;
865 SecMap* sm = primary_map[sec_no];
866 UInt a_off = (a & 0xFFFF) >> 3;
867 PROF_EVENT(62);
868 if (sm->abits[a_off] == VGM_BYTE_VALID) {
869 /* Handle common case quickly. */
870 UInt v_off = a & 0xFFFF;
871 return 0xFFFF0000
872 |
873 (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
874 } else {
875 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000876 return mc_rd_V2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000877 }
878# endif
879}
880
nethercoteeec46302004-08-23 15:06:23 +0000881REGPARM(2)
njn5c004e42002-11-18 11:04:50 +0000882void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000883{
884# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000885 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000886# else
887 UInt sec_no = rotateRight16(a) & 0x1FFFF;
888 SecMap* sm = primary_map[sec_no];
889 UInt a_off = (a & 0xFFFF) >> 3;
890 PROF_EVENT(63);
891 if (sm->abits[a_off] == VGM_BYTE_VALID) {
892 /* Handle common case quickly. */
893 UInt v_off = a & 0xFFFF;
894 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
895 } else {
896 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000897 mc_wr_V2_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000898 }
899# endif
900}
901
nethercoteeec46302004-08-23 15:06:23 +0000902REGPARM(1)
njn5c004e42002-11-18 11:04:50 +0000903UInt MC_(helperc_LOADV1) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000904{
905# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000906 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000907# else
908 UInt sec_no = shiftRight16(a);
909 SecMap* sm = primary_map[sec_no];
910 UInt a_off = (a & 0xFFFF) >> 3;
911 PROF_EVENT(64);
912 if (sm->abits[a_off] == VGM_BYTE_VALID) {
913 /* Handle common case quickly. */
914 UInt v_off = a & 0xFFFF;
915 return 0xFFFFFF00
916 |
917 (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
918 } else {
919 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000920 return mc_rd_V1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000921 }
922# endif
923}
924
nethercoteeec46302004-08-23 15:06:23 +0000925REGPARM(2)
njn5c004e42002-11-18 11:04:50 +0000926void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +0000927{
928# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000929 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000930# else
931 UInt sec_no = shiftRight16(a);
932 SecMap* sm = primary_map[sec_no];
933 UInt a_off = (a & 0xFFFF) >> 3;
934 PROF_EVENT(65);
935 if (sm->abits[a_off] == VGM_BYTE_VALID) {
936 /* Handle common case quickly. */
937 UInt v_off = a & 0xFFFF;
938 ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
939 } else {
940 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000941 mc_wr_V1_SLOWLY(a, vbytes);
njn25e49d8e72002-09-23 09:36:25 +0000942 }
943# endif
944}
945
946
947/*------------------------------------------------------------*/
948/*--- Fallback functions to handle cases that the above ---*/
949/*--- VG_(helperc_{LD,ST}V{1,2,4}) can't manage. ---*/
950/*------------------------------------------------------------*/
951
njn5c004e42002-11-18 11:04:50 +0000952static UInt mc_rd_V4_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000953{
954 Bool a0ok, a1ok, a2ok, a3ok;
955 UInt vb0, vb1, vb2, vb3;
956
957 PROF_EVENT(70);
958
959 /* First establish independently the addressibility of the 4 bytes
960 involved. */
961 a0ok = get_abit(a+0) == VGM_BIT_VALID;
962 a1ok = get_abit(a+1) == VGM_BIT_VALID;
963 a2ok = get_abit(a+2) == VGM_BIT_VALID;
964 a3ok = get_abit(a+3) == VGM_BIT_VALID;
965
966 /* Also get the validity bytes for the address. */
967 vb0 = (UInt)get_vbyte(a+0);
968 vb1 = (UInt)get_vbyte(a+1);
969 vb2 = (UInt)get_vbyte(a+2);
970 vb3 = (UInt)get_vbyte(a+3);
971
972 /* Now distinguish 3 cases */
973
974 /* Case 1: the address is completely valid, so:
975 - no addressing error
976 - return V bytes as read from memory
977 */
978 if (a0ok && a1ok && a2ok && a3ok) {
979 UInt vw = VGM_WORD_INVALID;
980 vw <<= 8; vw |= vb3;
981 vw <<= 8; vw |= vb2;
982 vw <<= 8; vw |= vb1;
983 vw <<= 8; vw |= vb0;
984 return vw;
985 }
986
987 /* Case 2: the address is completely invalid.
988 - emit addressing error
989 - return V word indicating validity.
990 This sounds strange, but if we make loads from invalid addresses
991 give invalid data, we also risk producing a number of confusing
992 undefined-value errors later, which confuses the fact that the
993 error arose in the first place from an invalid address.
994 */
995 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn43c799e2003-04-08 00:08:52 +0000996 if (!MAC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000997 || ((a & 3) != 0)
998 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
njn72718642003-07-24 08:45:32 +0000999 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
njn25e49d8e72002-09-23 09:36:25 +00001000 return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
1001 | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
1002 }
1003
1004 /* Case 3: the address is partially valid.
1005 - no addressing error
1006 - returned V word is invalid where the address is invalid,
1007 and contains V bytes from memory otherwise.
njn5c004e42002-11-18 11:04:50 +00001008 Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +00001009 (which is the default), and the address is 4-aligned.
1010 If not, Case 2 will have applied.
1011 */
njnca82cc02004-11-22 17:18:48 +00001012 tl_assert(MAC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +00001013 {
1014 UInt vw = VGM_WORD_INVALID;
1015 vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
1016 vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
1017 vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
1018 vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
1019 return vw;
1020 }
1021}
1022
njn5c004e42002-11-18 11:04:50 +00001023static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001024{
1025 /* Check the address for validity. */
1026 Bool aerr = False;
1027 PROF_EVENT(71);
1028
1029 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1030 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1031 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1032 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1033
1034 /* Store the V bytes, remembering to do it little-endian-ly. */
1035 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1036 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1037 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1038 set_vbyte( a+3, vbytes & 0x000000FF );
1039
1040 /* If an address error has happened, report it. */
1041 if (aerr)
njn72718642003-07-24 08:45:32 +00001042 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +00001043}
1044
njn5c004e42002-11-18 11:04:50 +00001045static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001046{
1047 /* Check the address for validity. */
1048 UInt vw = VGM_WORD_INVALID;
1049 Bool aerr = False;
1050 PROF_EVENT(72);
1051
1052 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1053 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1054
1055 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1056 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1057 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1058
1059 /* If an address error has happened, report it. */
1060 if (aerr) {
njn72718642003-07-24 08:45:32 +00001061 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +00001062 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1063 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1064 }
1065 return vw;
1066}
1067
njn5c004e42002-11-18 11:04:50 +00001068static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001069{
1070 /* Check the address for validity. */
1071 Bool aerr = False;
1072 PROF_EVENT(73);
1073
1074 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1075 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1076
1077 /* Store the V bytes, remembering to do it little-endian-ly. */
1078 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1079 set_vbyte( a+1, vbytes & 0x000000FF );
1080
1081 /* If an address error has happened, report it. */
1082 if (aerr)
njn72718642003-07-24 08:45:32 +00001083 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +00001084}
1085
njn5c004e42002-11-18 11:04:50 +00001086static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001087{
1088 /* Check the address for validity. */
1089 UInt vw = VGM_WORD_INVALID;
1090 Bool aerr = False;
1091 PROF_EVENT(74);
1092
1093 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1094
1095 /* Fetch the V byte. */
1096 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1097
1098 /* If an address error has happened, report it. */
1099 if (aerr) {
njn72718642003-07-24 08:45:32 +00001100 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001101 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1102 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1103 }
1104 return vw;
1105}
1106
njn5c004e42002-11-18 11:04:50 +00001107static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001108{
1109 /* Check the address for validity. */
1110 Bool aerr = False;
1111 PROF_EVENT(75);
1112 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1113
1114 /* Store the V bytes, remembering to do it little-endian-ly. */
1115 set_vbyte( a+0, vbytes & 0x000000FF );
1116
1117 /* If an address error has happened, report it. */
1118 if (aerr)
njn72718642003-07-24 08:45:32 +00001119 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001120}
1121
1122
1123/* ---------------------------------------------------------------------
1124 Called from generated code, or from the assembly helpers.
1125 Handlers for value check failures.
1126 ------------------------------------------------------------------ */
1127
njn5c004e42002-11-18 11:04:50 +00001128void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001129{
njn72718642003-07-24 08:45:32 +00001130 MC_(record_value_error) ( VG_(get_current_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001131}
1132
njn5c004e42002-11-18 11:04:50 +00001133void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001134{
njn72718642003-07-24 08:45:32 +00001135 MC_(record_value_error) ( VG_(get_current_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001136}
1137
njn5c004e42002-11-18 11:04:50 +00001138void MC_(helperc_value_check2_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001139{
njn72718642003-07-24 08:45:32 +00001140 MC_(record_value_error) ( VG_(get_current_tid)(), 2 );
njn25e49d8e72002-09-23 09:36:25 +00001141}
1142
njn5c004e42002-11-18 11:04:50 +00001143void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001144{
njn72718642003-07-24 08:45:32 +00001145 MC_(record_value_error) ( VG_(get_current_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001146}
1147
1148
1149/* ---------------------------------------------------------------------
1150 FPU load and store checks, called from generated code.
1151 ------------------------------------------------------------------ */
1152
nethercoteeec46302004-08-23 15:06:23 +00001153REGPARM(2)
nethercote451eae92004-11-02 13:06:32 +00001154void MC_(fpu_read_check) ( Addr addr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001155{
1156 /* Ensure the read area is both addressible and valid (ie,
1157 readable). If there's an address error, don't report a value
1158 error too; but if there isn't an address error, check for a
1159 value error.
1160
1161 Try to be reasonably fast on the common case; wimp out and defer
njn5c004e42002-11-18 11:04:50 +00001162 to mc_fpu_read_check_SLOWLY for everything else. */
njn25e49d8e72002-09-23 09:36:25 +00001163
1164 SecMap* sm;
1165 UInt sm_off, v_off, a_off;
1166 Addr addr4;
1167
1168 PROF_EVENT(80);
1169
1170# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +00001171 mc_fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001172# else
1173
1174 if (size == 4) {
1175 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1176 PROF_EVENT(81);
1177 /* Properly aligned. */
1178 sm = primary_map[addr >> 16];
1179 sm_off = addr & 0xFFFF;
1180 a_off = sm_off >> 3;
1181 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1182 /* Properly aligned and addressible. */
1183 v_off = addr & 0xFFFF;
1184 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1185 goto slow4;
1186 /* Properly aligned, addressible and with valid data. */
1187 return;
1188 slow4:
njn5c004e42002-11-18 11:04:50 +00001189 mc_fpu_read_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +00001190 return;
1191 }
1192
1193 if (size == 8) {
1194 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1195 PROF_EVENT(82);
1196 /* Properly aligned. Do it in two halves. */
1197 addr4 = addr + 4;
1198 /* First half. */
1199 sm = primary_map[addr >> 16];
1200 sm_off = addr & 0xFFFF;
1201 a_off = sm_off >> 3;
1202 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1203 /* First half properly aligned and addressible. */
1204 v_off = addr & 0xFFFF;
1205 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1206 goto slow8;
1207 /* Second half. */
1208 sm = primary_map[addr4 >> 16];
1209 sm_off = addr4 & 0xFFFF;
1210 a_off = sm_off >> 3;
1211 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1212 /* Second half properly aligned and addressible. */
1213 v_off = addr4 & 0xFFFF;
1214 if (((UInt*)(sm->vbyte))[ v_off >> 2 ] != VGM_WORD_VALID)
1215 goto slow8;
1216 /* Both halves properly aligned, addressible and with valid
1217 data. */
1218 return;
1219 slow8:
njn5c004e42002-11-18 11:04:50 +00001220 mc_fpu_read_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +00001221 return;
1222 }
1223
1224 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1225 cases go quickly. */
1226 if (size == 2) {
1227 PROF_EVENT(83);
njn5c004e42002-11-18 11:04:50 +00001228 mc_fpu_read_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001229 return;
1230 }
1231
sewardj93992e22003-05-26 09:17:41 +00001232 if (size == 16 /*SSE*/
jsewardfca60182004-01-04 23:30:55 +00001233 || size == 10 || size == 28 || size == 108 || size == 512) {
njn25e49d8e72002-09-23 09:36:25 +00001234 PROF_EVENT(84);
njn5c004e42002-11-18 11:04:50 +00001235 mc_fpu_read_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001236 return;
1237 }
1238
1239 VG_(printf)("size is %d\n", size);
njn67993252004-11-22 18:02:32 +00001240 VG_(tool_panic)("MC_(fpu_read_check): unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001241# endif
1242}
1243
1244
nethercoteeec46302004-08-23 15:06:23 +00001245REGPARM(2)
nethercote451eae92004-11-02 13:06:32 +00001246void MC_(fpu_write_check) ( Addr addr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001247{
1248 /* Ensure the written area is addressible, and moan if otherwise.
1249 If it is addressible, make it valid, otherwise invalid.
1250 */
1251
1252 SecMap* sm;
1253 UInt sm_off, v_off, a_off;
1254 Addr addr4;
1255
1256 PROF_EVENT(85);
1257
1258# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +00001259 mc_fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001260# else
1261
1262 if (size == 4) {
1263 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1264 PROF_EVENT(86);
1265 /* Properly aligned. */
1266 sm = primary_map[addr >> 16];
1267 sm_off = addr & 0xFFFF;
1268 a_off = sm_off >> 3;
1269 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1270 /* Properly aligned and addressible. Make valid. */
1271 v_off = addr & 0xFFFF;
1272 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1273 return;
1274 slow4:
njn5c004e42002-11-18 11:04:50 +00001275 mc_fpu_write_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +00001276 return;
1277 }
1278
1279 if (size == 8) {
1280 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1281 PROF_EVENT(87);
1282 /* Properly aligned. Do it in two halves. */
1283 addr4 = addr + 4;
1284 /* First half. */
1285 sm = primary_map[addr >> 16];
1286 sm_off = addr & 0xFFFF;
1287 a_off = sm_off >> 3;
1288 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1289 /* First half properly aligned and addressible. Make valid. */
1290 v_off = addr & 0xFFFF;
1291 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1292 /* Second half. */
1293 sm = primary_map[addr4 >> 16];
1294 sm_off = addr4 & 0xFFFF;
1295 a_off = sm_off >> 3;
1296 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1297 /* Second half properly aligned and addressible. */
1298 v_off = addr4 & 0xFFFF;
1299 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = VGM_WORD_VALID;
1300 /* Properly aligned, addressible and with valid data. */
1301 return;
1302 slow8:
njn5c004e42002-11-18 11:04:50 +00001303 mc_fpu_write_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +00001304 return;
1305 }
1306
1307 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1308 cases go quickly. */
1309 if (size == 2) {
1310 PROF_EVENT(88);
njn5c004e42002-11-18 11:04:50 +00001311 mc_fpu_write_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001312 return;
1313 }
1314
sewardj93992e22003-05-26 09:17:41 +00001315 if (size == 16 /*SSE*/
jsewardfca60182004-01-04 23:30:55 +00001316 || size == 10 || size == 28 || size == 108 || size == 512) {
njn25e49d8e72002-09-23 09:36:25 +00001317 PROF_EVENT(89);
njn5c004e42002-11-18 11:04:50 +00001318 mc_fpu_write_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001319 return;
1320 }
1321
1322 VG_(printf)("size is %d\n", size);
njn67993252004-11-22 18:02:32 +00001323 VG_(tool_panic)("MC_(fpu_write_check): unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001324# endif
1325}
1326
1327
1328/* ---------------------------------------------------------------------
1329 Slow, general cases for FPU load and store checks.
1330 ------------------------------------------------------------------ */
1331
1332/* Generic version. Test for both addr and value errors, but if
1333 there's an addr error, don't report a value error even if it
1334 exists. */
1335
nethercote451eae92004-11-02 13:06:32 +00001336void mc_fpu_read_check_SLOWLY ( Addr addr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001337{
1338 Int i;
1339 Bool aerr = False;
1340 Bool verr = False;
1341 PROF_EVENT(90);
1342 for (i = 0; i < size; i++) {
1343 PROF_EVENT(91);
1344 if (get_abit(addr+i) != VGM_BIT_VALID)
1345 aerr = True;
1346 if (get_vbyte(addr+i) != VGM_BYTE_VALID)
1347 verr = True;
1348 }
1349
1350 if (aerr) {
njn72718642003-07-24 08:45:32 +00001351 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001352 } else {
1353 if (verr)
njn72718642003-07-24 08:45:32 +00001354 MC_(record_value_error)( VG_(get_current_tid)(), size );
njn25e49d8e72002-09-23 09:36:25 +00001355 }
1356}
1357
1358
1359/* Generic version. Test for addr errors. Valid addresses are
1360 given valid values, and invalid addresses invalid values. */
1361
nethercote451eae92004-11-02 13:06:32 +00001362void mc_fpu_write_check_SLOWLY ( Addr addr, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001363{
1364 Int i;
1365 Addr a_here;
1366 Bool a_ok;
1367 Bool aerr = False;
1368 PROF_EVENT(92);
1369 for (i = 0; i < size; i++) {
1370 PROF_EVENT(93);
1371 a_here = addr+i;
1372 a_ok = get_abit(a_here) == VGM_BIT_VALID;
1373 if (a_ok) {
1374 set_vbyte(a_here, VGM_BYTE_VALID);
1375 } else {
1376 set_vbyte(a_here, VGM_BYTE_INVALID);
1377 aerr = True;
1378 }
1379 }
1380 if (aerr) {
njn72718642003-07-24 08:45:32 +00001381 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, True );
njn25e49d8e72002-09-23 09:36:25 +00001382 }
1383}
1384
njn25e49d8e72002-09-23 09:36:25 +00001385
1386/*------------------------------------------------------------*/
sewardjee070842003-07-05 17:53:55 +00001387/*--- Metadata get/set functions, for client requests. ---*/
1388/*------------------------------------------------------------*/
1389
1390/* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1391 error, 3 == addressing error. */
nethercote8b76fe52004-11-08 19:20:09 +00001392static Int mc_get_or_set_vbits_for_client (
njn72718642003-07-24 08:45:32 +00001393 ThreadId tid,
sewardjee070842003-07-05 17:53:55 +00001394 Addr dataV,
1395 Addr vbitsV,
nethercote451eae92004-11-02 13:06:32 +00001396 SizeT size,
sewardjee070842003-07-05 17:53:55 +00001397 Bool setting /* True <=> set vbits, False <=> get vbits */
1398)
1399{
1400 Bool addressibleD = True;
1401 Bool addressibleV = True;
1402 UInt* data = (UInt*)dataV;
1403 UInt* vbits = (UInt*)vbitsV;
nethercote451eae92004-11-02 13:06:32 +00001404 SizeT szW = size / 4; /* sigh */
1405 SizeT i;
sewardjaf48a602003-07-06 00:54:47 +00001406 UInt* dataP = NULL; /* bogus init to keep gcc happy */
1407 UInt* vbitsP = NULL; /* ditto */
sewardjee070842003-07-05 17:53:55 +00001408
1409 /* Check alignment of args. */
1410 if (!(IS_ALIGNED4_ADDR(data) && IS_ALIGNED4_ADDR(vbits)))
1411 return 2;
1412 if ((size & 3) != 0)
1413 return 2;
1414
1415 /* Check that arrays are addressible. */
1416 for (i = 0; i < szW; i++) {
sewardjaf48a602003-07-06 00:54:47 +00001417 dataP = &data[i];
1418 vbitsP = &vbits[i];
sewardjee070842003-07-05 17:53:55 +00001419 if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1420 addressibleD = False;
1421 break;
1422 }
1423 if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1424 addressibleV = False;
1425 break;
1426 }
1427 }
1428 if (!addressibleD) {
njn72718642003-07-24 08:45:32 +00001429 MAC_(record_address_error)( tid, (Addr)dataP, 4,
sewardjee070842003-07-05 17:53:55 +00001430 setting ? True : False );
1431 return 3;
1432 }
1433 if (!addressibleV) {
njn72718642003-07-24 08:45:32 +00001434 MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
sewardjee070842003-07-05 17:53:55 +00001435 setting ? False : True );
1436 return 3;
1437 }
1438
1439 /* Do the copy */
1440 if (setting) {
1441 /* setting */
1442 for (i = 0; i < szW; i++) {
1443 if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn72718642003-07-24 08:45:32 +00001444 MC_(record_value_error)(tid, 4);
sewardjee070842003-07-05 17:53:55 +00001445 set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1446 }
1447 } else {
1448 /* getting */
1449 for (i = 0; i < szW; i++) {
1450 vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1451 set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1452 }
1453 }
1454
1455 return 1;
1456}
1457
1458
1459/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001460/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1461/*------------------------------------------------------------*/
1462
sewardja4495682002-10-21 07:29:59 +00001463/* For the memory leak detector, say whether an entire 64k chunk of
1464 address space is possibly in use, or not. If in doubt return
1465 True.
njn25e49d8e72002-09-23 09:36:25 +00001466*/
sewardja4495682002-10-21 07:29:59 +00001467static
1468Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001469{
njnca82cc02004-11-22 17:18:48 +00001470 tl_assert(chunk_number >= 0 && chunk_number < 65536);
sewardja4495682002-10-21 07:29:59 +00001471 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1472 /* Definitely not in use. */
1473 return False;
1474 } else {
1475 return True;
njn25e49d8e72002-09-23 09:36:25 +00001476 }
1477}
1478
1479
sewardja4495682002-10-21 07:29:59 +00001480/* For the memory leak detector, say whether or not a given word
1481 address is to be regarded as valid. */
1482static
1483Bool mc_is_valid_address ( Addr a )
1484{
1485 UInt vbytes;
1486 UChar abits;
njnca82cc02004-11-22 17:18:48 +00001487 tl_assert(IS_ALIGNED4_ADDR(a));
sewardja4495682002-10-21 07:29:59 +00001488 abits = get_abits4_ALIGNED(a);
1489 vbytes = get_vbytes4_ALIGNED(a);
1490 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1491 return True;
1492 } else {
1493 return False;
1494 }
1495}
1496
1497
nethercote996901a2004-08-03 13:29:09 +00001498/* Leak detector for this tool.  Nothing tool-specific happens here; we
sewardja4495682002-10-21 07:29:59 +00001499   merely run the generic leak detector with parameters suitable for
nethercote996901a2004-08-03 13:29:09 +00001500   this tool. */
nethercote8b76fe52004-11-08 19:20:09 +00001501static void mc_detect_memory_leaks ( void )
njn25e49d8e72002-09-23 09:36:25 +00001502{
njn43c799e2003-04-08 00:08:52 +00001503 MAC_(do_detect_memory_leaks) ( mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001504}
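/* Illustrative only: a hedged sketch of how a client can trigger the
   leak check above at an arbitrary point in its run, via the
   VALGRIND_DO_LEAK_CHECK macro from memcheck.h (it expands to the
   VG_USERREQ__DO_LEAK_CHECK request handled further down in this
   file).  It is a no-op when the program is not running under
   Valgrind. */
#if 0
#include "memcheck.h"

static void example_midrun_leak_check ( void )
{
   /* Ask Memcheck to report blocks that are unreachable right now,
      in addition to the check normally done at exit. */
   VALGRIND_DO_LEAK_CHECK;
}
#endif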
1505
1506
1507/* ---------------------------------------------------------------------
1508 Sanity check machinery (permanently engaged).
1509 ------------------------------------------------------------------ */
1510
njn26f02512004-11-22 18:33:15 +00001511Bool TL_(cheap_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001512{
jseward9800fd32004-01-04 23:08:04 +00001513 /* nothing useful we can rapidly check */
1514 return True;
njn25e49d8e72002-09-23 09:36:25 +00001515}
1516
njn26f02512004-11-22 18:33:15 +00001517Bool TL_(expensive_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001518{
1519 Int i;
1520
1521 /* Make sure nobody changed the distinguished secondary. */
1522 for (i = 0; i < 8192; i++)
1523 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1524 return False;
1525
1526 for (i = 0; i < 65536; i++)
1527 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1528 return False;
1529
1530 /* Make sure that the upper 3/4 of the primary map hasn't
1531 been messed with. */
1532 for (i = 65536; i < 262144; i++)
1533 if (primary_map[i] != & distinguished_secondary_map)
1534 return False;
1535
1536 return True;
1537}
1538
1539/* ---------------------------------------------------------------------
1540 Debugging machinery (turn on to debug). Something of a mess.
1541 ------------------------------------------------------------------ */
1542
1543#if 0
1544/* Print the value tags on the 8 integer registers & flag reg. */
1545
1546static void uint_to_bits ( UInt x, Char* str )
1547{
1548 Int i;
1549 Int w = 0;
1550 /* str must point to a space of at least 36 bytes. */
1551 for (i = 31; i >= 0; i--) {
1552 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
1553 if (i == 24 || i == 16 || i == 8)
1554 str[w++] = ' ';
1555 }
1556 str[w++] = 0;
njnca82cc02004-11-22 17:18:48 +00001557 tl_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00001558}
1559
1560/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
1561 state table. */
1562
1563static void vg_show_reg_tags ( void )
1564{
1565 Char buf1[36];
1566 Char buf2[36];
1567 UInt z_eax, z_ebx, z_ecx, z_edx,
1568 z_esi, z_edi, z_ebp, z_esp, z_eflags;
1569
1570 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
1571 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
1572 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
1573 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
1574 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
1575 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
1576 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
1577 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
1578 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
1579
1580 uint_to_bits(z_eflags, buf1);
njn9b6d34e2002-10-15 08:48:08 +00001581 VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
njn25e49d8e72002-09-23 09:36:25 +00001582
1583 uint_to_bits(z_eax, buf1);
1584 uint_to_bits(z_ebx, buf2);
1585 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
1586
1587 uint_to_bits(z_ecx, buf1);
1588 uint_to_bits(z_edx, buf2);
1589 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
1590
1591 uint_to_bits(z_esi, buf1);
1592 uint_to_bits(z_edi, buf2);
1593 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
1594
1595 uint_to_bits(z_ebp, buf1);
1596 uint_to_bits(z_esp, buf2);
1597 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
1598}
1599
1600
1601/* For debugging only. Scan the address space and touch all allegedly
1602   addressable words. Useful for establishing where Valgrind's idea of
1603   addressability has diverged from what the kernel believes. */
1604
1605static
1606void zzzmemscan_notify_word ( Addr a, UInt w )
1607{
1608}
1609
1610void zzzmemscan ( void )
1611{
1612 Int n_notifies
1613 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
1614 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
1615}
1616#endif
1617
1618
1619
1620
1621#if 0
1622static Int zzz = 0;
1623
1624void show_bb ( Addr eip_next )
1625{
1626 VG_(printf)("[%4d] ", zzz);
1627   vg_show_reg_tags();
1628 VG_(translate) ( eip_next, NULL, NULL, NULL );
1629}
1630#endif /* 0 */
1631
njn25e49d8e72002-09-23 09:36:25 +00001632
1633/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001634/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001635/*------------------------------------------------------------*/
1636
njn43c799e2003-04-08 00:08:52 +00001637Bool MC_(clo_avoid_strlen_errors) = True;
1638Bool MC_(clo_cleanup) = True;
1639
njn26f02512004-11-22 18:33:15 +00001640Bool TL_(process_cmd_line_option)(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00001641{
nethercote27fec902004-06-16 21:26:32 +00001642 VG_BOOL_CLO("--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
1643 else VG_BOOL_CLO("--cleanup", MC_(clo_cleanup))
njn25e49d8e72002-09-23 09:36:25 +00001644 else
njn43c799e2003-04-08 00:08:52 +00001645 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001646
1647 return True;
njn25e49d8e72002-09-23 09:36:25 +00001648}
1649
njn26f02512004-11-22 18:33:15 +00001650void TL_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001651{
njn3e884182003-04-15 13:03:23 +00001652 MAC_(print_common_usage)();
1653 VG_(printf)(
1654" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1655 );
1656}
1657
njn26f02512004-11-22 18:33:15 +00001658void TL_(print_debug_usage)(void)
njn3e884182003-04-15 13:03:23 +00001659{
1660 MAC_(print_common_debug_usage)();
1661 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00001662" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00001663 );
njn25e49d8e72002-09-23 09:36:25 +00001664}
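/* For reference, a hedged example of how the options printed above are
   passed on the command line (program name invented):

      valgrind --tool=memcheck --avoid-strlen-errors=no --cleanup=yes ./a.out
*/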
1665
nethercote8b76fe52004-11-08 19:20:09 +00001666/*------------------------------------------------------------*/
1667/*--- Client requests ---*/
1668/*------------------------------------------------------------*/
1669
1670/* Client block management:
1671
1672 This is managed as an expanding array of client block descriptors.
1673 Indices of live descriptors are issued to the client, so it can ask
1674 to free them later. Therefore we cannot slide live entries down
1675 over dead ones. Instead we must use free/inuse flags and scan for
1676 an empty slot at allocation time. This in turn means allocation is
1677 relatively expensive, so we hope this does not happen too often.
1678*/
1679
1680typedef
1681 enum { CG_NotInUse, CG_NoAccess, CG_Writable, CG_Readable }
1682 CGenBlockKind;
1683
1684typedef
1685 struct {
1686 Addr start;
1687 SizeT size;
1688 ExeContext* where;
1689 CGenBlockKind kind;
1690 }
1691 CGenBlock;
1692
1693/* This subsystem is self-initialising. */
1694static UInt vg_cgb_size = 0;
1695static UInt vg_cgb_used = 0;
1696static CGenBlock* vg_cgbs = NULL;
1697
1698/* Stats for this subsystem. */
1699static UInt vg_cgb_used_MAX = 0; /* Max in use. */
1700static UInt vg_cgb_allocs = 0; /* Number of allocs. */
1701static UInt vg_cgb_discards = 0; /* Number of discards. */
1702static UInt vg_cgb_search = 0; /* Number of searches. */
1703
1704
1705static
1706Int vg_alloc_client_block ( void )
1707{
1708 UInt i, sz_new;
1709 CGenBlock* cgbs_new;
1710
1711 vg_cgb_allocs++;
1712
1713 for (i = 0; i < vg_cgb_used; i++) {
1714 vg_cgb_search++;
1715 if (vg_cgbs[i].kind == CG_NotInUse)
1716 return i;
1717 }
1718
1719 /* Not found. Try to allocate one at the end. */
1720 if (vg_cgb_used < vg_cgb_size) {
1721 vg_cgb_used++;
1722 return vg_cgb_used-1;
1723 }
1724
1725 /* Ok, we have to allocate a new one. */
njnca82cc02004-11-22 17:18:48 +00001726 tl_assert(vg_cgb_used == vg_cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00001727 sz_new = (vg_cgbs == NULL) ? 10 : (2 * vg_cgb_size);
1728
1729 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
1730 for (i = 0; i < vg_cgb_used; i++)
1731 cgbs_new[i] = vg_cgbs[i];
1732
1733 if (vg_cgbs != NULL)
1734 VG_(free)( vg_cgbs );
1735 vg_cgbs = cgbs_new;
1736
1737 vg_cgb_size = sz_new;
1738 vg_cgb_used++;
1739 if (vg_cgb_used > vg_cgb_used_MAX)
1740 vg_cgb_used_MAX = vg_cgb_used;
1741 return vg_cgb_used-1;
1742}
1743
1744
1745static void show_client_block_stats ( void )
1746{
1747 VG_(message)(Vg_DebugMsg,
1748 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
1749 vg_cgb_allocs, vg_cgb_discards, vg_cgb_used_MAX, vg_cgb_search
1750 );
1751}
1752
1753static Bool find_addr(VgHashNode* sh_ch, void* ap)
1754{
1755 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
1756 Addr a = *(Addr*)ap;
1757
1758 return VG_(addr_is_in_block)(a, m->data, m->size);
1759}
1760
1761static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
1762{
1763 UInt i;
1764 /* VG_(printf)("try to identify %d\n", a); */
1765
1766 /* Perhaps it's a general block ? */
1767 for (i = 0; i < vg_cgb_used; i++) {
1768 if (vg_cgbs[i].kind == CG_NotInUse)
1769 continue;
1770 if (VG_(addr_is_in_block)(a, vg_cgbs[i].start, vg_cgbs[i].size)) {
1771 MAC_Mempool **d, *mp;
1772
1773 /* OK - maybe it's a mempool, too? */
1774 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
1775 (UWord)vg_cgbs[i].start,
1776 (void*)&d);
1777 if(mp != NULL) {
1778 if(mp->chunks != NULL) {
1779 MAC_Chunk *mc;
1780
1781 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
1782 if(mc != NULL) {
1783 ai->akind = UserG;
1784 ai->blksize = mc->size;
1785 ai->rwoffset = (Int)(a) - (Int)mc->data;
1786 ai->lastchange = mc->where;
1787 return True;
1788 }
1789 }
1790 ai->akind = Mempool;
1791 ai->blksize = vg_cgbs[i].size;
1792 ai->rwoffset = (Int)(a) - (Int)(vg_cgbs[i].start);
1793 ai->lastchange = vg_cgbs[i].where;
1794 return True;
1795 }
1796 ai->akind = UserG;
1797 ai->blksize = vg_cgbs[i].size;
1798 ai->rwoffset = (Int)(a) - (Int)(vg_cgbs[i].start);
1799 ai->lastchange = vg_cgbs[i].where;
1800 return True;
1801 }
1802 }
1803 return False;
1804}
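/* Illustrative only: a hedged sketch of the mempool client requests
   whose addresses client_perm_maybe_describe() can later describe.
   The macro names (VALGRIND_CREATE_MEMPOOL and friends) are assumed to
   be the ones declared in the client-request headers for the
   VG_USERREQ__*_MEMPOOL codes filtered below; the pool layout and
   names are invented for the example. */
#if 0
#include "valgrind.h"

static void example_mempool_usage ( char* pool, char* chunk )
{
   /* Register 'pool' as a custom allocation arena: no redzones,
      contents not zeroed. */
   VALGRIND_CREATE_MEMPOOL(pool, 0, 0);

   /* Tell Memcheck that 'chunk' (64 bytes inside the pool) has been
      handed out by the custom allocator ... */
   VALGRIND_MEMPOOL_ALLOC(pool, chunk, 64);

   /* ... and later returned to it. */
   VALGRIND_MEMPOOL_FREE(pool, chunk);

   /* Finally, retire the whole arena. */
   VALGRIND_DESTROY_MEMPOOL(pool);
}
#endif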
1805
njn26f02512004-11-22 18:33:15 +00001806Bool TL_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00001807{
1808 Int i;
1809 Bool ok;
1810 Addr bad_addr;
1811
njnfc26ff92004-11-22 19:12:49 +00001812 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00001813 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
1814 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
1815 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
1816 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
1817 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
1818 && VG_USERREQ__MEMPOOL_FREE != arg[0])
1819 return False;
1820
1821 switch (arg[0]) {
1822 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
1823 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
1824 if (!ok)
1825 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/True,
1826 /*isUnaddr*/True );
1827 *ret = ok ? (UWord)NULL : bad_addr;
1828 break;
1829
1830 case VG_USERREQ__CHECK_READABLE: { /* check readable */
1831 MC_ReadResult res;
1832 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
1833 if (MC_AddrErr == res)
1834 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
1835 /*isUnaddr*/True );
1836 else if (MC_ValueErr == res)
1837 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
1838 /*isUnaddr*/False );
1839 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
1840 break;
1841 }
1842
1843 case VG_USERREQ__DO_LEAK_CHECK:
1844 mc_detect_memory_leaks();
1845 *ret = 0; /* return value is meaningless */
1846 break;
1847
1848 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
1849 i = vg_alloc_client_block();
1850 /* VG_(printf)("allocated %d %p\n", i, vg_cgbs); */
1851 vg_cgbs[i].kind = CG_NoAccess;
1852 vg_cgbs[i].start = arg[1];
1853 vg_cgbs[i].size = arg[2];
1854 vg_cgbs[i].where = VG_(get_ExeContext) ( tid );
1855 mc_make_noaccess ( arg[1], arg[2] );
1856 *ret = i;
1857 break;
1858
1859 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
1860 i = vg_alloc_client_block();
1861 vg_cgbs[i].kind = CG_Writable;
1862 vg_cgbs[i].start = arg[1];
1863 vg_cgbs[i].size = arg[2];
1864 vg_cgbs[i].where = VG_(get_ExeContext) ( tid );
1865 mc_make_writable ( arg[1], arg[2] );
1866 *ret = i;
1867 break;
1868
1869 case VG_USERREQ__MAKE_READABLE: /* make readable */
1870 i = vg_alloc_client_block();
1871 vg_cgbs[i].kind = CG_Readable;
1872 vg_cgbs[i].start = arg[1];
1873 vg_cgbs[i].size = arg[2];
1874 vg_cgbs[i].where = VG_(get_ExeContext) ( tid );
1875 mc_make_readable ( arg[1], arg[2] );
1876 *ret = i;
1877 break;
1878
1879 case VG_USERREQ__DISCARD: /* discard */
1880 if (vg_cgbs == NULL
1881 || arg[2] >= vg_cgb_used || vg_cgbs[arg[2]].kind == CG_NotInUse)
1882 return 1;
njnca82cc02004-11-22 17:18:48 +00001883         tl_assert(arg[2] < vg_cgb_used);
nethercote8b76fe52004-11-08 19:20:09 +00001884 vg_cgbs[arg[2]].kind = CG_NotInUse;
1885 vg_cgb_discards++;
1886 *ret = 0;
1887 break;
1888
1889 case VG_USERREQ__GET_VBITS:
1890 /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
1891 error. */
1892 /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
1893 *ret = mc_get_or_set_vbits_for_client
1894 ( tid, arg[1], arg[2], arg[3], False /* get them */ );
1895 break;
1896
1897 case VG_USERREQ__SET_VBITS:
1898 /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
1899 error. */
1900 /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
1901 *ret = mc_get_or_set_vbits_for_client
1902 ( tid, arg[1], arg[2], arg[3], True /* set them */ );
1903 break;
1904
1905 default:
1906 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
1907 return True;
1908 } else {
1909 VG_(message)(Vg_UserMsg,
1910 "Warning: unknown memcheck client request code %llx",
1911 (ULong)arg[0]);
1912 return False;
1913 }
1914 }
1915 return True;
1916}
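/* Illustrative only: a hedged sketch of the client-block life cycle
   implemented by the MAKE_NOACCESS / MAKE_READABLE / DISCARD cases
   above.  The macros are assumed to be those declared in memcheck.h;
   the handle they return is the vg_cgbs[] index issued by
   vg_alloc_client_block().  Buffer names are invented for the
   example. */
#if 0
#include "memcheck.h"

static void example_client_block ( char* buf, unsigned int nbytes )
{
   int h1, h2;

   /* Fence the buffer off; any access to it is now reported, and this
      point is remembered for use in error messages. */
   h1 = VALGRIND_MAKE_NOACCESS(buf, nbytes);

   /* ... code that must not touch 'buf' runs here ... */

   /* Open the buffer up again; this issues a second descriptor. */
   h2 = VALGRIND_MAKE_READABLE(buf, nbytes);

   /* Release both descriptors so their slots can be reused. */
   VALGRIND_DISCARD(h1);
   VALGRIND_DISCARD(h2);
}
#endif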
njn25e49d8e72002-09-23 09:36:25 +00001917
1918/*------------------------------------------------------------*/
1919/*--- Setup ---*/
1920/*------------------------------------------------------------*/
1921
njn26f02512004-11-22 18:33:15 +00001922void TL_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001923{
njn810086f2002-11-14 12:42:47 +00001924 VG_(details_name) ("Memcheck");
1925 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00001926 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00001927 VG_(details_copyright_author)(
nethercote08fa9a72004-07-16 17:44:00 +00001928 "Copyright (C) 2002-2004, and GNU GPL'd, by Julian Seward et al.");
nethercote421281e2003-11-20 16:20:55 +00001929 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj78210aa2002-12-01 02:55:46 +00001930 VG_(details_avg_translation_sizeB) ( 228 );
njn25e49d8e72002-09-23 09:36:25 +00001931
njn810086f2002-11-14 12:42:47 +00001932 VG_(needs_core_errors) ();
njn95ec8702004-11-22 16:46:13 +00001933 VG_(needs_tool_errors) ();
njn810086f2002-11-14 12:42:47 +00001934 VG_(needs_libc_freeres) ();
njn810086f2002-11-14 12:42:47 +00001935 VG_(needs_shadow_regs) ();
1936 VG_(needs_command_line_options)();
1937 VG_(needs_client_requests) ();
1938 VG_(needs_extended_UCode) ();
1939 VG_(needs_syscall_wrapper) ();
njn810086f2002-11-14 12:42:47 +00001940 VG_(needs_sanity_checks) ();
fitzhardinge98abfc72003-12-16 02:05:15 +00001941 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00001942
njn3e884182003-04-15 13:03:23 +00001943 MAC_( new_mem_heap) = & mc_new_mem_heap;
nethercote8b76fe52004-11-08 19:20:09 +00001944 MAC_( ban_mem_heap) = & mc_make_noaccess;
njn3e884182003-04-15 13:03:23 +00001945 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
nethercote8b76fe52004-11-08 19:20:09 +00001946 MAC_( die_mem_heap) = & mc_make_noaccess;
1947 MAC_(check_noaccess) = & mc_check_noaccess;
njn3e884182003-04-15 13:03:23 +00001948
fitzhardinge98abfc72003-12-16 02:05:15 +00001949 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
nethercote8b76fe52004-11-08 19:20:09 +00001950 VG_(init_new_mem_stack_signal) ( & mc_make_writable );
1951 VG_(init_new_mem_brk) ( & mc_make_writable );
fitzhardinge98abfc72003-12-16 02:05:15 +00001952 VG_(init_new_mem_mmap) ( & mc_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001953
fitzhardinge98abfc72003-12-16 02:05:15 +00001954 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
1955 VG_(init_change_mem_mprotect) ( & mc_set_perms );
njn3e884182003-04-15 13:03:23 +00001956
nethercote8b76fe52004-11-08 19:20:09 +00001957 VG_(init_die_mem_stack_signal) ( & mc_make_noaccess );
1958 VG_(init_die_mem_brk) ( & mc_make_noaccess );
1959 VG_(init_die_mem_munmap) ( & mc_make_noaccess );
njn3e884182003-04-15 13:03:23 +00001960
fitzhardinge98abfc72003-12-16 02:05:15 +00001961 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
1962 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
1963 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
1964 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
1965 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
1966 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001967
fitzhardinge98abfc72003-12-16 02:05:15 +00001968 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
1969 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
1970 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
1971 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
1972 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
1973 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001974
nethercote8b76fe52004-11-08 19:20:09 +00001975 VG_(init_ban_mem_stack) ( & mc_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00001976
fitzhardinge98abfc72003-12-16 02:05:15 +00001977 VG_(init_pre_mem_read) ( & mc_check_is_readable );
1978 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
1979 VG_(init_pre_mem_write) ( & mc_check_is_writable );
nethercote8b76fe52004-11-08 19:20:09 +00001980 VG_(init_post_mem_write) ( & mc_make_readable );
1981
1982 VG_(init_pre_reg_read) ( & mc_pre_reg_read );
njn25e49d8e72002-09-23 09:36:25 +00001983
fitzhardinge98abfc72003-12-16 02:05:15 +00001984 VG_(init_post_regs_write_init) ( & mc_post_regs_write_init );
1985 VG_(init_post_reg_write_syscall_return) ( & mc_post_reg_write );
1986 VG_(init_post_reg_write_deliver_signal) ( & mc_post_reg_write );
1987 VG_(init_post_reg_write_pthread_return) ( & mc_post_reg_write );
1988 VG_(init_post_reg_write_clientreq_return) ( & mc_post_reg_write );
1989 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00001990
njn9b007f62003-04-07 14:40:25 +00001991 /* Three compact slots taken up by stack memory helpers */
njn5c004e42002-11-18 11:04:50 +00001992 VG_(register_compact_helper)((Addr) & MC_(helper_value_check4_fail));
1993 VG_(register_compact_helper)((Addr) & MC_(helper_value_check0_fail));
1994 VG_(register_compact_helper)((Addr) & MC_(helper_value_check2_fail));
1995 VG_(register_compact_helper)((Addr) & MC_(helperc_STOREV4));
njn5c004e42002-11-18 11:04:50 +00001996 VG_(register_compact_helper)((Addr) & MC_(helperc_LOADV4));
njn25e49d8e72002-09-23 09:36:25 +00001997
njnd04b7c62002-10-03 14:05:52 +00001998 /* These two made non-compact because 2-byte transactions are rare. */
njn5c004e42002-11-18 11:04:50 +00001999 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV2));
njn9b007f62003-04-07 14:40:25 +00002000 VG_(register_noncompact_helper)((Addr) & MC_(helperc_STOREV1));
njn5c004e42002-11-18 11:04:50 +00002001 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV2));
njn9b007f62003-04-07 14:40:25 +00002002 VG_(register_noncompact_helper)((Addr) & MC_(helperc_LOADV1));
njn5c004e42002-11-18 11:04:50 +00002003 VG_(register_noncompact_helper)((Addr) & MC_(fpu_write_check));
2004 VG_(register_noncompact_helper)((Addr) & MC_(fpu_read_check));
2005 VG_(register_noncompact_helper)((Addr) & MC_(helper_value_check1_fail));
njn25e49d8e72002-09-23 09:36:25 +00002006
2007 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2008 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njn9b007f62003-04-07 14:40:25 +00002009 VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00002010
njn43c799e2003-04-08 00:08:52 +00002011 /* Additional block description for VG_(describe_addr)() */
nethercote8b76fe52004-11-08 19:20:09 +00002012 MAC_(describe_addr_supp) = client_perm_maybe_describe;
njn43c799e2003-04-08 00:08:52 +00002013
njnd04b7c62002-10-03 14:05:52 +00002014 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00002015 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00002016}
2017
njn26f02512004-11-22 18:33:15 +00002018void TL_(post_clo_init) ( void )
njn5c004e42002-11-18 11:04:50 +00002019{
2020}
2021
njn26f02512004-11-22 18:33:15 +00002022void TL_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002023{
nethercote8b76fe52004-11-08 19:20:09 +00002024 MAC_(common_fini)( mc_detect_memory_leaks );
njn3e884182003-04-15 13:03:23 +00002025
njn5c004e42002-11-18 11:04:50 +00002026 if (0) {
2027 VG_(message)(Vg_DebugMsg,
2028 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002029 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002030 }
njn25e49d8e72002-09-23 09:36:25 +00002031}
2032
njn26f02512004-11-22 18:33:15 +00002033VG_DETERMINE_INTERFACE_VERSION(TL_(pre_clo_init), 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002034
njn25e49d8e72002-09-23 09:36:25 +00002035/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002036/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002037/*--------------------------------------------------------------------*/