blob: 39f3c01ba378cb16de5808decc141d4235ddd8e3 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjc859fbf2005-04-22 21:10:28 +000033/* TODO 22 Apr 05
sewardj45d94cc2005-04-20 14:44:11 +000034
sewardjc859fbf2005-04-22 21:10:28 +000035 test whether it would be faster, for LOADV4, to check
36 only for 8-byte validity on the fast path
sewardj45d94cc2005-04-20 14:44:11 +000037*/
38
njnc7561b92005-06-19 01:24:32 +000039#include "pub_tool_basics.h"
njn4802b382005-06-11 04:58:29 +000040#include "pub_tool_aspacemgr.h"
njnc7561b92005-06-19 01:24:32 +000041#include "pub_tool_errormgr.h" // For mac_shared.h
42#include "pub_tool_execontext.h" // For mac_shared.h
43#include "pub_tool_hashtable.h" // For mac_shared.h
njn97405b22005-06-02 03:39:33 +000044#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000045#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000046#include "pub_tool_libcprint.h"
njnf536bbb2005-06-13 04:21:38 +000047#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000048#include "pub_tool_mallocfree.h"
49#include "pub_tool_options.h"
njnc7561b92005-06-19 01:24:32 +000050#include "pub_tool_replacemalloc.h"
51#include "pub_tool_tooliface.h"
52#include "pub_tool_threadstate.h"
53
54#include "mc_include.h"
55#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000056
sewardj45d94cc2005-04-20 14:44:11 +000057
/* Branch-prediction hints: wrap a condition with the likely/unlikely
   outcome so the compiler can lay out the fast path.  Compiled away to
   a plain condition where __builtin_expect is unavailable. */
#ifdef HAVE_BUILTIN_EXPECT
#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
#else
#define EXPECTED_TAKEN(cond)     (cond)
#define EXPECTED_NOT_TAKEN(cond) (cond)
#endif

/* Define to debug the mem audit system.  Set to:
      0  no debugging, fast cases are used
      1  some sanity checking, fast cases are used
      2  max sanity checking, only slow cases are used
*/
#define VG_DEBUG_MEMORY 0

/* Debug printing; disabled by default (expands to nothing). */
#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
74
njn25e49d8e72002-09-23 09:36:25 +000075
njn25e49d8e72002-09-23 09:36:25 +000076/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000077/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000078/*------------------------------------------------------------*/
79
sewardjc859fbf2005-04-22 21:10:28 +000080/* TODO: fix this comment */
81//zz /* All reads and writes are checked against a memory map, which
82//zz records the state of all memory in the process. The memory map is
83//zz organised like this:
84//zz
85//zz The top 16 bits of an address are used to index into a top-level
86//zz map table, containing 65536 entries. Each entry is a pointer to a
87//zz second-level map, which records the accesibililty and validity
88//zz permissions for the 65536 bytes indexed by the lower 16 bits of the
89//zz address. Each byte is represented by nine bits, one indicating
90//zz accessibility, the other eight validity. So each second-level map
91//zz contains 73728 bytes. This two-level arrangement conveniently
92//zz divides the 4G address space into 64k lumps, each size 64k bytes.
93//zz
94//zz All entries in the primary (top-level) map must point to a valid
95//zz secondary (second-level) map. Since most of the 4G of address
96//zz space will not be in use -- ie, not mapped at all -- there is a
njn02bc4b82005-05-15 17:28:26 +000097//zz distinguished secondary map, which indicates 'not addressible and
sewardjc859fbf2005-04-22 21:10:28 +000098//zz not valid' writeable for all bytes. Entries in the primary map for
99//zz which the entire 64k is not in use at all point at this
100//zz distinguished map.
101//zz
102//zz There are actually 4 distinguished secondaries. These are used to
103//zz represent a memory range which is either not addressable (validity
104//zz doesn't matter), addressable+not valid, addressable+valid.
sewardjc859fbf2005-04-22 21:10:28 +0000105//zz */
106
sewardj45d94cc2005-04-20 14:44:11 +0000107/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000108
sewardj23eb2fd2005-04-22 16:29:19 +0000109/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000110
#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this.  Number of entries in the main primary map;
   must be a power of 2 (see N_PRIMARY_BITS above). */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this.  Highest address covered by the main primary
   map; anything above goes through the auxiliary map. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Stats maps --------------- */

/* Statistics counters, reported by the sanity/stats machinery. */
static Int   n_secmaps_issued   = 0;  /* non-distinguished SecMaps allocated */
static ULong n_auxmap_searches  = 0;  /* lookups in the auxiliary map */
static ULong n_auxmap_cmps      = 0;  /* total comparisons done by lookups */
static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;
sewardj45d94cc2005-04-20 14:44:11 +0000139
140
141/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000142
/* A secondary map covers a 64k chunk of address space: one A
   (accessibility) bit per byte, packed into 8192 bytes, plus one V
   (validity) byte per byte. */
typedef
   struct {
      UChar abits[8192];   /* 65536 A bits, 8 per byte */
      UChar vbyte[65536];  /* one V byte per data byte */
   }
   SecMap;
149
/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified; they are shared
   templates, copied on demand by copy_for_writing(). */
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000159
sewardj45d94cc2005-04-20 14:44:11 +0000160static inline Bool is_distinguished_sm ( SecMap* sm ) {
161 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
162}
njnb8dca862005-03-14 02:42:44 +0000163
sewardj45d94cc2005-04-20 14:44:11 +0000164/* dist_sm points to one of our three distinguished secondaries. Make
165 a copy of it so that we can write to it.
166*/
167static SecMap* copy_for_writing ( SecMap* dist_sm )
168{
169 SecMap* new_sm;
170 tl_assert(dist_sm == &sm_distinguished[0]
171 || dist_sm == &sm_distinguished[1]
172 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000173
sewardj45f4e7c2005-09-27 19:20:21 +0000174 new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
175 if (new_sm == NULL)
176 VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
177 sizeof(SecMap) );
sewardj45d94cc2005-04-20 14:44:11 +0000178 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
sewardj23eb2fd2005-04-22 16:29:19 +0000179 n_secmaps_issued++;
sewardj45d94cc2005-04-20 14:44:11 +0000180 return new_sm;
181}
njnb8dca862005-03-14 02:42:44 +0000182
sewardj45d94cc2005-04-20 14:44:11 +0000183
184/* --------------- Primary maps --------------- */
185
186/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000187 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000188 handled using the auxiliary primary map.
189*/
sewardj23eb2fd2005-04-22 16:29:19 +0000190static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000191
192
193/* An entry in the auxiliary primary map. base must be a 64k-aligned
194 value, and sm points at the relevant secondary map. As with the
195 main primary map, the secondary may be either a real secondary, or
196 one of the three distinguished secondaries.
197*/
typedef
   struct {
      Addr    base;  /* 64k-aligned base address of the chunk */
      SecMap* sm;    /* real or distinguished secondary for it */
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts.  Currently a fixed-size table;
   running out is a hard assertion failure (see
   find_or_alloc_in_auxmap). */
#define N_AUXMAPS 20000 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;   /* capacity */
static Int        auxmap_used = 0;           /* entries in use */
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];
211
sewardj45d94cc2005-04-20 14:44:11 +0000212
213/* Find an entry in the auxiliary map. If an entry is found, move it
214 one step closer to the front of the array, then return its address.
sewardj05fe85e2005-04-27 22:46:36 +0000215 If an entry is not found, return NULL. Note carefully that
sewardj45d94cc2005-04-20 14:44:11 +0000216 because a each call potentially rearranges the entries, each call
217 to this function invalidates ALL AuxMapEnt*s previously obtained by
218 calling this fn.
219*/
sewardj05fe85e2005-04-27 22:46:36 +0000220static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000221{
222 UWord i;
223 tl_assert(a > MAX_PRIMARY_ADDRESS);
224
225 a &= ~(Addr)0xFFFF;
226
227 /* Search .. */
228 n_auxmap_searches++;
229 for (i = 0; i < auxmap_used; i++) {
230 if (auxmap[i].base == a)
231 break;
232 }
233 n_auxmap_cmps += (ULong)(i+1);
234
235 if (i < auxmap_used) {
236 /* Found it. Nudge it a bit closer to the front. */
237 if (i > 0) {
238 AuxMapEnt tmp = auxmap[i-1];
239 auxmap[i-1] = auxmap[i];
240 auxmap[i] = tmp;
241 i--;
242 }
243 return &auxmap[i];
244 }
245
sewardj05fe85e2005-04-27 22:46:36 +0000246 return NULL;
247}
248
249
250/* Find an entry in the auxiliary map. If an entry is found, move it
251 one step closer to the front of the array, then return its address.
252 If an entry is not found, allocate one. Note carefully that
253 because a each call potentially rearranges the entries, each call
254 to this function invalidates ALL AuxMapEnt*s previously obtained by
255 calling this fn.
256*/
257static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
258{
259 AuxMapEnt* am = maybe_find_in_auxmap(a);
260 if (am)
261 return am;
262
sewardj45d94cc2005-04-20 14:44:11 +0000263 /* We didn't find it. Hmm. This is a new piece of address space.
264 We'll need to allocate a new AuxMap entry for it. */
265 if (auxmap_used >= auxmap_size) {
266 tl_assert(auxmap_used == auxmap_size);
267 /* Out of auxmap entries. */
268 tl_assert2(0, "failed to expand the auxmap table");
269 }
270
271 tl_assert(auxmap_used < auxmap_size);
272
273 auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
274 auxmap[auxmap_used].sm = &sm_distinguished[SM_DIST_NOACCESS];
275
276 if (0)
277 VG_(printf)("new auxmap, base = 0x%llx\n",
278 (ULong)auxmap[auxmap_used].base );
279
280 auxmap_used++;
281 return &auxmap[auxmap_used-1];
282}
283
284
285/* --------------- SecMap fundamentals --------------- */
286
287/* Produce the secmap for 'a', either from the primary map or by
288 ensuring there is an entry for it in the aux primary map. The
289 secmap may be a distinguished one as the caller will only want to
290 be able to read it.
291*/
292static SecMap* get_secmap_readable ( Addr a )
293{
294 if (a <= MAX_PRIMARY_ADDRESS) {
295 UWord pm_off = a >> 16;
296 return primary_map[ pm_off ];
297 } else {
298 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
299 return am->sm;
300 }
301}
302
sewardj05fe85e2005-04-27 22:46:36 +0000303/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
304 allocate one if one doesn't already exist. This is used by the
305 leak checker.
306*/
307static SecMap* maybe_get_secmap_for ( Addr a )
308{
309 if (a <= MAX_PRIMARY_ADDRESS) {
310 UWord pm_off = a >> 16;
311 return primary_map[ pm_off ];
312 } else {
313 AuxMapEnt* am = maybe_find_in_auxmap(a);
314 return am ? am->sm : NULL;
315 }
316}
317
318
319
sewardj45d94cc2005-04-20 14:44:11 +0000320/* Produce the secmap for 'a', either from the primary map or by
321 ensuring there is an entry for it in the aux primary map. The
322 secmap may not be a distinguished one, since the caller will want
323 to be able to write it. If it is a distinguished secondary, make a
324 writable copy of it, install it, and return the copy instead. (COW
325 semantics).
326*/
327static SecMap* get_secmap_writable ( Addr a )
328{
329 if (a <= MAX_PRIMARY_ADDRESS) {
330 UWord pm_off = a >> 16;
331 if (is_distinguished_sm(primary_map[ pm_off ]))
332 primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
333 return primary_map[pm_off];
334 } else {
335 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
336 if (is_distinguished_sm(am->sm))
337 am->sm = copy_for_writing(am->sm);
338 return am->sm;
339 }
340}
341
342
343/* --------------- Endianness helpers --------------- */
344
345/* Returns the offset in memory of the byteno-th most significant byte
346 in a wordszB-sized word, given the specified endianness. */
347static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
348 UWord byteno ) {
349 return bigendian ? (wordszB-1-byteno) : byteno;
350}
351
352
353/* --------------- Fundamental functions --------------- */
354
sewardj7d647cb2006-03-03 21:02:18 +0000355static inline
sewardj45d94cc2005-04-20 14:44:11 +0000356void get_abit_and_vbyte ( /*OUT*/UWord* abit,
357 /*OUT*/UWord* vbyte,
358 Addr a )
359{
360 SecMap* sm = get_secmap_readable(a);
361 *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
362 *abit = read_bit_array(sm->abits, a & 0xFFFF);
363}
364
sewardj7d647cb2006-03-03 21:02:18 +0000365static inline
sewardj45d94cc2005-04-20 14:44:11 +0000366UWord get_abit ( Addr a )
367{
368 SecMap* sm = get_secmap_readable(a);
369 return read_bit_array(sm->abits, a & 0xFFFF);
370}
371
372static
373void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
374{
375 SecMap* sm = get_secmap_writable(a);
376 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
377 write_bit_array(sm->abits, a & 0xFFFF, abit);
378}
379
380static
381void set_vbyte ( Addr a, UWord vbyte )
382{
383 SecMap* sm = get_secmap_writable(a);
384 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
385}
386
387
388/* --------------- Load/store slow cases. --------------- */
389
/* Generic, slow-path load of szB bytes (1/2/4/8) from 'a'.  Returns
   the V bits for the loaded value, reporting an address error if any
   byte is inaccessible (unless the partial-load exemption applies). */
static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok, partial_load_exemption_applies;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* MSB-first so repeated shift-left assembles the bytes in the
      right order. */
   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      /* Unaddressible bytes are forced to Defined so no spurious
         value errors follow the address error reported below. */
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   /* This is a hack which avoids producing errors for code which
      insists in stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressible place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MAC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
                                   && VG_IS_WORD_ALIGNED(a)
                                   && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}
443
444
/* Generic, slow-path store of szB bytes (1/2/4/8) of V bits 'vbytes'
   to 'a'.  Reports an address error if any byte is inaccessible; the
   V bytes are written regardless. */
static
void mc_STOREVn_slow ( Addr a, SizeT szB, ULong vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressibility of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}
475
476
sewardj45d94cc2005-04-20 14:44:11 +0000477//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
478//zz
479//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
480//zz {
481//zz SecMap* sm;
482//zz UInt sm_off;
483//zz UChar abits8;
484//zz PROF_EVENT(24);
485//zz # ifdef VG_DEBUG_MEMORY
486//zz tl_assert(VG_IS_4_ALIGNED(a));
487//zz # endif
488//zz sm = primary_map[PM_IDX(a)];
489//zz sm_off = SM_OFF(a);
490//zz abits8 = sm->abits[sm_off >> 3];
491//zz abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
492//zz abits8 &= 0x0F;
493//zz return abits8;
494//zz }
495//zz
496//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
497//zz {
498//zz SecMap* sm = primary_map[PM_IDX(a)];
499//zz UInt sm_off = SM_OFF(a);
500//zz PROF_EVENT(25);
501//zz # ifdef VG_DEBUG_MEMORY
502//zz tl_assert(VG_IS_4_ALIGNED(a));
503//zz # endif
504//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
505//zz }
506//zz
507//zz
508//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
509//zz {
510//zz SecMap* sm;
511//zz UInt sm_off;
512//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
513//zz sm = primary_map[PM_IDX(a)];
514//zz sm_off = SM_OFF(a);
515//zz PROF_EVENT(23);
516//zz # ifdef VG_DEBUG_MEMORY
517//zz tl_assert(VG_IS_4_ALIGNED(a));
518//zz # endif
519//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
520//zz }
sewardjee070842003-07-05 17:53:55 +0000521
522
njn25e49d8e72002-09-23 09:36:25 +0000523/*------------------------------------------------------------*/
524/*--- Setting permissions over address ranges. ---*/
525/*------------------------------------------------------------*/
526
sewardj23eb2fd2005-04-22 16:29:19 +0000527/* Given address 'a', find the place where the pointer to a's
528 secondary map lives. If a falls into the primary map, the returned
529 value points to one of the entries in primary_map[]. Otherwise,
530 the auxiliary primary map is searched for 'a', or an entry is
531 created for it; either way, the returned value points to the
532 relevant AuxMapEnt's .sm field.
533
534 The point of this is to enable set_address_range_perms to assign
535 secondary maps in a uniform way, without worrying about whether a
536 given secondary map is pointed to from the main or auxiliary
537 primary map.
538*/
539
540static SecMap** find_secmap_binder_for_addr ( Addr aA )
541{
542 if (aA > MAX_PRIMARY_ADDRESS) {
543 AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
544 return &am->sm;
545 } else {
546 UWord a = (UWord)aA;
547 UWord sec_no = (UWord)(a >> 16);
548# if VG_DEBUG_MEMORY >= 1
549 tl_assert(sec_no < N_PRIMARY_MAP);
550# endif
551 return &primary_map[sec_no];
552 }
553}
554
555
/* Set the A/V state of the range [aA, aA+len) to the given example
   bits.  Fast path: handle unaligned head/tail bytewise, and the
   8-aligned middle a doubleword at a time, pointing whole 64k chunks
   at a distinguished secondary where possible (space compression). */
static void set_address_range_perms ( Addr aA, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   UWord    a, vbits8, abits8, vbits32, v_off, a_off;
   SecMap*  sm;
   SecMap** binder;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the permissions make sense.  "Invalid address" implies
      "invalid value": there is no noaccess-but-defined state. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %lu, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   a = (UWord)aA;

#  if VG_DEBUG_MEMORY >= 2

   /*------------------ debug-only case ------------------ */
   /* Bytewise everywhere; exercises only the slow helpers. */
   { SizeT i;

     UWord example_vbyte = BIT_TO_BYTE(example_v_bit);

     tl_assert(sizeof(SizeT) == sizeof(Addr));

     if (0 && len >= 4096)
        VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                    (ULong)a, len, example_a_bit, example_v_bit);

     if (len == 0)
        return;

     for (i = 0; i < len; i++) {
        set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
     }
   }

#  else

   /*------------------ standard handling ------------------ */

   /* Decide on the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   if (example_a_bit == VGM_BIT_INVALID) {
      example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
   } else {
      if (example_v_bit == VGM_BIT_VALID) {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
      } else {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
      }
   }

   /* Make various wider versions of the A/V values to use. */
   vbits8  = BIT_TO_BYTE(example_v_bit);
   abits8  = BIT_TO_BYTE(example_a_bit);
   vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;

   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(151, "set_address_range_perms-loop1-pre");
      if (VG_IS_8_ALIGNED(a)) break;
      set_abit_and_vbyte( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0);

   /* Now go in steps of 8 bytes. */
   binder = find_secmap_binder_for_addr(a);

   while (True) {

      if (len < 8) break;

      PROF_EVENT(152, "set_address_range_perms-loop8");

      if ((a & SECONDARY_MASK) == 0) {
         /* we just traversed a primary map boundary, so update the
            binder. */
         binder = find_secmap_binder_for_addr(a);
         PROF_EVENT(153, "set_address_range_perms-update-binder");

         /* Space-optimisation.  If we are setting the entire
            secondary map, just point this entry at one of our
            distinguished secondaries.  However, only do that if it
            already points at a distinguished secondary, since doing
            otherwise would leak the existing secondary.  We could do
            better and free up any pre-existing non-distinguished
            secondary at this point, since we are guaranteed that each
            non-dist secondary only has one pointer to it, and we have
            that pointer right here. */
         if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
            PROF_EVENT(154, "set_address_range_perms-entire-secmap");
            *binder = example_dsm;
            len -= SECONDARY_SIZE;
            a += SECONDARY_SIZE;
            continue;
         }
      }

      /* If the primary is already pointing to a distinguished map
         with the same properties as we're trying to set, then leave
         it that way. */
      if (*binder == example_dsm) {
         a += 8;
         len -= 8;
         continue;
      }

      /* Make sure it's OK to write the secondary. */
      if (is_distinguished_sm(*binder))
         *binder = copy_for_writing(*binder);

      /* Write one abits byte and eight vbytes (as two 32-bit
         stores). */
      sm = *binder;
      v_off = a & 0xFFFF;
      a_off = v_off >> 3;
      sm->abits[a_off] = (UChar)abits8;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;

      a += 8;
      len -= 8;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(155, "set_address_range_perms-loop1-post");
      set_abit_and_vbyte ( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

#  endif
}
sewardj45d94cc2005-04-20 14:44:11 +0000719
sewardjc859fbf2005-04-22 21:10:28 +0000720
721/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +0000722
/* Mark [a, a+len) as neither addressible nor valid. */
static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(40, "mc_make_noaccess");
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}
729
/* Mark [a, a+len) as addressible but with undefined contents. */
static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(41, "mc_make_writable");
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}
736
/* Mark [a, a+len) as addressible with defined contents. */
static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(42, "mc_make_readable");
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}
743
njn9b007f62003-04-07 14:40:25 +0000744
sewardj45f4e7c2005-09-27 19:20:21 +0000745/* --- Block-copy permissions (needed for implementing realloc() and
746 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +0000747
748static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
749{
sewardj45f4e7c2005-09-27 19:20:21 +0000750 SizeT i, j;
sewardjc859fbf2005-04-22 21:10:28 +0000751 UWord abit, vbyte;
752
753 DEBUG("mc_copy_address_range_state\n");
sewardjc859fbf2005-04-22 21:10:28 +0000754 PROF_EVENT(50, "mc_copy_address_range_state");
sewardj45f4e7c2005-09-27 19:20:21 +0000755
756 if (len == 0)
757 return;
758
759 if (src < dst) {
760 for (i = 0, j = len-1; i < len; i++, j--) {
761 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
762 get_abit_and_vbyte( &abit, &vbyte, src+j );
763 set_abit_and_vbyte( dst+j, abit, vbyte );
764 }
765 }
766
767 if (src > dst) {
768 for (i = 0; i < len; i++) {
769 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
770 get_abit_and_vbyte( &abit, &vbyte, src+i );
771 set_abit_and_vbyte( dst+i, abit, vbyte );
772 }
sewardjc859fbf2005-04-22 21:10:28 +0000773 }
774}
775
776
777/* --- Fast case permission setters, for dealing with stacks. --- */
778
njn9b007f62003-04-07 14:40:25 +0000779static __inline__
sewardj5d28efc2005-04-21 22:16:29 +0000780void make_aligned_word32_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000781{
sewardjae986ca2005-10-12 12:53:20 +0000782 UWord a, sec_no, v_off, a_off, mask;
783 SecMap* sm;
784
sewardj5d28efc2005-04-21 22:16:29 +0000785 PROF_EVENT(300, "make_aligned_word32_writable");
786
787# if VG_DEBUG_MEMORY >= 2
788 mc_make_writable(aA, 4);
789# else
790
791 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000792 PROF_EVENT(301, "make_aligned_word32_writable-slow1");
sewardj5d28efc2005-04-21 22:16:29 +0000793 mc_make_writable(aA, 4);
794 return;
795 }
796
sewardjae986ca2005-10-12 12:53:20 +0000797 a = (UWord)aA;
798 sec_no = (UWord)(a >> 16);
sewardj5d28efc2005-04-21 22:16:29 +0000799# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000800 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000801# endif
802
803 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
804 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
805
sewardjae986ca2005-10-12 12:53:20 +0000806 sm = primary_map[sec_no];
807 v_off = a & 0xFFFF;
808 a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +0000809
810 /* Paint the new area as uninitialised. */
811 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
812
sewardjae986ca2005-10-12 12:53:20 +0000813 mask = 0x0F;
sewardj5d28efc2005-04-21 22:16:29 +0000814 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
815 /* mask now contains 1s where we wish to make address bits valid
816 (0s). */
817 sm->abits[a_off] &= ~mask;
818# endif
njn9b007f62003-04-07 14:40:25 +0000819}
820
sewardj5d28efc2005-04-21 22:16:29 +0000821
822static __inline__
823void make_aligned_word32_noaccess ( Addr aA )
824{
sewardjae986ca2005-10-12 12:53:20 +0000825 UWord a, sec_no, v_off, a_off, mask;
826 SecMap* sm;
827
sewardj5d28efc2005-04-21 22:16:29 +0000828 PROF_EVENT(310, "make_aligned_word32_noaccess");
829
830# if VG_DEBUG_MEMORY >= 2
831 mc_make_noaccess(aA, 4);
832# else
833
834 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
835 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
836 mc_make_noaccess(aA, 4);
837 return;
838 }
839
sewardjae986ca2005-10-12 12:53:20 +0000840 a = (UWord)aA;
841 sec_no = (UWord)(a >> 16);
sewardj5d28efc2005-04-21 22:16:29 +0000842# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000843 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000844# endif
845
846 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
847 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
848
sewardjae986ca2005-10-12 12:53:20 +0000849 sm = primary_map[sec_no];
850 v_off = a & 0xFFFF;
851 a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +0000852
853 /* Paint the abandoned data as uninitialised. Probably not
854 necessary, but still .. */
855 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
856
sewardjae986ca2005-10-12 12:53:20 +0000857 mask = 0x0F;
sewardj5d28efc2005-04-21 22:16:29 +0000858 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
859 /* mask now contains 1s where we wish to make address bits invalid
860 (1s). */
861 sm->abits[a_off] |= mask;
862# endif
863}
864
865
njn9b007f62003-04-07 14:40:25 +0000866/* Nb: by "aligned" here we mean 8-byte aligned */
867static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000868void make_aligned_word64_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000869{
sewardjae986ca2005-10-12 12:53:20 +0000870 UWord a, sec_no, v_off, a_off;
871 SecMap* sm;
872
sewardj23eb2fd2005-04-22 16:29:19 +0000873 PROF_EVENT(320, "make_aligned_word64_writable");
874
875# if VG_DEBUG_MEMORY >= 2
876 mc_make_writable(aA, 8);
877# else
878
879 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
880 PROF_EVENT(321, "make_aligned_word64_writable-slow1");
881 mc_make_writable(aA, 8);
882 return;
883 }
884
sewardjae986ca2005-10-12 12:53:20 +0000885 a = (UWord)aA;
886 sec_no = (UWord)(a >> 16);
sewardj23eb2fd2005-04-22 16:29:19 +0000887# if VG_DEBUG_MEMORY >= 1
888 tl_assert(sec_no < N_PRIMARY_MAP);
889# endif
890
891 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
892 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
893
sewardjae986ca2005-10-12 12:53:20 +0000894 sm = primary_map[sec_no];
895 v_off = a & 0xFFFF;
896 a_off = v_off >> 3;
sewardj23eb2fd2005-04-22 16:29:19 +0000897
898 /* Paint the new area as uninitialised. */
899 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
900
901 /* Make the relevant area accessible. */
902 sm->abits[a_off] = VGM_BYTE_VALID;
903# endif
njn9b007f62003-04-07 14:40:25 +0000904}
905
sewardj23eb2fd2005-04-22 16:29:19 +0000906
njn9b007f62003-04-07 14:40:25 +0000907static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000908void make_aligned_word64_noaccess ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000909{
sewardjae986ca2005-10-12 12:53:20 +0000910 UWord a, sec_no, v_off, a_off;
911 SecMap* sm;
912
sewardj23eb2fd2005-04-22 16:29:19 +0000913 PROF_EVENT(330, "make_aligned_word64_noaccess");
914
915# if VG_DEBUG_MEMORY >= 2
916 mc_make_noaccess(aA, 8);
917# else
918
919 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
920 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
921 mc_make_noaccess(aA, 8);
922 return;
923 }
924
sewardjae986ca2005-10-12 12:53:20 +0000925 a = (UWord)aA;
926 sec_no = (UWord)(a >> 16);
sewardj23eb2fd2005-04-22 16:29:19 +0000927# if VG_DEBUG_MEMORY >= 1
928 tl_assert(sec_no < N_PRIMARY_MAP);
929# endif
930
931 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
932 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
933
sewardjae986ca2005-10-12 12:53:20 +0000934 sm = primary_map[sec_no];
935 v_off = a & 0xFFFF;
936 a_off = v_off >> 3;
sewardj23eb2fd2005-04-22 16:29:19 +0000937
938 /* Paint the abandoned data as uninitialised. Probably not
939 necessary, but still .. */
940 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
941
942 /* Make the abandoned area inaccessible. */
943 sm->abits[a_off] = VGM_BYTE_INVALID;
944# endif
njn9b007f62003-04-07 14:40:25 +0000945}
946
sewardj23eb2fd2005-04-22 16:29:19 +0000947
/* The stack-pointer update handling functions.  NOTE(review): the
   macro body is defined elsewhere; presumably it instantiates the
   stack grow/shrink callbacks, wiring the aligned word32/word64 fast
   cases to the helpers above and everything else to the general
   range-based routines -- confirm against the macro's definition. */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );
njn9b007f62003-04-07 14:40:25 +0000956
sewardj45d94cc2005-04-20 14:44:11 +0000957
sewardj826ec492005-05-12 18:05:00 +0000958void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
959{
960 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +0000961 if (0)
962 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
963
964# if 0
965 /* Really slow version */
966 mc_make_writable(base, len);
967# endif
968
969# if 0
970 /* Slow(ish) version, which is fairly easily seen to be correct.
971 */
972 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
973 make_aligned_word64_writable(base + 0);
974 make_aligned_word64_writable(base + 8);
975 make_aligned_word64_writable(base + 16);
976 make_aligned_word64_writable(base + 24);
977
978 make_aligned_word64_writable(base + 32);
979 make_aligned_word64_writable(base + 40);
980 make_aligned_word64_writable(base + 48);
981 make_aligned_word64_writable(base + 56);
982
983 make_aligned_word64_writable(base + 64);
984 make_aligned_word64_writable(base + 72);
985 make_aligned_word64_writable(base + 80);
986 make_aligned_word64_writable(base + 88);
987
988 make_aligned_word64_writable(base + 96);
989 make_aligned_word64_writable(base + 104);
990 make_aligned_word64_writable(base + 112);
991 make_aligned_word64_writable(base + 120);
992 } else {
993 mc_make_writable(base, len);
994 }
995# endif
996
997 /* Idea is: go fast when
998 * 8-aligned and length is 128
999 * the sm is available in the main primary map
1000 * the address range falls entirely with a single
1001 secondary map
1002 * the SM is modifiable
1003 If all those conditions hold, just update the V bits
1004 by writing directly on the v-bit array. We don't care
1005 about A bits; if the address range is marked invalid,
1006 any attempt to access it will elicit an addressing error,
1007 and that's good enough.
1008 */
sewardj2e1a6772006-01-18 04:16:27 +00001009 /* 128 bytes (16 ULongs) is the magic value for ELF amd64. */
sewardj2a3a1a72005-05-12 23:25:43 +00001010 if (EXPECTED_TAKEN( len == 128
1011 && VG_IS_8_ALIGNED(base)
1012 )) {
1013 /* Now we know the address range is suitably sized and
1014 aligned. */
1015 UWord a_lo = (UWord)base;
1016 UWord a_hi = (UWord)(base + 127);
1017 UWord sec_lo = a_lo >> 16;
1018 UWord sec_hi = a_hi >> 16;
1019
1020 if (EXPECTED_TAKEN( sec_lo == sec_hi
1021 && sec_lo <= N_PRIMARY_MAP
1022 )) {
1023 /* Now we know that the entire address range falls within a
1024 single secondary map, and that that secondary 'lives' in
1025 the main primary map. */
1026 SecMap* sm = primary_map[sec_lo];
1027
1028 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
1029 /* And finally, now we know that the secondary in question
1030 is modifiable. */
1031 UWord v_off = a_lo & 0xFFFF;
1032 ULong* p = (ULong*)(&sm->vbyte[v_off]);
1033 p[ 0] = VGM_WORD64_INVALID;
1034 p[ 1] = VGM_WORD64_INVALID;
1035 p[ 2] = VGM_WORD64_INVALID;
1036 p[ 3] = VGM_WORD64_INVALID;
1037 p[ 4] = VGM_WORD64_INVALID;
1038 p[ 5] = VGM_WORD64_INVALID;
1039 p[ 6] = VGM_WORD64_INVALID;
1040 p[ 7] = VGM_WORD64_INVALID;
1041 p[ 8] = VGM_WORD64_INVALID;
1042 p[ 9] = VGM_WORD64_INVALID;
1043 p[10] = VGM_WORD64_INVALID;
1044 p[11] = VGM_WORD64_INVALID;
1045 p[12] = VGM_WORD64_INVALID;
1046 p[13] = VGM_WORD64_INVALID;
1047 p[14] = VGM_WORD64_INVALID;
1048 p[15] = VGM_WORD64_INVALID;
1049 return;
1050 }
1051 }
1052 }
1053
sewardj2e1a6772006-01-18 04:16:27 +00001054 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
1055 if (EXPECTED_TAKEN( len == 288
1056 && VG_IS_8_ALIGNED(base)
1057 )) {
1058 /* Now we know the address range is suitably sized and
1059 aligned. */
1060 UWord a_lo = (UWord)base;
1061 UWord a_hi = (UWord)(base + 287);
1062 UWord sec_lo = a_lo >> 16;
1063 UWord sec_hi = a_hi >> 16;
1064
1065 if (EXPECTED_TAKEN( sec_lo == sec_hi
1066 && sec_lo <= N_PRIMARY_MAP
1067 )) {
1068 /* Now we know that the entire address range falls within a
1069 single secondary map, and that that secondary 'lives' in
1070 the main primary map. */
1071 SecMap* sm = primary_map[sec_lo];
1072
1073 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
1074 /* And finally, now we know that the secondary in question
1075 is modifiable. */
1076 UWord v_off = a_lo & 0xFFFF;
1077 ULong* p = (ULong*)(&sm->vbyte[v_off]);
1078 p[ 0] = VGM_WORD64_INVALID;
1079 p[ 1] = VGM_WORD64_INVALID;
1080 p[ 2] = VGM_WORD64_INVALID;
1081 p[ 3] = VGM_WORD64_INVALID;
1082 p[ 4] = VGM_WORD64_INVALID;
1083 p[ 5] = VGM_WORD64_INVALID;
1084 p[ 6] = VGM_WORD64_INVALID;
1085 p[ 7] = VGM_WORD64_INVALID;
1086 p[ 8] = VGM_WORD64_INVALID;
1087 p[ 9] = VGM_WORD64_INVALID;
1088 p[10] = VGM_WORD64_INVALID;
1089 p[11] = VGM_WORD64_INVALID;
1090 p[12] = VGM_WORD64_INVALID;
1091 p[13] = VGM_WORD64_INVALID;
1092 p[14] = VGM_WORD64_INVALID;
1093 p[15] = VGM_WORD64_INVALID;
1094 p[16] = VGM_WORD64_INVALID;
1095 p[17] = VGM_WORD64_INVALID;
1096 p[18] = VGM_WORD64_INVALID;
1097 p[19] = VGM_WORD64_INVALID;
1098 p[20] = VGM_WORD64_INVALID;
1099 p[21] = VGM_WORD64_INVALID;
1100 p[22] = VGM_WORD64_INVALID;
1101 p[23] = VGM_WORD64_INVALID;
1102 p[24] = VGM_WORD64_INVALID;
1103 p[25] = VGM_WORD64_INVALID;
1104 p[26] = VGM_WORD64_INVALID;
1105 p[27] = VGM_WORD64_INVALID;
1106 p[28] = VGM_WORD64_INVALID;
1107 p[29] = VGM_WORD64_INVALID;
1108 p[30] = VGM_WORD64_INVALID;
1109 p[31] = VGM_WORD64_INVALID;
1110 p[32] = VGM_WORD64_INVALID;
1111 p[33] = VGM_WORD64_INVALID;
1112 p[34] = VGM_WORD64_INVALID;
1113 p[35] = VGM_WORD64_INVALID;
1114 return;
1115 }
1116 }
1117 }
1118
sewardj2a3a1a72005-05-12 23:25:43 +00001119 /* else fall into slow case */
sewardj2e1a6772006-01-18 04:16:27 +00001120 if (0) VG_(printf)("MC_(helperc_MAKE_STACK_UNINIT): "
1121 "slow case, %d\n", len);
sewardj826ec492005-05-12 18:05:00 +00001122 mc_make_writable(base, len);
1123}
1124
1125
nethercote8b76fe52004-11-08 19:20:09 +00001126/*------------------------------------------------------------*/
1127/*--- Checking memory ---*/
1128/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001129
/* Outcome of an addressibility/definedness check on a range.
   NOTE(review): the values are distinct non-zero constants,
   presumably so stray Bool/enum mixups stand out -- confirm. */
typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;
1137
1138
njn25e49d8e72002-09-23 09:36:25 +00001139/* Check permissions for address range. If inadequate permissions
1140 exist, *bad_addr is set to the offending address, so the caller can
1141 know what it is. */
1142
sewardjecf8e102003-07-12 12:11:39 +00001143/* Returns True if [a .. a+len) is not addressible. Otherwise,
1144 returns False, and if bad_addr is non-NULL, sets *bad_addr to
1145 indicate the lowest failing address. Functions below are
1146 similar. */
nethercote8b76fe52004-11-08 19:20:09 +00001147static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00001148{
nethercote451eae92004-11-02 13:06:32 +00001149 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001150 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001151 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00001152 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001153 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +00001154 abit = get_abit(a);
1155 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001156 if (bad_addr != NULL)
1157 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00001158 return False;
1159 }
1160 a++;
1161 }
1162 return True;
1163}
1164
nethercote8b76fe52004-11-08 19:20:09 +00001165static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001166{
nethercote451eae92004-11-02 13:06:32 +00001167 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001168 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001169 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +00001170 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001171 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +00001172 abit = get_abit(a);
1173 if (abit == VGM_BIT_INVALID) {
1174 if (bad_addr != NULL) *bad_addr = a;
1175 return False;
1176 }
1177 a++;
1178 }
1179 return True;
1180}
1181
nethercote8b76fe52004-11-08 19:20:09 +00001182static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001183{
nethercote451eae92004-11-02 13:06:32 +00001184 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001185 UWord abit;
1186 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +00001187
sewardjc1a2cda2005-04-21 17:34:00 +00001188 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +00001189 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +00001190 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001191 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001192 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001193 // Report addressability errors in preference to definedness errors
1194 // by checking the A bits first.
1195 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001196 if (bad_addr != NULL)
1197 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001198 return MC_AddrErr;
1199 }
1200 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001201 if (bad_addr != NULL)
1202 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001203 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001204 }
1205 a++;
1206 }
nethercote8b76fe52004-11-08 19:20:09 +00001207 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001208}
1209
1210
1211/* Check a zero-terminated ascii string. Tricky -- don't want to
1212 examine the actual bytes, to find the end, until we're sure it is
1213 safe to do so. */
1214
njn9b007f62003-04-07 14:40:25 +00001215static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001216{
sewardj45d94cc2005-04-20 14:44:11 +00001217 UWord abit;
1218 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +00001219 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +00001220 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00001221 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +00001222 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001223 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001224 // As in mc_check_readable(), check A bits first
1225 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001226 if (bad_addr != NULL)
1227 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001228 return MC_AddrErr;
1229 }
1230 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001231 if (bad_addr != NULL)
1232 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001233 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001234 }
1235 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +00001236 if (* ((UChar*)a) == 0)
1237 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001238 a++;
1239 }
1240}
1241
1242
1243/*------------------------------------------------------------*/
1244/*--- Memory event handlers ---*/
1245/*------------------------------------------------------------*/
1246
njn25e49d8e72002-09-23 09:36:25 +00001247static
njn72718642003-07-24 08:45:32 +00001248void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001249 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001250{
1251 Bool ok;
1252 Addr bad_addr;
1253
njn25e49d8e72002-09-23 09:36:25 +00001254 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1255 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001256 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001257 if (!ok) {
1258 switch (part) {
1259 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001260 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1261 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001262 break;
1263
1264 case Vg_CorePThread:
1265 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001266 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001267 break;
1268
1269 default:
njn67993252004-11-22 18:02:32 +00001270 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001271 }
1272 }
njn25e49d8e72002-09-23 09:36:25 +00001273}
1274
1275static
njn72718642003-07-24 08:45:32 +00001276void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001277 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001278{
njn25e49d8e72002-09-23 09:36:25 +00001279 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001280 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001281
nethercote8b76fe52004-11-08 19:20:09 +00001282 res = mc_check_readable ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00001283
1284 if (0)
1285 VG_(printf)("mc_check_is_readable(0x%x, %d, %s) -> %s\n",
1286 (UInt)base, (Int)size, s, res==MC_Ok ? "yes" : "no" );
1287
nethercote8b76fe52004-11-08 19:20:09 +00001288 if (MC_Ok != res) {
1289 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00001290
njn25e49d8e72002-09-23 09:36:25 +00001291 switch (part) {
1292 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001293 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1294 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001295 break;
1296
1297 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001298 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001299 break;
1300
1301 /* If we're being asked to jump to a silly address, record an error
1302 message before potentially crashing the entire system. */
1303 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001304 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001305 break;
1306
1307 default:
njn67993252004-11-22 18:02:32 +00001308 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001309 }
1310 }
njn25e49d8e72002-09-23 09:36:25 +00001311}
1312
1313static
njn72718642003-07-24 08:45:32 +00001314void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001315 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001316{
nethercote8b76fe52004-11-08 19:20:09 +00001317 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001318 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001319 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1320
njnca82cc02004-11-22 17:18:48 +00001321 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001322 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1323 if (MC_Ok != res) {
1324 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1325 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001326 }
njn25e49d8e72002-09-23 09:36:25 +00001327}
1328
static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Handler for memory present at startup: the r/w/x permission
      flags are ignored and the range is marked addressible+defined. */
   /* Ignore the permissions, just make it readable. Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}
1337
static
void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
{
   /* New heap memory is always addressible; it is additionally
      marked defined only when the allocator initialised it. */
   if (is_inited) {
      mc_make_readable(a, len);
   } else {
      mc_make_writable(a, len);
   }
}
1347
1348static
njnb8dca862005-03-14 02:42:44 +00001349void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001350{
njnb8dca862005-03-14 02:42:44 +00001351 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001352}
1353
static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   /* After the core writes to client memory (e.g. a syscall filling
      a buffer), the written range becomes addressible and defined. */
   mc_make_readable(a, len);
}
njn25e49d8e72002-09-23 09:36:25 +00001359
sewardj45d94cc2005-04-20 14:44:11 +00001360
njn25e49d8e72002-09-23 09:36:25 +00001361/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001362/*--- Register event handlers ---*/
1363/*------------------------------------------------------------*/
1364
sewardj45d94cc2005-04-20 14:44:11 +00001365/* When some chunk of guest state is written, mark the corresponding
1366 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00001367 chunks of guest state, hence the _SIZE value, which has to be as
1368 big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00001369*/
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   /* Mark [offset, offset+size) of tid's shadow register state as
      fully defined, by copying in a buffer of VGM_BYTE_VALID bytes.
      MAX_REG_WRITE_SIZE must be at least as big as the largest guest
      state chunk ever written in one go (see comment above). */
#  define MAX_REG_WRITE_SIZE 1392
   UChar area[MAX_REG_WRITE_SIZE];
   tl_assert(size <= MAX_REG_WRITE_SIZE);
   VG_(memset)(area, VGM_BYTE_VALID, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
#  undef MAX_REG_WRITE_SIZE
}
1380
static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   /* Client-call variant: the called function 'f' is ignored and the
      written shadow register area simply becomes defined. */
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}
1388
sewardj45d94cc2005-04-20 14:44:11 +00001389/* Look at the definedness of the guest's shadow state for
1390 [offset, offset+len). If any part of that is undefined, record
1391 a parameter error.
1392*/
1393static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1394 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001395{
sewardj45d94cc2005-04-20 14:44:11 +00001396 Int i;
1397 Bool bad;
1398
1399 UChar area[16];
1400 tl_assert(size <= 16);
1401
1402 VG_(get_shadow_regs_area)( tid, offset, size, area );
1403
1404 bad = False;
1405 for (i = 0; i < size; i++) {
1406 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001407 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001408 break;
1409 }
nethercote8b76fe52004-11-08 19:20:09 +00001410 }
1411
sewardj45d94cc2005-04-20 14:44:11 +00001412 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001413 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1414}
njnd3040452003-05-19 15:04:06 +00001415
njn25e49d8e72002-09-23 09:36:25 +00001416
sewardj6cf40ff2005-04-20 22:31:26 +00001417/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001418/*--- Printing errors ---*/
1419/*------------------------------------------------------------*/
1420
/* Pretty-print a memcheck error.  When --xml is active, each message
   is wrapped in <what>..</what> (via xpre/xpost) and preceded by a
   <kind> tag; otherwise plain text is emitted.  Errors not handled
   here are passed to the shared MAC printer. */
static void mc_pp_Error ( Error* err )
{
   MAC_Error* err_extra = VG_(get_error_extra)(err);

   /* Wrappers applied to every message line in XML mode. */
   HChar* xpre = VG_(clo_xml) ? " <what>" : "";
   HChar* xpost = VG_(clo_xml) ? "</what>" : "";

   switch (VG_(get_error_kind)(err)) {
      case CoreMemErr: {
         Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
                      xpre, VG_(get_error_string)(err), s, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      }

      case ValueErr:
         /* size == 0 denotes a conditional jump/move on uninitialised
            data; otherwise it is a use of an uninitialised value of
            the given size. */
         if (err_extra->size == 0) {
            if (VG_(clo_xml))
               VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
            VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
                                     " on uninitialised value(s)%s",
                                     xpre, xpost);
         } else {
            if (VG_(clo_xml))
               VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
            VG_(message)(Vg_UserMsg,
                         "%sUse of uninitialised value of size %d%s",
                         xpre, err_extra->size, xpost);
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      case ParamErr: {
         /* Syscall parameter either held in a register (isReg) or
            pointing at memory; register params can only be
            uninitialised, never unaddressable. */
         Bool isReg = ( Register == err_extra->addrinfo.akind );
         Char* s1 = ( isReg ? "contains" : "points to" );
         Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         if (isReg) tl_assert(!err_extra->isUnaddr);

         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
         VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
                      xpre, VG_(get_error_string)(err), s1, s2, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      case UserErr: {
         /* Error raised by an explicit client check request. */
         Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );

         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
         VG_(message)(Vg_UserMsg,
                      "%s%s byte(s) found during client check request%s",
                      xpre, s, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      default:
         MAC_(pp_shared_Error)(err);
         break;
   }
}
1492
1493/*------------------------------------------------------------*/
1494/*--- Recording errors ---*/
1495/*------------------------------------------------------------*/
1496
/* Record a use-of-uninitialised-value error; 'size' is the width of
   the offending value, with 0 denoting a conditional jump/move on
   uninitialised data (see mc_pp_Error). */
/* This one called from generated code and non-generated code. */
static void mc_record_value_error ( ThreadId tid, Int size )
{
   MAC_Error err_extra;

   MAC_(clear_MAC_Error)( &err_extra );
   err_extra.size = size;
   err_extra.isUnaddr = False;
   VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
}
1509
/* This called from non-generated code */

/* Record an error arising from a client check request at address
   'a'.  The address is left Undescribed here; it is filled in later
   by the error machinery.  NOTE(review): 'isWrite' is unused in this
   body -- confirm whether callers still need it. */
static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
                                   Bool isUnaddr )
{
   MAC_Error err_extra;

   tl_assert(VG_INVALID_THREADID != tid);
   MAC_(clear_MAC_Error)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isUnaddr = isUnaddr;
   VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
}
1523
1524/*------------------------------------------------------------*/
1525/*--- Suppressions ---*/
1526/*------------------------------------------------------------*/
1527
njn51d827b2005-05-09 01:02:08 +00001528static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001529{
1530 SuppKind skind;
1531
1532 if (MAC_(shared_recognised_suppression)(name, su))
1533 return True;
1534
1535 /* Extra suppressions not used by Addrcheck */
1536 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1537 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1538 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1539 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1540 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1541 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1542 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1543 else
1544 return False;
1545
1546 VG_(set_supp_kind)(su, skind);
1547 return True;
1548}
1549
1550/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001551/*--- Functions called directly from generated code: ---*/
1552/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001553/*------------------------------------------------------------*/
1554
1555/* Types: LOADV4, LOADV2, LOADV1 are:
1556 UWord fn ( Addr a )
1557 so they return 32-bits on 32-bit machines and 64-bits on
1558 64-bit machines. Addr has the same size as a host word.
1559
1560 LOADV8 is always ULong fn ( Addr a )
1561
1562 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1563 are a UWord, and for STOREV8 they are a ULong.
1564*/
1565
sewardj95448072004-11-22 20:19:51 +00001566/* ------------------------ Size = 8 ------------------------ */
1567
/* Generator for the fast-path 8-byte load handlers, parameterised on
   endianness.  The emitted function returns the V (definedness) bits
   for the 8 bytes at aA.  A single mask test folds together the
   "aA is 8-aligned" and "aA is within the primary map" checks; any
   failure, or a not-fully-addressible abits byte, defers to
   mc_LOADVn_slow.  (No comments can be added inside the macro body
   without altering its line-continuation structure.) */
#define MAKE_LOADV8(nAME,iS_BIGENDIAN)                            \
                                                                  \
   VG_REGPARM(1)                                                  \
   ULong nAME ( Addr aA )                                         \
   {                                                              \
      UWord   mask, a, sec_no, v_off, a_off, abits;               \
      SecMap* sm;                                                 \
                                                                  \
      PROF_EVENT(200, #nAME);                                     \
                                                                  \
      if (VG_DEBUG_MEMORY >= 2)                                   \
         return mc_LOADVn_slow( aA, 8, iS_BIGENDIAN );            \
                                                                  \
      mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));          \
      a    = (UWord)aA;                                           \
                                                                  \
      /* If any part of 'a' indicated by the mask is 1, either */ \
      /* 'a' is not naturally aligned, or 'a' exceeds the range */\
      /* covered by the primary map. Either way we defer to the */\
      /* slow-path case. */                                       \
      if (EXPECTED_NOT_TAKEN(a & mask)) {                         \
         PROF_EVENT(201, #nAME"-slow1");                          \
         return (ULong)mc_LOADVn_slow( aA, 8, iS_BIGENDIAN );     \
      }                                                           \
                                                                  \
      sec_no = (UWord)(a >> 16);                                  \
                                                                  \
      if (VG_DEBUG_MEMORY >= 1)                                   \
         tl_assert(sec_no < N_PRIMARY_MAP);                       \
                                                                  \
      sm    = primary_map[sec_no];                                \
      v_off = a & 0xFFFF;                                         \
      a_off = v_off >> 3;                                         \
      abits = (UWord)(sm->abits[a_off]);                          \
                                                                  \
      if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {              \
         /* Handle common case quickly: a is suitably aligned, */ \
         /* is mapped, and is addressible. */                     \
         return ((ULong*)(sm->vbyte))[ v_off >> 3 ];              \
      } else {                                                    \
         /* Slow but general case. */                             \
         PROF_EVENT(202, #nAME"-slow2");                          \
         return mc_LOADVn_slow( a, 8, iS_BIGENDIAN );             \
      }                                                           \
   }

/* Instantiate the big- and little-endian variants. */
MAKE_LOADV8( MC_(helperc_LOADV8be), True /*bigendian*/ );
MAKE_LOADV8( MC_(helperc_LOADV8le), False/*littleendian*/ );
sewardjf9d81612005-04-23 23:25:49 +00001616
sewardjf9d81612005-04-23 23:25:49 +00001617
sewardj8cf88b72005-07-08 01:29:33 +00001618#define MAKE_STOREV8(nAME,iS_BIGENDIAN) \
1619 \
1620 VG_REGPARM(1) \
1621 void nAME ( Addr aA, ULong vbytes ) \
1622 { \
sewardjae986ca2005-10-12 12:53:20 +00001623 UWord mask, a, sec_no, v_off, a_off, abits; \
1624 SecMap* sm; \
1625 \
sewardj8cf88b72005-07-08 01:29:33 +00001626 PROF_EVENT(210, #nAME); \
1627 \
1628 if (VG_DEBUG_MEMORY >= 2) \
1629 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1630 \
sewardjae986ca2005-10-12 12:53:20 +00001631 mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1632 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001633 \
1634 /* If any part of 'a' indicated by the mask is 1, either */ \
1635 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1636 /* covered by the primary map. Either way we defer to the */ \
1637 /* slow-path case. */ \
1638 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1639 PROF_EVENT(211, #nAME"-slow1"); \
1640 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1641 return; \
1642 } \
1643 \
sewardjae986ca2005-10-12 12:53:20 +00001644 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001645 \
1646 if (VG_DEBUG_MEMORY >= 1) \
1647 tl_assert(sec_no < N_PRIMARY_MAP); \
1648 \
sewardjae986ca2005-10-12 12:53:20 +00001649 sm = primary_map[sec_no]; \
1650 v_off = a & 0xFFFF; \
1651 a_off = v_off >> 3; \
1652 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001653 \
1654 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1655 && abits == VGM_BYTE_VALID)) { \
1656 /* Handle common case quickly: a is suitably aligned, */ \
1657 /* is mapped, and is addressible. */ \
1658 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes; \
1659 } else { \
1660 /* Slow but general case. */ \
1661 PROF_EVENT(212, #nAME"-slow2"); \
1662 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1663 } \
sewardjf9d81612005-04-23 23:25:49 +00001664 }
1665
sewardj8cf88b72005-07-08 01:29:33 +00001666MAKE_STOREV8( MC_(helperc_STOREV8be), True /*bigendian*/ );
1667MAKE_STOREV8( MC_(helperc_STOREV8le), False/*littleendian*/ );
sewardj95448072004-11-22 20:19:51 +00001668
sewardj95448072004-11-22 20:19:51 +00001669
1670/* ------------------------ Size = 4 ------------------------ */
1671
sewardj8cf88b72005-07-08 01:29:33 +00001672#define MAKE_LOADV4(nAME,iS_BIGENDIAN) \
1673 \
1674 VG_REGPARM(1) \
1675 UWord nAME ( Addr aA ) \
1676 { \
sewardjae986ca2005-10-12 12:53:20 +00001677 UWord mask, a, sec_no, v_off, a_off, abits; \
1678 SecMap* sm; \
1679 \
sewardj8cf88b72005-07-08 01:29:33 +00001680 PROF_EVENT(220, #nAME); \
1681 \
1682 if (VG_DEBUG_MEMORY >= 2) \
1683 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1684 \
sewardjae986ca2005-10-12 12:53:20 +00001685 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1686 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001687 \
1688 /* If any part of 'a' indicated by the mask is 1, either */ \
1689 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1690 /* covered by the primary map. Either way we defer to the */ \
1691 /* slow-path case. */ \
1692 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1693 PROF_EVENT(221, #nAME"-slow1"); \
1694 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1695 } \
1696 \
sewardjae986ca2005-10-12 12:53:20 +00001697 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001698 \
1699 if (VG_DEBUG_MEMORY >= 1) \
1700 tl_assert(sec_no < N_PRIMARY_MAP); \
1701 \
sewardjae986ca2005-10-12 12:53:20 +00001702 sm = primary_map[sec_no]; \
1703 v_off = a & 0xFFFF; \
1704 a_off = v_off >> 3; \
1705 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001706 abits >>= (a & 4); \
1707 abits &= 15; \
1708 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) { \
1709 /* Handle common case quickly: a is suitably aligned, */ \
1710 /* is mapped, and is addressible. */ \
1711 /* On a 32-bit platform, simply hoick the required 32 */ \
1712 /* bits out of the vbyte array. On a 64-bit platform, */ \
1713 /* also set the upper 32 bits to 1 ("undefined"), just */ \
1714 /* in case. This almost certainly isn't necessary, */ \
1715 /* but be paranoid. */ \
1716 UWord ret = (UWord)0xFFFFFFFF00000000ULL; \
1717 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] ); \
1718 return ret; \
1719 } else { \
1720 /* Slow but general case. */ \
1721 PROF_EVENT(222, #nAME"-slow2"); \
1722 return (UWord)mc_LOADVn_slow( a, 4, iS_BIGENDIAN ); \
1723 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001724 }
1725
sewardj8cf88b72005-07-08 01:29:33 +00001726MAKE_LOADV4( MC_(helperc_LOADV4be), True /*bigendian*/ );
1727MAKE_LOADV4( MC_(helperc_LOADV4le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001728
sewardjc1a2cda2005-04-21 17:34:00 +00001729
sewardj8cf88b72005-07-08 01:29:33 +00001730#define MAKE_STOREV4(nAME,iS_BIGENDIAN) \
1731 \
1732 VG_REGPARM(2) \
1733 void nAME ( Addr aA, UWord vbytes ) \
1734 { \
sewardjae986ca2005-10-12 12:53:20 +00001735 UWord mask, a, sec_no, v_off, a_off, abits; \
1736 SecMap* sm; \
1737 \
sewardj8cf88b72005-07-08 01:29:33 +00001738 PROF_EVENT(230, #nAME); \
1739 \
1740 if (VG_DEBUG_MEMORY >= 2) \
1741 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1742 \
sewardjae986ca2005-10-12 12:53:20 +00001743 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1744 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001745 \
1746 /* If any part of 'a' indicated by the mask is 1, either */ \
1747 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1748 /* covered by the primary map. Either way we defer to the */ \
1749 /* slow-path case. */ \
1750 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1751 PROF_EVENT(231, #nAME"-slow1"); \
1752 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1753 return; \
1754 } \
1755 \
sewardjae986ca2005-10-12 12:53:20 +00001756 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001757 \
1758 if (VG_DEBUG_MEMORY >= 1) \
1759 tl_assert(sec_no < N_PRIMARY_MAP); \
1760 \
sewardjae986ca2005-10-12 12:53:20 +00001761 sm = primary_map[sec_no]; \
1762 v_off = a & 0xFFFF; \
1763 a_off = v_off >> 3; \
1764 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001765 abits >>= (a & 4); \
1766 abits &= 15; \
1767 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1768 && abits == VGM_NIBBLE_VALID)) { \
1769 /* Handle common case quickly: a is suitably aligned, */ \
1770 /* is mapped, and is addressible. */ \
1771 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes; \
1772 } else { \
1773 /* Slow but general case. */ \
1774 PROF_EVENT(232, #nAME"-slow2"); \
1775 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1776 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001777 }
1778
sewardj8cf88b72005-07-08 01:29:33 +00001779MAKE_STOREV4( MC_(helperc_STOREV4be), True /*bigendian*/ );
1780MAKE_STOREV4( MC_(helperc_STOREV4le), False/*littleendian*/ );
njn25e49d8e72002-09-23 09:36:25 +00001781
njn25e49d8e72002-09-23 09:36:25 +00001782
sewardj95448072004-11-22 20:19:51 +00001783/* ------------------------ Size = 2 ------------------------ */
1784
sewardj8cf88b72005-07-08 01:29:33 +00001785#define MAKE_LOADV2(nAME,iS_BIGENDIAN) \
1786 \
1787 VG_REGPARM(1) \
1788 UWord nAME ( Addr aA ) \
1789 { \
sewardjae986ca2005-10-12 12:53:20 +00001790 UWord mask, a, sec_no, v_off, a_off, abits; \
1791 SecMap* sm; \
1792 \
sewardj8cf88b72005-07-08 01:29:33 +00001793 PROF_EVENT(240, #nAME); \
1794 \
1795 if (VG_DEBUG_MEMORY >= 2) \
1796 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1797 \
sewardjae986ca2005-10-12 12:53:20 +00001798 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1799 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001800 \
1801 /* If any part of 'a' indicated by the mask is 1, either */ \
1802 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1803 /* covered by the primary map. Either way we defer to the */ \
1804 /* slow-path case. */ \
1805 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1806 PROF_EVENT(241, #nAME"-slow1"); \
1807 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1808 } \
1809 \
sewardjae986ca2005-10-12 12:53:20 +00001810 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001811 \
1812 if (VG_DEBUG_MEMORY >= 1) \
1813 tl_assert(sec_no < N_PRIMARY_MAP); \
1814 \
sewardjae986ca2005-10-12 12:53:20 +00001815 sm = primary_map[sec_no]; \
1816 v_off = a & 0xFFFF; \
1817 a_off = v_off >> 3; \
1818 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001819 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1820 /* Handle common case quickly: a is mapped, and the */ \
1821 /* entire word32 it lives in is addressible. */ \
1822 /* Set the upper 16/48 bits of the result to 1 */ \
1823 /* ("undefined"), just in case. This almost certainly */ \
1824 /* isn't necessary, but be paranoid. */ \
1825 return (~(UWord)0xFFFF) \
1826 | \
1827 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] ); \
1828 } else { \
1829 /* Slow but general case. */ \
1830 PROF_EVENT(242, #nAME"-slow2"); \
1831 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1832 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001833 }
1834
sewardj8cf88b72005-07-08 01:29:33 +00001835MAKE_LOADV2( MC_(helperc_LOADV2be), True /*bigendian*/ );
1836MAKE_LOADV2( MC_(helperc_LOADV2le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001837
sewardjc1a2cda2005-04-21 17:34:00 +00001838
sewardj8cf88b72005-07-08 01:29:33 +00001839#define MAKE_STOREV2(nAME,iS_BIGENDIAN) \
1840 \
1841 VG_REGPARM(2) \
1842 void nAME ( Addr aA, UWord vbytes ) \
1843 { \
sewardjae986ca2005-10-12 12:53:20 +00001844 UWord mask, a, sec_no, v_off, a_off, abits; \
1845 SecMap* sm; \
1846 \
sewardj8cf88b72005-07-08 01:29:33 +00001847 PROF_EVENT(250, #nAME); \
1848 \
1849 if (VG_DEBUG_MEMORY >= 2) \
1850 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1851 \
sewardjae986ca2005-10-12 12:53:20 +00001852 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1853 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001854 \
1855 /* If any part of 'a' indicated by the mask is 1, either */ \
1856 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1857 /* covered by the primary map. Either way we defer to the */ \
1858 /* slow-path case. */ \
1859 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1860 PROF_EVENT(251, #nAME"-slow1"); \
1861 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1862 return; \
1863 } \
1864 \
sewardjae986ca2005-10-12 12:53:20 +00001865 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001866 \
1867 if (VG_DEBUG_MEMORY >= 1) \
1868 tl_assert(sec_no < N_PRIMARY_MAP); \
1869 \
sewardjae986ca2005-10-12 12:53:20 +00001870 sm = primary_map[sec_no]; \
1871 v_off = a & 0xFFFF; \
1872 a_off = v_off >> 3; \
1873 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001874 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1875 && abits == VGM_BYTE_VALID)) { \
1876 /* Handle common case quickly. */ \
1877 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes; \
1878 } else { \
1879 /* Slow but general case. */ \
1880 PROF_EVENT(252, #nAME"-slow2"); \
1881 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1882 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001883 }
1884
njn25e49d8e72002-09-23 09:36:25 +00001885
sewardj8cf88b72005-07-08 01:29:33 +00001886MAKE_STOREV2( MC_(helperc_STOREV2be), True /*bigendian*/ );
1887MAKE_STOREV2( MC_(helperc_STOREV2le), False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001888
njn25e49d8e72002-09-23 09:36:25 +00001889
sewardj95448072004-11-22 20:19:51 +00001890/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00001891/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00001892
njnaf839f52005-06-23 03:27:57 +00001893VG_REGPARM(1)
sewardj8cf88b72005-07-08 01:29:33 +00001894UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001895{
sewardjae986ca2005-10-12 12:53:20 +00001896 UWord mask, a, sec_no, v_off, a_off, abits;
1897 SecMap* sm;
1898
sewardj8cf88b72005-07-08 01:29:33 +00001899 PROF_EVENT(260, "helperc_LOADV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001900
1901# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001902 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001903# else
1904
sewardjae986ca2005-10-12 12:53:20 +00001905 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1906 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001907
1908 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1909 exceeds the range covered by the primary map. In which case we
1910 defer to the slow-path case. */
1911 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001912 PROF_EVENT(261, "helperc_LOADV1-slow1");
1913 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001914 }
1915
sewardjae986ca2005-10-12 12:53:20 +00001916 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001917
1918# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001919 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001920# endif
1921
sewardjae986ca2005-10-12 12:53:20 +00001922 sm = primary_map[sec_no];
1923 v_off = a & 0xFFFF;
1924 a_off = v_off >> 3;
1925 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001926 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1927 /* Handle common case quickly: a is mapped, and the entire
1928 word32 it lives in is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001929 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1930 just in case. This almost certainly isn't necessary, but be
1931 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001932 return (~(UWord)0xFF)
1933 |
1934 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1935 } else {
1936 /* Slow but general case. */
sewardj8cf88b72005-07-08 01:29:33 +00001937 PROF_EVENT(262, "helperc_LOADV1-slow2");
1938 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001939 }
1940# endif
njn25e49d8e72002-09-23 09:36:25 +00001941}
1942
sewardjc1a2cda2005-04-21 17:34:00 +00001943
njnaf839f52005-06-23 03:27:57 +00001944VG_REGPARM(2)
sewardj8cf88b72005-07-08 01:29:33 +00001945void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001946{
sewardjae986ca2005-10-12 12:53:20 +00001947 UWord mask, a, sec_no, v_off, a_off, abits;
1948 SecMap* sm;
1949
sewardj8cf88b72005-07-08 01:29:33 +00001950 PROF_EVENT(270, "helperc_STOREV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001951
1952# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001953 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001954# else
1955
sewardjae986ca2005-10-12 12:53:20 +00001956 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1957 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001958 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1959 exceeds the range covered by the primary map. In which case we
1960 defer to the slow-path case. */
1961 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001962 PROF_EVENT(271, "helperc_STOREV1-slow1");
1963 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001964 return;
1965 }
1966
sewardjae986ca2005-10-12 12:53:20 +00001967 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001968
1969# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001970 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001971# endif
1972
sewardjae986ca2005-10-12 12:53:20 +00001973 sm = primary_map[sec_no];
1974 v_off = a & 0xFFFF;
1975 a_off = v_off >> 3;
1976 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001977 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1978 && abits == VGM_BYTE_VALID)) {
1979 /* Handle common case quickly: a is mapped, the entire word32 it
1980 lives in is addressible. */
1981 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1982 } else {
sewardj8cf88b72005-07-08 01:29:33 +00001983 PROF_EVENT(272, "helperc_STOREV1-slow2");
1984 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001985 }
1986
1987# endif
njn25e49d8e72002-09-23 09:36:25 +00001988}
1989
1990
sewardjc859fbf2005-04-22 21:10:28 +00001991/*------------------------------------------------------------*/
1992/*--- Functions called directly from generated code: ---*/
1993/*--- Value-check failure handlers. ---*/
1994/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001995
njn5c004e42002-11-18 11:04:50 +00001996void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001997{
njn9e63cb62005-05-08 18:34:59 +00001998 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001999}
2000
njn5c004e42002-11-18 11:04:50 +00002001void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00002002{
njn9e63cb62005-05-08 18:34:59 +00002003 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00002004}
2005
njn5c004e42002-11-18 11:04:50 +00002006void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00002007{
njn9e63cb62005-05-08 18:34:59 +00002008 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00002009}
2010
sewardj11bcc4e2005-04-23 22:38:38 +00002011void MC_(helperc_value_check8_fail) ( void )
2012{
njn9e63cb62005-05-08 18:34:59 +00002013 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00002014}
2015
njnaf839f52005-06-23 03:27:57 +00002016VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00002017{
njn9e63cb62005-05-08 18:34:59 +00002018 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00002019}
2020
njn25e49d8e72002-09-23 09:36:25 +00002021
sewardj45d94cc2005-04-20 14:44:11 +00002022//zz /*------------------------------------------------------------*/
2023//zz /*--- Metadata get/set functions, for client requests. ---*/
2024//zz /*------------------------------------------------------------*/
2025//zz
2026//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
2027//zz error, 3 == addressing error. */
2028//zz static Int mc_get_or_set_vbits_for_client (
2029//zz ThreadId tid,
2030//zz Addr dataV,
2031//zz Addr vbitsV,
2032//zz SizeT size,
2033//zz Bool setting /* True <=> set vbits, False <=> get vbits */
2034//zz )
2035//zz {
2036//zz Bool addressibleD = True;
2037//zz Bool addressibleV = True;
2038//zz UInt* data = (UInt*)dataV;
2039//zz UInt* vbits = (UInt*)vbitsV;
2040//zz SizeT szW = size / 4; /* sigh */
2041//zz SizeT i;
2042//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
2043//zz UInt* vbitsP = NULL; /* ditto */
2044//zz
2045//zz /* Check alignment of args. */
2046//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
2047//zz return 2;
2048//zz if ((size & 3) != 0)
2049//zz return 2;
2050//zz
2051//zz /* Check that arrays are addressible. */
2052//zz for (i = 0; i < szW; i++) {
2053//zz dataP = &data[i];
2054//zz vbitsP = &vbits[i];
2055//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
2056//zz addressibleD = False;
2057//zz break;
2058//zz }
2059//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
2060//zz addressibleV = False;
2061//zz break;
2062//zz }
2063//zz }
2064//zz if (!addressibleD) {
2065//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
2066//zz setting ? True : False );
2067//zz return 3;
2068//zz }
2069//zz if (!addressibleV) {
2070//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
2071//zz setting ? False : True );
2072//zz return 3;
2073//zz }
2074//zz
2075//zz /* Do the copy */
2076//zz if (setting) {
2077//zz /* setting */
2078//zz for (i = 0; i < szW; i++) {
2079//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00002080//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00002081//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
2082//zz }
2083//zz } else {
2084//zz /* getting */
2085//zz for (i = 0; i < szW; i++) {
2086//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
2087//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
2088//zz }
2089//zz }
2090//zz
2091//zz return 1;
2092//zz }
sewardj05fe85e2005-04-27 22:46:36 +00002093
2094
2095/*------------------------------------------------------------*/
2096/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
2097/*------------------------------------------------------------*/
2098
2099/* For the memory leak detector, say whether an entire 64k chunk of
2100 address space is possibly in use, or not. If in doubt return
2101 True.
2102*/
2103static
2104Bool mc_is_within_valid_secondary ( Addr a )
2105{
2106 SecMap* sm = maybe_get_secmap_for ( a );
2107 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
2108 /* Definitely not in use. */
2109 return False;
2110 } else {
2111 return True;
2112 }
2113}
2114
2115
2116/* For the memory leak detector, say whether or not a given word
2117 address is to be regarded as valid. */
2118static
2119Bool mc_is_valid_aligned_word ( Addr a )
2120{
2121 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
2122 if (sizeof(UWord) == 4) {
2123 tl_assert(VG_IS_4_ALIGNED(a));
2124 } else {
2125 tl_assert(VG_IS_8_ALIGNED(a));
2126 }
2127 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
2128 return True;
2129 } else {
2130 return False;
2131 }
2132}
sewardja4495682002-10-21 07:29:59 +00002133
2134
nethercote996901a2004-08-03 13:29:09 +00002135/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00002136 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00002137 tool. */
njnb8dca862005-03-14 02:42:44 +00002138static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00002139{
sewardj05fe85e2005-04-27 22:46:36 +00002140 MAC_(do_detect_memory_leaks) (
2141 tid,
2142 mode,
2143 mc_is_within_valid_secondary,
2144 mc_is_valid_aligned_word
2145 );
njn25e49d8e72002-09-23 09:36:25 +00002146}
2147
2148
sewardjc859fbf2005-04-22 21:10:28 +00002149/*------------------------------------------------------------*/
2150/*--- Initialisation ---*/
2151/*------------------------------------------------------------*/
2152
2153static void init_shadow_memory ( void )
2154{
2155 Int i;
2156 SecMap* sm;
2157
2158 /* Build the 3 distinguished secondaries */
2159 tl_assert(VGM_BIT_INVALID == 1);
2160 tl_assert(VGM_BIT_VALID == 0);
2161 tl_assert(VGM_BYTE_INVALID == 0xFF);
2162 tl_assert(VGM_BYTE_VALID == 0);
2163
2164 /* Set A invalid, V invalid. */
2165 sm = &sm_distinguished[SM_DIST_NOACCESS];
2166 for (i = 0; i < 65536; i++)
2167 sm->vbyte[i] = VGM_BYTE_INVALID;
2168 for (i = 0; i < 8192; i++)
2169 sm->abits[i] = VGM_BYTE_INVALID;
2170
2171 /* Set A valid, V invalid. */
2172 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2173 for (i = 0; i < 65536; i++)
2174 sm->vbyte[i] = VGM_BYTE_INVALID;
2175 for (i = 0; i < 8192; i++)
2176 sm->abits[i] = VGM_BYTE_VALID;
2177
2178 /* Set A valid, V valid. */
2179 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2180 for (i = 0; i < 65536; i++)
2181 sm->vbyte[i] = VGM_BYTE_VALID;
2182 for (i = 0; i < 8192; i++)
2183 sm->abits[i] = VGM_BYTE_VALID;
2184
2185 /* Set up the primary map. */
2186 /* These entries gradually get overwritten as the used address
2187 space expands. */
2188 for (i = 0; i < N_PRIMARY_MAP; i++)
2189 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2190
2191 /* auxmap_size = auxmap_used = 0;
2192 no ... these are statically initialised */
2193}
2194
2195
2196/*------------------------------------------------------------*/
2197/*--- Sanity check machinery (permanently engaged) ---*/
2198/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002199
njn51d827b2005-05-09 01:02:08 +00002200static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002201{
jseward9800fd32004-01-04 23:08:04 +00002202 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002203 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002204 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002205 return True;
njn25e49d8e72002-09-23 09:36:25 +00002206}
2207
njn51d827b2005-05-09 01:02:08 +00002208static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002209{
sewardj23eb2fd2005-04-22 16:29:19 +00002210 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002211 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002212 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002213
sewardj23eb2fd2005-04-22 16:29:19 +00002214 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002215 PROF_EVENT(491, "expensive_sanity_check");
2216
sewardj23eb2fd2005-04-22 16:29:19 +00002217 /* Check that the 3 distinguished SMs are still as they should
2218 be. */
njn25e49d8e72002-09-23 09:36:25 +00002219
sewardj45d94cc2005-04-20 14:44:11 +00002220 /* Check A invalid, V invalid. */
2221 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002222 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002223 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002224 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002225 for (i = 0; i < 8192; i++)
2226 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002227 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002228
sewardj45d94cc2005-04-20 14:44:11 +00002229 /* Check A valid, V invalid. */
2230 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2231 for (i = 0; i < 65536; i++)
2232 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002233 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002234 for (i = 0; i < 8192; i++)
2235 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002236 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002237
2238 /* Check A valid, V valid. */
2239 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2240 for (i = 0; i < 65536; i++)
2241 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002242 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002243 for (i = 0; i < 8192; i++)
2244 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002245 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002246
sewardj23eb2fd2005-04-22 16:29:19 +00002247 if (bad) {
2248 VG_(printf)("memcheck expensive sanity: "
2249 "distinguished_secondaries have changed\n");
2250 return False;
2251 }
2252
2253 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002254 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002255 bad = True;
2256
2257 if (bad) {
2258 VG_(printf)("memcheck expensive sanity: "
2259 "nonsensical auxmap sizing\n");
2260 return False;
2261 }
2262
2263 /* check that the number of secmaps issued matches the number that
2264 are reachable (iow, no secmap leaks) */
2265 n_secmaps_found = 0;
2266 for (i = 0; i < N_PRIMARY_MAP; i++) {
2267 if (primary_map[i] == NULL) {
2268 bad = True;
2269 } else {
2270 if (!is_distinguished_sm(primary_map[i]))
2271 n_secmaps_found++;
2272 }
2273 }
2274
2275 for (i = 0; i < auxmap_used; i++) {
2276 if (auxmap[i].sm == NULL) {
2277 bad = True;
2278 } else {
2279 if (!is_distinguished_sm(auxmap[i].sm))
2280 n_secmaps_found++;
2281 }
2282 }
2283
2284 if (n_secmaps_found != n_secmaps_issued)
2285 bad = True;
2286
2287 if (bad) {
2288 VG_(printf)("memcheck expensive sanity: "
2289 "apparent secmap leakage\n");
2290 return False;
2291 }
2292
2293 /* check that auxmap only covers address space that the primary
2294 doesn't */
2295
2296 for (i = 0; i < auxmap_used; i++)
2297 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2298 bad = True;
2299
2300 if (bad) {
2301 VG_(printf)("memcheck expensive sanity: "
2302 "auxmap covers wrong address space\n");
2303 return False;
2304 }
2305
2306 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002307
2308 return True;
2309}
sewardj45d94cc2005-04-20 14:44:11 +00002310
njn25e49d8e72002-09-23 09:36:25 +00002311
njn25e49d8e72002-09-23 09:36:25 +00002312/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002313/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002314/*------------------------------------------------------------*/
2315
njn51d827b2005-05-09 01:02:08 +00002316static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002317{
sewardjf3418c02005-11-08 14:10:24 +00002318 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002319}
2320
njn51d827b2005-05-09 01:02:08 +00002321static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002322{
njn3e884182003-04-15 13:03:23 +00002323 MAC_(print_common_usage)();
njn3e884182003-04-15 13:03:23 +00002324}
2325
njn51d827b2005-05-09 01:02:08 +00002326static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002327{
2328 MAC_(print_common_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00002329}
2330
sewardjf3418c02005-11-08 14:10:24 +00002331
nethercote8b76fe52004-11-08 19:20:09 +00002332/*------------------------------------------------------------*/
2333/*--- Client requests ---*/
2334/*------------------------------------------------------------*/
2335
2336/* Client block management:
2337
2338 This is managed as an expanding array of client block descriptors.
2339 Indices of live descriptors are issued to the client, so it can ask
2340 to free them later. Therefore we cannot slide live entries down
2341 over dead ones. Instead we must use free/inuse flags and scan for
2342 an empty slot at allocation time. This in turn means allocation is
2343 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002344
sewardjedc75ab2005-03-15 23:30:32 +00002345 An unused block has start == size == 0
2346*/
nethercote8b76fe52004-11-08 19:20:09 +00002347
/* Descriptor for one client-declared general block.  A slot with
   start == 0 and size == 0 is unused and may be reissued. */
typedef
   struct {
      Addr start;        /* base address of the client block */
      SizeT size;        /* size in bytes */
      ExeContext* where; /* where the block was declared */
      Char* desc;        /* client-supplied description string */
   }
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt cgb_size = 0;        /* # slots allocated in cgbs[] */
static UInt cgb_used = 0;        /* # slots handed out so far */
static CGenBlock* cgbs = NULL;   /* the descriptor array itself */

/* Stats for this subsystem. */
static UInt cgb_used_MAX = 0; /* Max in use. */
static UInt cgb_allocs = 0; /* Number of allocs. */
static UInt cgb_discards = 0; /* Number of discards. */
static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002367
2368
2369static
njn695c16e2005-03-27 03:40:28 +00002370Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002371{
2372 UInt i, sz_new;
2373 CGenBlock* cgbs_new;
2374
njn695c16e2005-03-27 03:40:28 +00002375 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002376
njn695c16e2005-03-27 03:40:28 +00002377 for (i = 0; i < cgb_used; i++) {
2378 cgb_search++;
2379 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002380 return i;
2381 }
2382
2383 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002384 if (cgb_used < cgb_size) {
2385 cgb_used++;
2386 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002387 }
2388
2389 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002390 tl_assert(cgb_used == cgb_size);
2391 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002392
2393 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002394 for (i = 0; i < cgb_used; i++)
2395 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002396
njn695c16e2005-03-27 03:40:28 +00002397 if (cgbs != NULL)
2398 VG_(free)( cgbs );
2399 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002400
njn695c16e2005-03-27 03:40:28 +00002401 cgb_size = sz_new;
2402 cgb_used++;
2403 if (cgb_used > cgb_used_MAX)
2404 cgb_used_MAX = cgb_used;
2405 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002406}
2407
2408
/* Dump lifetime statistics for the client-block subsystem (reached only
   from the disabled if(0) branch in mc_fini). */
static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
   );
}
2416
nethercote8b76fe52004-11-08 19:20:09 +00002417static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2418{
2419 UInt i;
2420 /* VG_(printf)("try to identify %d\n", a); */
2421
2422 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002423 for (i = 0; i < cgb_used; i++) {
2424 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002425 continue;
njn717cde52005-05-10 02:47:21 +00002426 // Use zero as the redzone for client blocks.
2427 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00002428 /* OK - maybe it's a mempool, too? */
njn12627272005-08-14 18:32:16 +00002429 MAC_Mempool* mp = VG_(HT_lookup)(MAC_(mempool_list),
2430 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00002431 if (mp != NULL) {
2432 if (mp->chunks != NULL) {
2433 MAC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00002434 VG_(HT_ResetIter)(mp->chunks);
2435 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0cb0d2005-08-15 01:52:02 +00002436 if (VG_(addr_is_in_block)(a, mc->data, mc->size,
2437 MAC_MALLOC_REDZONE_SZB)) {
2438 ai->akind = UserG;
2439 ai->blksize = mc->size;
2440 ai->rwoffset = (Int)(a) - (Int)mc->data;
2441 ai->lastchange = mc->where;
2442 return True;
2443 }
nethercote8b76fe52004-11-08 19:20:09 +00002444 }
2445 }
njn1d0cb0d2005-08-15 01:52:02 +00002446 ai->akind = Mempool;
2447 ai->blksize = cgbs[i].size;
2448 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002449 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002450 return True;
2451 }
njn1d0cb0d2005-08-15 01:52:02 +00002452 ai->akind = UserG;
2453 ai->blksize = cgbs[i].size;
2454 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002455 ai->lastchange = cgbs[i].where;
njn1d0cb0d2005-08-15 01:52:02 +00002456 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002457 return True;
2458 }
2459 }
2460 return False;
2461}
2462
njn51d827b2005-05-09 01:02:08 +00002463static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002464{
2465 Int i;
2466 Bool ok;
2467 Addr bad_addr;
2468
njnfc26ff92004-11-22 19:12:49 +00002469 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002470 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2471 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2472 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2473 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2474 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2475 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2476 return False;
2477
2478 switch (arg[0]) {
2479 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2480 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2481 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002482 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2483 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002484 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00002485 break;
nethercote8b76fe52004-11-08 19:20:09 +00002486
2487 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2488 MC_ReadResult res;
2489 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2490 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002491 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2492 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002493 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002494 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2495 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002496 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00002497 break;
nethercote8b76fe52004-11-08 19:20:09 +00002498 }
2499
2500 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002501 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00002502 *ret = 0; /* return value is meaningless */
2503 break;
nethercote8b76fe52004-11-08 19:20:09 +00002504
2505 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002506 mc_make_noaccess ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002507 *ret = -1;
2508 break;
nethercote8b76fe52004-11-08 19:20:09 +00002509
2510 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002511 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002512 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00002513 break;
nethercote8b76fe52004-11-08 19:20:09 +00002514
2515 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002516 mc_make_readable ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002517 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002518 break;
2519
sewardjedc75ab2005-03-15 23:30:32 +00002520 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00002521 if (arg[1] != 0 && arg[2] != 0) {
2522 i = alloc_client_block();
2523 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2524 cgbs[i].start = arg[1];
2525 cgbs[i].size = arg[2];
2526 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2527 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002528
sewardj8cf88b72005-07-08 01:29:33 +00002529 *ret = i;
2530 } else
2531 *ret = -1;
2532 break;
sewardjedc75ab2005-03-15 23:30:32 +00002533
nethercote8b76fe52004-11-08 19:20:09 +00002534 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002535 if (cgbs == NULL
2536 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00002537 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002538 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00002539 } else {
2540 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2541 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2542 VG_(free)(cgbs[arg[2]].desc);
2543 cgb_discards++;
2544 *ret = 0;
2545 }
2546 break;
nethercote8b76fe52004-11-08 19:20:09 +00002547
sewardj45d94cc2005-04-20 14:44:11 +00002548//zz case VG_USERREQ__GET_VBITS:
2549//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2550//zz error. */
2551//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2552//zz *ret = mc_get_or_set_vbits_for_client
2553//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2554//zz break;
2555//zz
2556//zz case VG_USERREQ__SET_VBITS:
2557//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2558//zz error. */
2559//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2560//zz *ret = mc_get_or_set_vbits_for_client
2561//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2562//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002563
2564 default:
2565 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2566 return True;
2567 } else {
2568 VG_(message)(Vg_UserMsg,
2569 "Warning: unknown memcheck client request code %llx",
2570 (ULong)arg[0]);
2571 return False;
2572 }
2573 }
2574 return True;
2575}
njn25e49d8e72002-09-23 09:36:25 +00002576
2577/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002578/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002579/*------------------------------------------------------------*/
2580
/* Called by the core once command-line processing is complete. */
static void mc_post_clo_init ( void )
{
   /* If we've been asked to emit XML, mash around various other
      options so as to constrain the output somewhat. */
   if (VG_(clo_xml)) {
      /* Extract as much info as possible from the leak checker. */
      /* MAC_(clo_show_reachable) = True; */
      MAC_(clo_leak_check) = LC_Full;
   }
}
2591
njn51d827b2005-05-09 01:02:08 +00002592static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002593{
sewardj23eb2fd2005-04-22 16:29:19 +00002594 Int i, n_accessible_dist;
2595 SecMap* sm;
2596
sewardjae986ca2005-10-12 12:53:20 +00002597 MAC_(common_fini)( mc_detect_memory_leaks );
2598
sewardj45d94cc2005-04-20 14:44:11 +00002599 if (VG_(clo_verbosity) > 1) {
2600 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002601 " memcheck: sanity checks: %d cheap, %d expensive",
2602 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002603 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002604 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2605 auxmap_used,
2606 auxmap_used * 64,
2607 auxmap_used / 16 );
2608 VG_(message)(Vg_DebugMsg,
2609 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002610 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002611 VG_(message)(Vg_DebugMsg,
2612 " memcheck: secondaries: %d issued (%dk, %dM)",
2613 n_secmaps_issued,
2614 n_secmaps_issued * 64,
2615 n_secmaps_issued / 16 );
2616
2617 n_accessible_dist = 0;
2618 for (i = 0; i < N_PRIMARY_MAP; i++) {
2619 sm = primary_map[i];
2620 if (is_distinguished_sm(sm)
2621 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2622 n_accessible_dist ++;
2623 }
2624 for (i = 0; i < auxmap_used; i++) {
2625 sm = auxmap[i].sm;
2626 if (is_distinguished_sm(sm)
2627 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2628 n_accessible_dist ++;
2629 }
2630
2631 VG_(message)(Vg_DebugMsg,
2632 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2633 n_accessible_dist,
2634 n_accessible_dist * 64,
2635 n_accessible_dist / 16 );
2636
sewardj45d94cc2005-04-20 14:44:11 +00002637 }
2638
njn5c004e42002-11-18 11:04:50 +00002639 if (0) {
2640 VG_(message)(Vg_DebugMsg,
2641 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002642 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002643 }
njn25e49d8e72002-09-23 09:36:25 +00002644}
2645
/* Tool registration, run by the core before command-line processing.
   Declares Memcheck's identity, its core entry points, the services it
   needs from the core, and the memory-event callbacks it tracks. */
static void mc_pre_clo_init(void)
{
   VG_(details_name) ("Memcheck");
   VG_(details_version) (NULL);
   VG_(details_description) ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
   VG_(details_bug_reports_to) (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 370 );

   VG_(basic_tool_funcs) (mc_post_clo_init,
                          MC_(instrument),
                          mc_fini);

   /* Core services required by Memcheck. */
   VG_(needs_core_errors) ();
   VG_(needs_tool_errors) (MAC_(eq_Error),
                           mc_pp_Error,
                           MAC_(update_extra),
                           mc_recognised_suppression,
                           MAC_(read_extra_suppression_info),
                           MAC_(error_matches_suppression),
                           MAC_(get_error_name),
                           MAC_(print_extra_suppression_info));
   VG_(needs_libc_freeres) ();
   VG_(needs_command_line_options)(mc_process_cmd_line_option,
                                   mc_print_usage,
                                   mc_print_debug_usage);
   VG_(needs_client_requests) (mc_handle_client_request);
   VG_(needs_sanity_checks) (mc_cheap_sanity_check,
                             mc_expensive_sanity_check);

   VG_(needs_malloc_replacement) (MAC_(malloc),
                                  MAC_(__builtin_new),
                                  MAC_(__builtin_vec_new),
                                  MAC_(memalign),
                                  MAC_(calloc),
                                  MAC_(free),
                                  MAC_(__builtin_delete),
                                  MAC_(__builtin_vec_delete),
                                  MAC_(realloc),
                                  MAC_MALLOC_REDZONE_SZB );

   /* Hooks the shared MAC_ heap machinery calls back into Memcheck. */
   MAC_( new_mem_heap) = & mc_new_mem_heap;
   MAC_( ban_mem_heap) = & mc_make_noaccess;
   MAC_(copy_mem_heap) = & mc_copy_address_range_state;
   MAC_( die_mem_heap) = & mc_make_noaccess;
   MAC_(check_noaccess) = & mc_check_noaccess;

   VG_(track_new_mem_startup) ( & mc_new_mem_startup );
   VG_(track_new_mem_stack_signal)( & mc_make_writable );
   VG_(track_new_mem_brk) ( & mc_make_writable );
   VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );

   VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );

   // Nb: we don't do anything with mprotect.  This means that V bits are
   // preserved if a program, for example, marks some memory as inaccessible
   // and then later marks it as accessible again.
   //
   // If an access violation occurs (eg. writing to read-only memory) we let
   // it fault and print an informative termination message.  This doesn't
   // happen if the program catches the signal, though, which is bad.  If we
   // had two A bits (for readability and writability) that were completely
   // distinct from V bits, then we could handle all this properly.
   VG_(track_change_mem_mprotect) ( NULL );

   VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
   VG_(track_die_mem_brk) ( & mc_make_noaccess );
   VG_(track_die_mem_munmap) ( & mc_make_noaccess );

   /* Specialised fast-path handlers for the common stack-growth sizes;
      the unsized variants are the general fallback. */
   VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
   VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
   VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
   VG_(track_new_mem_stack_112) ( & MAC_(new_mem_stack_112) );
   VG_(track_new_mem_stack_128) ( & MAC_(new_mem_stack_128) );
   VG_(track_new_mem_stack_144) ( & MAC_(new_mem_stack_144) );
   VG_(track_new_mem_stack_160) ( & MAC_(new_mem_stack_160) );
   VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );

   VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
   VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
   VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
   VG_(track_die_mem_stack_112) ( & MAC_(die_mem_stack_112) );
   VG_(track_die_mem_stack_128) ( & MAC_(die_mem_stack_128) );
   VG_(track_die_mem_stack_144) ( & MAC_(die_mem_stack_144) );
   VG_(track_die_mem_stack_160) ( & MAC_(die_mem_stack_160) );
   VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );

   VG_(track_ban_mem_stack) ( & mc_make_noaccess );

   VG_(track_pre_mem_read) ( & mc_check_is_readable );
   VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
   VG_(track_pre_mem_write) ( & mc_check_is_writable );
   VG_(track_post_mem_write) ( & mc_post_mem_write );

   VG_(track_pre_reg_read) ( & mc_pre_reg_read );

   VG_(track_post_reg_write) ( & mc_post_reg_write );
   VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );

   /* Additional block description for VG_(describe_addr)() */
   MAC_(describe_addr_supp) = client_perm_maybe_describe;

   init_shadow_memory();
   MAC_(common_pre_clo_init)();

   tl_assert( mc_expensive_sanity_check() );
}
2758
sewardj45f4e7c2005-09-27 19:20:21 +00002759VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00002760
njn25e49d8e72002-09-23 09:36:25 +00002761/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002762/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002763/*--------------------------------------------------------------------*/