/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                   mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO 22 Apr 05

   test whether it would be faster, for LOADV4, to check
   only for 8-byte validity on the fast path
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)

/* Define to debug the mem audit system.  Set to:
      0  no debugging, fast cases are used
      1  some sanity checking, fast cases are used
      2  max sanity checking, only slow cases are used
*/
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* TODO: fix this comment */
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each of size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates `not addressable and
//zz    not valid' for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 4 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, addressable+valid.
//zz
//zz    [...] lots of stuff deleted due to out of date-ness
//zz
//zz    As a final optimisation, the alignment and address checks for
//zz    4-byte loads and stores are combined in a neat way.  The primary
//zz    map is extended to have 262144 entries (2^18), rather than 2^16.
//zz    The top 3/4 of these entries are permanently set to the
//zz    distinguished secondary map.  For a 4-byte load/store, the
//zz    top-level map is indexed not with (addr >> 16) but instead f(addr),
//zz    where
//zz
//zz     f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
//zz        = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
//zz        = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
//zz
//zz    ie the lowest two bits are placed above the 16 high address bits.
//zz    If either of these two bits is nonzero, the address is misaligned;
//zz    this will select a secondary map from the upper 3/4 of the primary
//zz    map.  Because this is always the distinguished secondary map, a
//zz    (bogus) address check failure will result.  The failure handling
//zz    code can then figure out whether this is a genuine addr check
//zz    failure or whether it is a possibly-legitimate access at a
//zz    misaligned address.
//zz */

/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 16G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  18

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Stats maps --------------- */

static Int   n_secmaps_issued   = 0;
static ULong n_auxmap_searches  = 0;
static ULong n_auxmap_cmps      = 0;
static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;

/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(shadow_alloc)(sizeof(SecMap));
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;
   return new_sm;
}

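/* Illustrative sketch (not part of the tool logic, never called): how
   a byte address decomposes into the indices used throughout this
   file.  The low 16 bits select a byte within a SecMap; of those, the
   top 13 bits pick a byte in .abits[] and the bottom 3 bits pick a
   bit within that byte, matching the 8192-byte abits / 65536-byte
   vbyte layout above.  The function name is hypothetical. */
static __attribute__((unused))
void mc_sketch_addr_decomposition ( Addr a )
{
   UWord sec_no  = (UWord)a >> 16;     /* primary map index, if a <= MAX_PRIMARY_ADDRESS */
   UWord off     = (UWord)a & 0xFFFF;  /* byte offset within the SecMap; indexes .vbyte[] directly */
   UWord abit_ix = off >> 3;           /* which byte of .abits[] holds this byte's A bit */
   UWord abit_no = off & 7;            /* which bit within that byte */
   tl_assert(off < 65536 && abit_ix < 8192 && abit_no < 8);
   tl_assert(a > MAX_PRIMARY_ADDRESS || sec_no < N_PRIMARY_MAP);
}
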
/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 500 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, return NULL.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   return NULL;
}


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);
   if (am)
      return am;

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}

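/* Note on the search strategy above: the auxmap is scanned linearly,
   but each successful lookup swaps the found entry one slot towards
   the front of the array.  Workloads that keep touching the same few
   64k chunks therefore migrate those chunks to the head, so the
   common case costs only a few comparisons even though the worst case
   is proportional to auxmap_used.  The n_auxmap_searches and
   n_auxmap_cmps counters above exist to check that this stays true in
   practice. */
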
/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}


/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}


/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th most significant byte
   in a wordszB-sized word, given the specified endianness. */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}


/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}

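/* Illustrative sketch (never called): how the fundamental accessors
   above combine to mark a single byte addressable-and-defined and to
   query that state back.  The function name is hypothetical;
   VGM_BIT_VALID and VGM_BYTE_VALID are the encodings already used
   throughout this file. */
static __attribute__((unused))
void mc_sketch_mark_and_query_byte ( Addr a )
{
   UWord abit, vbyte;
   /* A bit = accessible, V byte = every bit of the byte is defined. */
   set_abit_and_vbyte( a, VGM_BIT_VALID, VGM_BYTE_VALID );
   /* Reading the state back gives exactly what was written. */
   get_abit_and_vbyte( &abit, &vbyte, a );
   tl_assert(abit == VGM_BIT_VALID && vbyte == VGM_BYTE_VALID);
}
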
/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}


static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}

//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
//zz
//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   abits8;
//zz    PROF_EVENT(24);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    abits8 = sm->abits[sm_off >> 3];
//zz    abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    abits8 &= 0x0F;
//zz    return abits8;
//zz }
//zz
//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(25);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    return ((UInt*)(sm->vbyte))[sm_off >> 2];
//zz }
//zz
//zz
//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    PROF_EVENT(23);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
//zz }

/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

/* Given address 'a', find the place where the pointer to a's
   secondary map lives.  If a falls into the primary map, the returned
   value points to one of the entries in primary_map[].  Otherwise,
   the auxiliary primary map is searched for 'a', or an entry is
   created for it; either way, the returned value points to the
   relevant AuxMapEnt's .sm field.

   The point of this is to enable set_address_range_perms to assign
   secondary maps in a uniform way, without worrying about whether a
   given secondary map is pointed to from the main or auxiliary
   primary map.
*/

static SecMap** find_secmap_binder_for_addr ( Addr aA )
{
   if (aA > MAX_PRIMARY_ADDRESS) {
      AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
      return &am->sm;
   } else {
      UWord a      = (UWord)aA;
      UWord sec_no = (UWord)(a >> 16);
#     if VG_DEBUG_MEMORY >= 1
      tl_assert(sec_no < N_PRIMARY_MAP);
#     endif
      return &primary_map[sec_no];
   }
}


static void set_address_range_perms ( Addr aA, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   PROF_EVENT(150, "set_address_range_perms");

   /* Check the permissions make sense. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %u, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   UWord a = (UWord)aA;

#  if VG_DEBUG_MEMORY >= 2

   /*------------------ debug-only case ------------------ */
   SizeT i;

   UWord example_vbyte = BIT_TO_BYTE(example_v_bit);

   tl_assert(sizeof(SizeT) == sizeof(Addr));

   if (0 && len >= 4096)
      VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                  (ULong)a, len, example_a_bit, example_v_bit);

   if (len == 0)
      return;

   for (i = 0; i < len; i++) {
      set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
   }

#  else

   /*------------------ standard handling ------------------ */
   UWord    vbits8, abits8, vbits32, v_off, a_off;
   SecMap*  sm;
   SecMap** binder;
   SecMap*  example_dsm;

   /* Decide on the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   if (example_a_bit == VGM_BIT_INVALID) {
      example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
   } else {
      if (example_v_bit == VGM_BIT_VALID) {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
      } else {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
      }
   }

   /* Make various wider versions of the A/V values to use. */
   vbits8  = BIT_TO_BYTE(example_v_bit);
   abits8  = BIT_TO_BYTE(example_a_bit);
   vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;

   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(151, "set_address_range_perms-loop1-pre");
      if (VG_IS_8_ALIGNED(a)) break;
      set_abit_and_vbyte( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0);

   /* Now go in steps of 8 bytes. */
   binder = find_secmap_binder_for_addr(a);

   while (True) {

      if (len < 8) break;

      PROF_EVENT(152, "set_address_range_perms-loop8");

      if ((a & SECONDARY_MASK) == 0) {
         /* we just traversed a primary map boundary, so update the
            binder. */
         binder = find_secmap_binder_for_addr(a);
         PROF_EVENT(153, "set_address_range_perms-update-binder");

         /* Space-optimisation.  If we are setting the entire
            secondary map, just point this entry at one of our
            distinguished secondaries.  However, only do that if it
            already points at a distinguished secondary, since doing
            otherwise would leak the existing secondary.  We could do
            better and free up any pre-existing non-distinguished
            secondary at this point, since we are guaranteed that each
            non-dist secondary only has one pointer to it, and we have
            that pointer right here. */
         if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
            PROF_EVENT(154, "set_address_range_perms-entire-secmap");
            *binder = example_dsm;
            len -= SECONDARY_SIZE;
            a += SECONDARY_SIZE;
            continue;
         }
      }

      /* If the primary is already pointing to a distinguished map
         with the same properties as we're trying to set, then leave
         it that way. */
      if (*binder == example_dsm) {
         a += 8;
         len -= 8;
         continue;
      }

      /* Make sure it's OK to write the secondary. */
      if (is_distinguished_sm(*binder))
         *binder = copy_for_writing(*binder);

      sm = *binder;
      v_off = a & 0xFFFF;
      a_off = v_off >> 3;
      sm->abits[a_off] = (UChar)abits8;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;

      a += 8;
      len -= 8;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(155, "set_address_range_perms-loop1-post");
      set_abit_and_vbyte ( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

#  endif
}

/* --- Set permissions for arbitrary address ranges --- */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(40, "mc_make_noaccess");
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(41, "mc_make_writable");
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(42, "mc_make_readable");
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}


/* --- Block-copy permissions (needed for implementing realloc()). --- */

static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");

   PROF_EVENT(50, "mc_copy_address_range_state");
   for (i = 0; i < len; i++) {
      PROF_EVENT(51, "mc_copy_address_range_state(loop)");
      get_abit_and_vbyte( &abit, &vbyte, src+i );
      set_abit_and_vbyte( dst+i, abit, vbyte );
   }
}

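/* Illustrative sketch (never called): the intended pairing of the
   three range setters above across the life of a heap block, showing
   how the A/V combinations relate: writable = addressable+undefined,
   readable = addressable+defined, noaccess = neither.  The function
   and its parameters are hypothetical; the real wiring to allocation
   and other memory events lives elsewhere in the tool. */
static __attribute__((unused))
void mc_sketch_block_lifecycle ( Addr p, SizeT szB )
{
   mc_make_writable ( p, szB );   /* freshly allocated: addressable, contents undefined */
   /* ... the client initialises the block, or it is known to be zeroed ... */
   mc_make_readable ( p, szB );   /* addressable and fully defined */
   /* ... the client uses the block ... */
   mc_make_noaccess ( p, szB );   /* freed: no longer addressable */
}
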
/* --- Fast case permission setters, for dealing with stacks. --- */

static __inline__
void make_aligned_word32_writable ( Addr aA )
{
   PROF_EVENT(300, "make_aligned_word32_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(301, "make_aligned_word32_writable-slow1");
      mc_make_writable(aA, 4);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s). */
   sm->abits[a_off] &= ~mask;
#  endif
}


static __inline__
void make_aligned_word32_noaccess ( Addr aA )
{
   PROF_EVENT(310, "make_aligned_word32_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      mc_make_noaccess(aA, 4);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid
      (1s). */
   sm->abits[a_off] |= mask;
#  endif
}


/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_word64_writable ( Addr aA )
{
   PROF_EVENT(320, "make_aligned_word64_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(321, "make_aligned_word64_writable-slow1");
      mc_make_writable(aA, 8);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the relevant area accessible. */
   sm->abits[a_off] = VGM_BYTE_VALID;
#  endif
}


static __inline__
void make_aligned_word64_noaccess ( Addr aA )
{
   PROF_EVENT(330, "make_aligned_word64_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
      mc_make_noaccess(aA, 8);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the abandoned area inaccessible. */
   sm->abits[a_off] = VGM_BYTE_INVALID;
#  endif
}


/* The stack-pointer update handling functions */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );

/*------------------------------------------------------------*/
/*--- Checking memory                                      ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressable.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(60, "mc_check_noaccess");
   for (i = 0; i < len; i++) {
      PROF_EVENT(61, "mc_check_noaccess(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(62, "mc_check_writable");
   for (i = 0; i < len; i++) {
      PROF_EVENT(63, "mc_check_writable(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   UWord vbyte;

   PROF_EVENT(64, "mc_check_readable");
   DEBUG("mc_check_readable\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(65, "mc_check_readable(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // Report addressability errors in preference to definedness errors
      // by checking the A bits first.
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UWord abit;
   UWord vbyte;
   PROF_EVENT(66, "mc_check_readable_asciiz");
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // As in mc_check_readable(), check A bits first
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0)
         return MC_Ok;
      a++;
   }
}

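/* Illustrative sketch (never called): how a caller is expected to
   interpret the MC_ReadResult returned by mc_check_readable above.
   The function name is hypothetical; the real users are the
   mc_check_is_readable* handlers just below. */
static __attribute__((unused))
Bool mc_sketch_range_is_fully_defined ( Addr a, SizeT len )
{
   Addr bad_addr;
   MC_ReadResult res = mc_check_readable ( a, len, &bad_addr );
   /* MC_AddrErr:  a byte in [a, a+len) is not even addressable;
      MC_ValueErr: a byte was addressable but held undefined data;
      MC_Ok:       every byte is addressable and defined. */
   return res == MC_Ok;
}
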
/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

static
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = mc_check_writable ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    /*isUnaddr*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
         break;

      default:
         VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Addr bad_addr;
   MC_ReadResult res;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   res = mc_check_readable ( base, size, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );

      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    isUnaddr, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, isUnaddr, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   MC_ReadResult res;
   Addr bad_addr = 0;   // shut GCC up
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   tl_assert(part == Vg_CoreSysCall);
   res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );
      MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}

static
void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
{
   if (is_inited) {
      mc_make_readable(a, len);
   } else {
      mc_make_writable(a, len);
   }
}

static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   mc_make_readable(a, len);
}

static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}

/*------------------------------------------------------------*/
/*--- Register event handlers                              ---*/
/*------------------------------------------------------------*/

/* When some chunk of guest state is written, mark the corresponding
   shadow area as valid.  This is used to initialise arbitrarily large
   chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
*/
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   UChar area[1024];
   tl_assert(size <= 1024);
   VG_(memset)(area, VGM_BYTE_VALID, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
}

static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}

/* Look at the definedness of the guest's shadow state for
   [offset, offset+len).  If any part of that is undefined, record
   a parameter error.
*/
static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
                              OffT offset, SizeT size)
{
   Int   i;
   Bool  bad;

   UChar area[16];
   tl_assert(size <= 16);

   VG_(get_shadow_regs_area)( tid, offset, size, area );

   bad = False;
   for (i = 0; i < size; i++) {
      if (area[i] != VGM_BYTE_VALID) {
         bad = True;
         break;
      }
   }

   if (bad)
      MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
}

/*------------------------------------------------------------*/
/*--- Printing errors                                      ---*/
/*------------------------------------------------------------*/

void TL_(pp_Error) ( Error* err )
{
   MAC_Error* err_extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case CoreMemErr: {
         Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         VG_(message)(Vg_UserMsg, "%s contains %s byte(s)",
                      VG_(get_error_string)(err), s);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;
      }

      case ValueErr:
         if (err_extra->size == 0) {
            VG_(message)(Vg_UserMsg,
               "Conditional jump or move depends on uninitialised value(s)");
         } else {
            VG_(message)(Vg_UserMsg,
                         "Use of uninitialised value of size %d",
                         err_extra->size);
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      case ParamErr: {
         Bool isReg = ( Register == err_extra->addrinfo.akind );
         Char* s1 = ( isReg ? "contains" : "points to" );
         Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         if (isReg) tl_assert(!err_extra->isUnaddr);

         VG_(message)(Vg_UserMsg, "Syscall param %s %s %s byte(s)",
                      VG_(get_error_string)(err), s1, s2);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      case UserErr: {
         Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );

         VG_(message)(Vg_UserMsg,
            "%s byte(s) found during client check request", s);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      default:
         MAC_(pp_shared_Error)(err);
         break;
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* Creates a copy of the `extra' part, updates the copy with address info if
   necessary, and returns the copy. */
/* This one called from generated code and non-generated code. */
void mc_record_value_error ( ThreadId tid, Int size )
{
   MAC_Error err_extra;

   MAC_(clear_MAC_Error)( &err_extra );
   err_extra.size     = size;
   err_extra.isUnaddr = False;
   VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
}

/* This called from non-generated code */

void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
                            Bool isUnaddr )
{
   MAC_Error err_extra;

   tl_assert(VG_INVALID_THREADID != tid);
   MAC_(clear_MAC_Error)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isUnaddr       = isUnaddr;
   VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
}

/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

Bool TL_(recognised_suppression) ( Char* name, Supp* su )
{
   SuppKind skind;

   if (MAC_(shared_recognised_suppression)(name, su))
      return True;

   /* Extra suppressions not used by Addrcheck */
   else if (VG_STREQ(name, "Cond"))    skind = Value0Supp;
   else if (VG_STREQ(name, "Value0"))  skind = Value0Supp;/* backwards compat */
   else if (VG_STREQ(name, "Value1"))  skind = Value1Supp;
   else if (VG_STREQ(name, "Value2"))  skind = Value2Supp;
   else if (VG_STREQ(name, "Value4"))  skind = Value4Supp;
   else if (VG_STREQ(name, "Value8"))  skind = Value8Supp;
   else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
   else
      return False;

   VG_(set_supp_kind)(su, skind);
   return True;
}

/*------------------------------------------------------------*/
/*--- Functions called directly from generated code:       ---*/
/*--- Load/store handlers.                                 ---*/
/*------------------------------------------------------------*/

/* Types:  LOADV4, LOADV2, LOADV1 are:
               UWord fn ( Addr a )
   so they return 32-bits on 32-bit machines and 64-bits on
   64-bit machines.  Addr has the same size as a host word.

   LOADV8 is always  ULong fn ( Addr a )

   Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
   are a UWord, and for STOREV8 they are a ULong.
*/

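/* Illustrative sketch (never called): the single-test fast-path filter
   used by the LOADV/STOREV helpers below.  For an access of size szB
   (a power of two), the mask  ~((0x10000-szB) | ((N_PRIMARY_MAP-1) << 16))
   has 1s only in the low alignment bits and in the address bits above
   the range covered by primary_map[], so 'a & mask' is nonzero exactly
   when 'a' is misaligned for szB or lies beyond MAX_PRIMARY_ADDRESS.
   One branch therefore covers both reasons for taking the slow path.
   The function name and the example addresses are hypothetical. */
static __attribute__((unused))
void mc_sketch_fastpath_mask ( void )
{
   const UWord mask8 = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
   /* 8-aligned, in-range address: passes the filter. */
   tl_assert( (((UWord)0x10008) & mask8) == 0 );
   /* Misaligned address: its low bits poke through the mask. */
   tl_assert( (((UWord)0x10004) & mask8) != 0 );
   /* Out-of-range address: only meaningful when the primary map does
      not already cover the whole address space (ie, on 64-bit hosts). */
   if (MAX_PRIMARY_ADDRESS != ~(Addr)0)
      tl_assert( (((UWord)MAX_PRIMARY_ADDRESS + 1) & mask8) != 0 );
}
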
sewardj95448072004-11-22 20:19:51 +00001340/* ------------------------ Size = 8 ------------------------ */
1341
njn9fb73db2005-03-27 01:55:21 +00001342VGA_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001343ULong MC_(helperc_LOADV8) ( Addr aA )
sewardj95448072004-11-22 20:19:51 +00001344{
sewardjf9d81612005-04-23 23:25:49 +00001345 PROF_EVENT(200, "helperc_LOADV8");
1346
1347# if VG_DEBUG_MEMORY >= 2
1348 return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1349# else
1350
1351 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1352 UWord a = (UWord)aA;
1353
1354 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1355 naturally aligned, or 'a' exceeds the range covered by the
1356 primary map. Either way we defer to the slow-path case. */
1357 if (EXPECTED_NOT_TAKEN(a & mask)) {
1358 PROF_EVENT(201, "helperc_LOADV8-slow1");
1359 return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1360 }
1361
1362 UWord sec_no = (UWord)(a >> 16);
1363
1364# if VG_DEBUG_MEMORY >= 1
1365 tl_assert(sec_no < N_PRIMARY_MAP);
1366# endif
1367
1368 SecMap* sm = primary_map[sec_no];
1369 UWord v_off = a & 0xFFFF;
1370 UWord a_off = v_off >> 3;
1371 UWord abits = (UWord)(sm->abits[a_off]);
1372
1373 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1374 /* Handle common case quickly: a is suitably aligned, is mapped,
1375 and is addressible. */
1376 return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
1377 } else {
1378 /* Slow but general case. */
1379 PROF_EVENT(202, "helperc_LOADV8-slow2");
1380 return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
1381 }
1382
1383# endif
sewardj95448072004-11-22 20:19:51 +00001384}
1385
njn9fb73db2005-03-27 01:55:21 +00001386VGA_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001387void MC_(helperc_STOREV8) ( Addr aA, ULong vbytes )
sewardj95448072004-11-22 20:19:51 +00001388{
sewardjf9d81612005-04-23 23:25:49 +00001389 PROF_EVENT(210, "helperc_STOREV8");
1390
1391# if VG_DEBUG_MEMORY >= 2
1392 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1393# else
1394
1395 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1396 UWord a = (UWord)aA;
1397
1398 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1399 naturally aligned, or 'a' exceeds the range covered by the
1400 primary map. Either way we defer to the slow-path case. */
1401 if (EXPECTED_NOT_TAKEN(a & mask)) {
1402 PROF_EVENT(211, "helperc_STOREV8-slow1");
1403 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1404 return;
1405 }
1406
1407 UWord sec_no = (UWord)(a >> 16);
1408
1409# if VG_DEBUG_MEMORY >= 1
1410 tl_assert(sec_no < N_PRIMARY_MAP);
1411# endif
1412
1413 SecMap* sm = primary_map[sec_no];
1414 UWord v_off = a & 0xFFFF;
1415 UWord a_off = v_off >> 3;
1416 UWord abits = (UWord)(sm->abits[a_off]);
1417
1418 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1419 && abits == VGM_BYTE_VALID)) {
1420 /* Handle common case quickly: a is suitably aligned, is mapped,
1421 and is addressible. */
1422 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes;
1423 } else {
1424 /* Slow but general case. */
1425 PROF_EVENT(212, "helperc_STOREV8-slow2");
1426 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1427 }
1428# endif
sewardj95448072004-11-22 20:19:51 +00001429}
1430
1431/* ------------------------ Size = 4 ------------------------ */
1432
njn9fb73db2005-03-27 01:55:21 +00001433VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001434UWord MC_(helperc_LOADV4) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001435{
sewardjc1a2cda2005-04-21 17:34:00 +00001436 PROF_EVENT(220, "helperc_LOADV4");
1437
1438# if VG_DEBUG_MEMORY >= 2
1439 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1440# else
1441
sewardj23eb2fd2005-04-22 16:29:19 +00001442 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001443 UWord a = (UWord)aA;
1444
1445 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1446 naturally aligned, or 'a' exceeds the range covered by the
1447 primary map. Either way we defer to the slow-path case. */
1448 if (EXPECTED_NOT_TAKEN(a & mask)) {
1449 PROF_EVENT(221, "helperc_LOADV4-slow1");
1450 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1451 }
1452
1453 UWord sec_no = (UWord)(a >> 16);
1454
1455# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001456 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001457# endif
1458
1459 SecMap* sm = primary_map[sec_no];
1460 UWord v_off = a & 0xFFFF;
1461 UWord a_off = v_off >> 3;
1462 UWord abits = (UWord)(sm->abits[a_off]);
1463 abits >>= (a & 4);
1464 abits &= 15;
1465 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
1466 /* Handle common case quickly: a is suitably aligned, is mapped,
1467 and is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001468 /* On a 32-bit platform, simply hoick the required 32 bits out of
1469 the vbyte array. On a 64-bit platform, also set the upper 32
1470 bits to 1 ("undefined"), just in case. This almost certainly
1471 isn't necessary, but be paranoid. */
1472 UWord ret = (UWord)0xFFFFFFFF00000000ULL;
1473 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
1474 return ret;
sewardjc1a2cda2005-04-21 17:34:00 +00001475 } else {
1476 /* Slow but general case. */
1477 PROF_EVENT(222, "helperc_LOADV4-slow2");
1478 return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
1479 }
1480
1481# endif
njn25e49d8e72002-09-23 09:36:25 +00001482}
1483
njn9fb73db2005-03-27 01:55:21 +00001484VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001485void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001486{
sewardjc1a2cda2005-04-21 17:34:00 +00001487 PROF_EVENT(230, "helperc_STOREV4");
1488
1489# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001490 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001491# else
1492
sewardj23eb2fd2005-04-22 16:29:19 +00001493 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001494 UWord a = (UWord)aA;
1495
1496 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1497 naturally aligned, or 'a' exceeds the range covered by the
1498 primary map. Either way we defer to the slow-path case. */
1499 if (EXPECTED_NOT_TAKEN(a & mask)) {
1500 PROF_EVENT(231, "helperc_STOREV4-slow1");
1501 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1502 return;
1503 }
1504
1505 UWord sec_no = (UWord)(a >> 16);
1506
1507# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001508 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001509# endif
1510
1511 SecMap* sm = primary_map[sec_no];
1512 UWord v_off = a & 0xFFFF;
1513 UWord a_off = v_off >> 3;
1514 UWord abits = (UWord)(sm->abits[a_off]);
1515 abits >>= (a & 4);
1516 abits &= 15;
1517 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1518 && abits == VGM_NIBBLE_VALID)) {
1519 /* Handle common case quickly: a is suitably aligned, is mapped,
1520         and is addressable. */
1521 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
1522 } else {
1523 /* Slow but general case. */
1524 PROF_EVENT(232, "helperc_STOREV4-slow2");
1525 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1526 }
1527# endif
njn25e49d8e72002-09-23 09:36:25 +00001528}
1529
sewardj95448072004-11-22 20:19:51 +00001530/* ------------------------ Size = 2 ------------------------ */
1531
njn9fb73db2005-03-27 01:55:21 +00001532VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001533UWord MC_(helperc_LOADV2) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001534{
sewardjc1a2cda2005-04-21 17:34:00 +00001535 PROF_EVENT(240, "helperc_LOADV2");
1536
1537# if VG_DEBUG_MEMORY >= 2
1538 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1539# else
1540
sewardj23eb2fd2005-04-22 16:29:19 +00001541 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001542 UWord a = (UWord)aA;
1543
1544 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1545 naturally aligned, or 'a' exceeds the range covered by the
1546 primary map. Either way we defer to the slow-path case. */
1547 if (EXPECTED_NOT_TAKEN(a & mask)) {
1548 PROF_EVENT(241, "helperc_LOADV2-slow1");
1549 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1550 }
1551
1552 UWord sec_no = (UWord)(a >> 16);
1553
1554# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001555 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001556# endif
1557
1558 SecMap* sm = primary_map[sec_no];
1559 UWord v_off = a & 0xFFFF;
1560 UWord a_off = v_off >> 3;
1561 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001562 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1563 /* Handle common case quickly: a is mapped, and the entire
1564         aligned 8-byte word it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001565 /* Set the upper 16/48 bits of the result to 1 ("undefined"),
1566 just in case. This almost certainly isn't necessary, but be
1567 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001568 return (~(UWord)0xFFFF)
1569 |
1570 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
1571 } else {
1572 /* Slow but general case. */
1573 PROF_EVENT(242, "helperc_LOADV2-slow2");
1574 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1575 }
1576
1577# endif
njn25e49d8e72002-09-23 09:36:25 +00001578}
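
/* The value returned by the LOADVn helpers is a V-bit word in which, per
   the VGM_BIT_VALID == 0 convention, 0 means "defined" and 1 means
   "undefined".  To illustrate the 2-byte case above: on a 64-bit host a
   load of a fully defined halfword returns

      (~(UWord)0xFFFF) | 0  ==  0xFFFFFFFFFFFF0000

   ie. the low 16 bits carry the real V bits and the upper 48 bits are
   forced to "undefined"; on a 32-bit host the result is 0xFFFF0000.
   LOADV1 below does the same with the upper 24/56 bits. */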
1579
njn9fb73db2005-03-27 01:55:21 +00001580VGA_REGPARM(2)
sewardj5d28efc2005-04-21 22:16:29 +00001581void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001582{
sewardjc1a2cda2005-04-21 17:34:00 +00001583 PROF_EVENT(250, "helperc_STOREV2");
sewardj5d28efc2005-04-21 22:16:29 +00001584
1585# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001586 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001587# else
1588
sewardj23eb2fd2005-04-22 16:29:19 +00001589 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardj5d28efc2005-04-21 22:16:29 +00001590 UWord a = (UWord)aA;
1591
1592 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1593 naturally aligned, or 'a' exceeds the range covered by the
1594 primary map. Either way we defer to the slow-path case. */
1595 if (EXPECTED_NOT_TAKEN(a & mask)) {
1596 PROF_EVENT(251, "helperc_STOREV2-slow1");
1597 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1598 return;
1599 }
1600
1601 UWord sec_no = (UWord)(a >> 16);
1602
1603# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001604 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +00001605# endif
1606
1607 SecMap* sm = primary_map[sec_no];
1608 UWord v_off = a & 0xFFFF;
1609 UWord a_off = v_off >> 3;
1610 UWord abits = (UWord)(sm->abits[a_off]);
1611 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1612 && abits == VGM_BYTE_VALID)) {
1613 /* Handle common case quickly. */
1614 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
1615 } else {
1616 /* Slow but general case. */
1617 PROF_EVENT(252, "helperc_STOREV2-slow2");
1618 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1619 }
1620# endif
njn25e49d8e72002-09-23 09:36:25 +00001621}
1622
sewardj95448072004-11-22 20:19:51 +00001623/* ------------------------ Size = 1 ------------------------ */
1624
njn9fb73db2005-03-27 01:55:21 +00001625VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001626UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001627{
sewardjc1a2cda2005-04-21 17:34:00 +00001628 PROF_EVENT(260, "helperc_LOADV1");
1629
1630# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001631 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001632# else
1633
sewardj23eb2fd2005-04-22 16:29:19 +00001634 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001635 UWord a = (UWord)aA;
1636
1637 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1638      exceeds the range covered by the primary map, in which case we
1639      defer to the slow-path case. */
1640 if (EXPECTED_NOT_TAKEN(a & mask)) {
1641 PROF_EVENT(261, "helperc_LOADV1-slow1");
1642 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1643 }
1644
1645 UWord sec_no = (UWord)(a >> 16);
1646
1647# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001648 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001649# endif
1650
1651 SecMap* sm = primary_map[sec_no];
1652 UWord v_off = a & 0xFFFF;
1653 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001654 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001655 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1656 /* Handle common case quickly: a is mapped, and the entire
1657         aligned 8-byte word it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001658 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1659 just in case. This almost certainly isn't necessary, but be
1660 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001661 return (~(UWord)0xFF)
1662 |
1663 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1664 } else {
1665 /* Slow but general case. */
1666 PROF_EVENT(262, "helperc_LOADV1-slow2");
1667 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1668 }
1669# endif
njn25e49d8e72002-09-23 09:36:25 +00001670}
1671
sewardjc1a2cda2005-04-21 17:34:00 +00001672
njn9fb73db2005-03-27 01:55:21 +00001673VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001674void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001675{
sewardjc1a2cda2005-04-21 17:34:00 +00001676 PROF_EVENT(270, "helperc_STOREV1");
1677
1678# if VG_DEBUG_MEMORY >= 2
1679 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1680# else
1681
sewardj23eb2fd2005-04-22 16:29:19 +00001682 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001683 UWord a = (UWord)aA;
1684 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1685      exceeds the range covered by the primary map, in which case we
1686      defer to the slow-path case. */
1687 if (EXPECTED_NOT_TAKEN(a & mask)) {
1688 PROF_EVENT(271, "helperc_STOREV1-slow1");
1689 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1690 return;
1691 }
1692
1693 UWord sec_no = (UWord)(a >> 16);
1694
1695# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001696 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001697# endif
1698
1699 SecMap* sm = primary_map[sec_no];
1700 UWord v_off = a & 0xFFFF;
1701 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001702 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001703 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1704 && abits == VGM_BYTE_VALID)) {
1705 /* Handle common case quickly: a is mapped, the entire word32 it
1706 lives in is addressible. */
1707 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1708 } else {
1709 PROF_EVENT(272, "helperc_STOREV1-slow2");
1710 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1711 }
1712
1713# endif
njn25e49d8e72002-09-23 09:36:25 +00001714}
1715
1716
sewardjc859fbf2005-04-22 21:10:28 +00001717/*------------------------------------------------------------*/
1718/*--- Functions called directly from generated code: ---*/
1719/*--- Value-check failure handlers. ---*/
1720/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001721
njn5c004e42002-11-18 11:04:50 +00001722void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001723{
njn9e63cb62005-05-08 18:34:59 +00001724 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001725}
1726
njn5c004e42002-11-18 11:04:50 +00001727void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001728{
njn9e63cb62005-05-08 18:34:59 +00001729 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001730}
1731
njn5c004e42002-11-18 11:04:50 +00001732void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001733{
njn9e63cb62005-05-08 18:34:59 +00001734 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001735}
1736
sewardj11bcc4e2005-04-23 22:38:38 +00001737void MC_(helperc_value_check8_fail) ( void )
1738{
njn9e63cb62005-05-08 18:34:59 +00001739 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001740}
1741
njn9fb73db2005-03-27 01:55:21 +00001742VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001743{
njn9e63cb62005-05-08 18:34:59 +00001744 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001745}
1746
njn25e49d8e72002-09-23 09:36:25 +00001747
sewardj45d94cc2005-04-20 14:44:11 +00001748//zz /*------------------------------------------------------------*/
1749//zz /*--- Metadata get/set functions, for client requests. ---*/
1750//zz /*------------------------------------------------------------*/
1751//zz
1752//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1753//zz error, 3 == addressing error. */
1754//zz static Int mc_get_or_set_vbits_for_client (
1755//zz ThreadId tid,
1756//zz Addr dataV,
1757//zz Addr vbitsV,
1758//zz SizeT size,
1759//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1760//zz )
1761//zz {
1762//zz Bool addressibleD = True;
1763//zz Bool addressibleV = True;
1764//zz UInt* data = (UInt*)dataV;
1765//zz UInt* vbits = (UInt*)vbitsV;
1766//zz SizeT szW = size / 4; /* sigh */
1767//zz SizeT i;
1768//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1769//zz UInt* vbitsP = NULL; /* ditto */
1770//zz
1771//zz /* Check alignment of args. */
1772//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1773//zz return 2;
1774//zz if ((size & 3) != 0)
1775//zz return 2;
1776//zz
1777//zz /* Check that arrays are addressible. */
1778//zz for (i = 0; i < szW; i++) {
1779//zz dataP = &data[i];
1780//zz vbitsP = &vbits[i];
1781//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1782//zz addressibleD = False;
1783//zz break;
1784//zz }
1785//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1786//zz addressibleV = False;
1787//zz break;
1788//zz }
1789//zz }
1790//zz if (!addressibleD) {
1791//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1792//zz setting ? True : False );
1793//zz return 3;
1794//zz }
1795//zz if (!addressibleV) {
1796//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1797//zz setting ? False : True );
1798//zz return 3;
1799//zz }
1800//zz
1801//zz /* Do the copy */
1802//zz if (setting) {
1803//zz /* setting */
1804//zz for (i = 0; i < szW; i++) {
1805//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00001806//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00001807//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1808//zz }
1809//zz } else {
1810//zz /* getting */
1811//zz for (i = 0; i < szW; i++) {
1812//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1813//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1814//zz }
1815//zz }
1816//zz
1817//zz return 1;
1818//zz }
sewardj05fe85e2005-04-27 22:46:36 +00001819
1820
1821/*------------------------------------------------------------*/
1822/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1823/*------------------------------------------------------------*/
1824
1825/* For the memory leak detector, say whether an entire 64k chunk of
1826 address space is possibly in use, or not. If in doubt return
1827 True.
1828*/
1829static
1830Bool mc_is_within_valid_secondary ( Addr a )
1831{
1832 SecMap* sm = maybe_get_secmap_for ( a );
1833 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
1834 /* Definitely not in use. */
1835 return False;
1836 } else {
1837 return True;
1838 }
1839}
1840
1841
1842/* For the memory leak detector, say whether or not a given word
1843 address is to be regarded as valid. */
1844static
1845Bool mc_is_valid_aligned_word ( Addr a )
1846{
1847 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
1848 if (sizeof(UWord) == 4) {
1849 tl_assert(VG_IS_4_ALIGNED(a));
1850 } else {
1851 tl_assert(VG_IS_8_ALIGNED(a));
1852 }
1853 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
1854 return True;
1855 } else {
1856 return False;
1857 }
1858}
sewardja4495682002-10-21 07:29:59 +00001859
1860
nethercote996901a2004-08-03 13:29:09 +00001861/* Leak detector for this tool.  Nothing tool-specific happens here; we
sewardja4495682002-10-21 07:29:59 +00001862   merely run the generic leak detector, passing it the two
nethercote996901a2004-08-03 13:29:09 +00001863   address-classification callbacks defined above. */
njnb8dca862005-03-14 02:42:44 +00001864static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001865{
sewardj05fe85e2005-04-27 22:46:36 +00001866 MAC_(do_detect_memory_leaks) (
1867 tid,
1868 mode,
1869 mc_is_within_valid_secondary,
1870 mc_is_valid_aligned_word
1871 );
njn25e49d8e72002-09-23 09:36:25 +00001872}
1873
1874
sewardjc859fbf2005-04-22 21:10:28 +00001875/*------------------------------------------------------------*/
1876/*--- Initialisation ---*/
1877/*------------------------------------------------------------*/
1878
1879static void init_shadow_memory ( void )
1880{
1881 Int i;
1882 SecMap* sm;
1883
1884 /* Build the 3 distinguished secondaries */
1885 tl_assert(VGM_BIT_INVALID == 1);
1886 tl_assert(VGM_BIT_VALID == 0);
1887 tl_assert(VGM_BYTE_INVALID == 0xFF);
1888 tl_assert(VGM_BYTE_VALID == 0);
1889
1890 /* Set A invalid, V invalid. */
1891 sm = &sm_distinguished[SM_DIST_NOACCESS];
1892 for (i = 0; i < 65536; i++)
1893 sm->vbyte[i] = VGM_BYTE_INVALID;
1894 for (i = 0; i < 8192; i++)
1895 sm->abits[i] = VGM_BYTE_INVALID;
1896
1897 /* Set A valid, V invalid. */
1898 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1899 for (i = 0; i < 65536; i++)
1900 sm->vbyte[i] = VGM_BYTE_INVALID;
1901 for (i = 0; i < 8192; i++)
1902 sm->abits[i] = VGM_BYTE_VALID;
1903
1904 /* Set A valid, V valid. */
1905 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1906 for (i = 0; i < 65536; i++)
1907 sm->vbyte[i] = VGM_BYTE_VALID;
1908 for (i = 0; i < 8192; i++)
1909 sm->abits[i] = VGM_BYTE_VALID;
1910
1911 /* Set up the primary map. */
1912 /* These entries gradually get overwritten as the used address
1913 space expands. */
1914 for (i = 0; i < N_PRIMARY_MAP; i++)
1915 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
1916
1917    /* No need to reset auxmap_size and auxmap_used here: they are
1918       statically initialised (to zero). */
1919}
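
/* Summary of the A/V encoding established above (derived from the
   assertions and loops in init_shadow_memory; for reference only):

      A bit: 0 = addressable, 1 = not addressable
      V bit: 0 = defined,     1 = undefined

      SM_DIST_NOACCESS         abits all 1, vbytes all 1  (unaddressable)
      SM_DIST_ACCESS_UNDEFINED abits all 0, vbytes all 1  (addressable,
                                                           uninitialised)
      SM_DIST_ACCESS_DEFINED   abits all 0, vbytes all 0  (addressable,
                                                           initialised)

   Initially every primary_map[] entry points at the NOACCESS secondary;
   entries are overwritten with real, writable secondaries only as the
   client's used address space expands. */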
1920
1921
1922/*------------------------------------------------------------*/
1923/*--- Sanity check machinery (permanently engaged) ---*/
1924/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001925
njn26f02512004-11-22 18:33:15 +00001926Bool TL_(cheap_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001927{
jseward9800fd32004-01-04 23:08:04 +00001928 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00001929 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00001930 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00001931 return True;
njn25e49d8e72002-09-23 09:36:25 +00001932}
1933
njn26f02512004-11-22 18:33:15 +00001934Bool TL_(expensive_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001935{
sewardj23eb2fd2005-04-22 16:29:19 +00001936 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00001937 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00001938 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00001939
sewardj23eb2fd2005-04-22 16:29:19 +00001940 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00001941 PROF_EVENT(491, "expensive_sanity_check");
1942
sewardj23eb2fd2005-04-22 16:29:19 +00001943 /* Check that the 3 distinguished SMs are still as they should
1944 be. */
njn25e49d8e72002-09-23 09:36:25 +00001945
sewardj45d94cc2005-04-20 14:44:11 +00001946 /* Check A invalid, V invalid. */
1947 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00001948 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00001949 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001950 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001951 for (i = 0; i < 8192; i++)
1952 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001953 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00001954
sewardj45d94cc2005-04-20 14:44:11 +00001955 /* Check A valid, V invalid. */
1956 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1957 for (i = 0; i < 65536; i++)
1958 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001959 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001960 for (i = 0; i < 8192; i++)
1961 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001962 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001963
1964 /* Check A valid, V valid. */
1965 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1966 for (i = 0; i < 65536; i++)
1967 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001968 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001969 for (i = 0; i < 8192; i++)
1970 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001971 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001972
sewardj23eb2fd2005-04-22 16:29:19 +00001973 if (bad) {
1974 VG_(printf)("memcheck expensive sanity: "
1975 "distinguished_secondaries have changed\n");
1976 return False;
1977 }
1978
1979 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00001980 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00001981 bad = True;
1982
1983 if (bad) {
1984 VG_(printf)("memcheck expensive sanity: "
1985 "nonsensical auxmap sizing\n");
1986 return False;
1987 }
1988
1989 /* check that the number of secmaps issued matches the number that
1990 are reachable (iow, no secmap leaks) */
1991 n_secmaps_found = 0;
1992 for (i = 0; i < N_PRIMARY_MAP; i++) {
1993 if (primary_map[i] == NULL) {
1994 bad = True;
1995 } else {
1996 if (!is_distinguished_sm(primary_map[i]))
1997 n_secmaps_found++;
1998 }
1999 }
2000
2001 for (i = 0; i < auxmap_used; i++) {
2002 if (auxmap[i].sm == NULL) {
2003 bad = True;
2004 } else {
2005 if (!is_distinguished_sm(auxmap[i].sm))
2006 n_secmaps_found++;
2007 }
2008 }
2009
2010 if (n_secmaps_found != n_secmaps_issued)
2011 bad = True;
2012
2013 if (bad) {
2014 VG_(printf)("memcheck expensive sanity: "
2015 "apparent secmap leakage\n");
2016 return False;
2017 }
2018
2019 /* check that auxmap only covers address space that the primary
2020 doesn't */
2021
2022 for (i = 0; i < auxmap_used; i++)
2023 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2024 bad = True;
2025
2026 if (bad) {
2027 VG_(printf)("memcheck expensive sanity: "
2028 "auxmap covers wrong address space\n");
2029 return False;
2030 }
2031
2032 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002033
2034 return True;
2035}
sewardj45d94cc2005-04-20 14:44:11 +00002036
njn25e49d8e72002-09-23 09:36:25 +00002037
njn25e49d8e72002-09-23 09:36:25 +00002038/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002039/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002040/*------------------------------------------------------------*/
2041
njn43c799e2003-04-08 00:08:52 +00002042Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00002043
njn26f02512004-11-22 18:33:15 +00002044Bool TL_(process_cmd_line_option)(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002045{
njn45270a22005-03-27 01:00:11 +00002046 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00002047 else
njn43c799e2003-04-08 00:08:52 +00002048 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002049
2050 return True;
njn25e49d8e72002-09-23 09:36:25 +00002051}
2052
njn26f02512004-11-22 18:33:15 +00002053void TL_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00002054{
njn3e884182003-04-15 13:03:23 +00002055 MAC_(print_common_usage)();
2056 VG_(printf)(
2057" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
2058 );
2059}
2060
njn26f02512004-11-22 18:33:15 +00002061void TL_(print_debug_usage)(void)
njn3e884182003-04-15 13:03:23 +00002062{
2063 MAC_(print_common_debug_usage)();
2064 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00002065" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00002066 );
njn25e49d8e72002-09-23 09:36:25 +00002067}
2068
nethercote8b76fe52004-11-08 19:20:09 +00002069/*------------------------------------------------------------*/
2070/*--- Client requests ---*/
2071/*------------------------------------------------------------*/
2072
2073/* Client block management:
2074
2075 This is managed as an expanding array of client block descriptors.
2076 Indices of live descriptors are issued to the client, so it can ask
2077 to free them later. Therefore we cannot slide live entries down
2078 over dead ones. Instead we must use free/inuse flags and scan for
2079 an empty slot at allocation time. This in turn means allocation is
2080 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002081
sewardjedc75ab2005-03-15 23:30:32 +00002082 An unused block has start == size == 0
2083*/
nethercote8b76fe52004-11-08 19:20:09 +00002084
2085typedef
2086 struct {
2087 Addr start;
2088 SizeT size;
2089 ExeContext* where;
sewardjedc75ab2005-03-15 23:30:32 +00002090 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002091 }
2092 CGenBlock;
2093
2094/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002095static UInt cgb_size = 0;
2096static UInt cgb_used = 0;
2097static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002098
2099/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002100static UInt cgb_used_MAX = 0; /* Max in use. */
2101static UInt cgb_allocs = 0; /* Number of allocs. */
2102static UInt cgb_discards = 0; /* Number of discards. */
2103static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002104
2105
2106static
njn695c16e2005-03-27 03:40:28 +00002107Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002108{
2109 UInt i, sz_new;
2110 CGenBlock* cgbs_new;
2111
njn695c16e2005-03-27 03:40:28 +00002112 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002113
njn695c16e2005-03-27 03:40:28 +00002114 for (i = 0; i < cgb_used; i++) {
2115 cgb_search++;
2116 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002117 return i;
2118 }
2119
2120 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002121 if (cgb_used < cgb_size) {
2122 cgb_used++;
2123 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002124 }
2125
2126 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002127 tl_assert(cgb_used == cgb_size);
2128 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002129
2130 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002131 for (i = 0; i < cgb_used; i++)
2132 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002133
njn695c16e2005-03-27 03:40:28 +00002134 if (cgbs != NULL)
2135 VG_(free)( cgbs );
2136 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002137
njn695c16e2005-03-27 03:40:28 +00002138 cgb_size = sz_new;
2139 cgb_used++;
2140 if (cgb_used > cgb_used_MAX)
2141 cgb_used_MAX = cgb_used;
2142 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002143}
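
/* Sketch of the intended lifecycle, tying this allocator to the client
   request handlers further down (illustrative only):

      VG_USERREQ__CREATE_BLOCK -> alloc_client_block() returns an index i;
                                  cgbs[i] records the start, size, a
                                  strdup'd description and the allocation
                                  context, and i is handed back to the
                                  client.
      VG_USERREQ__DISCARD      -> cgbs[i].start = cgbs[i].size = 0, which
                                  marks the slot free for reuse by a later
                                  CREATE_BLOCK.

   Because indices are handed out to the client, slots are never
   compacted; freed slots are simply rediscovered by the linear search
   above. */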
2144
2145
2146static void show_client_block_stats ( void )
2147{
2148 VG_(message)(Vg_DebugMsg,
2149 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002150 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002151 );
2152}
2153
2154static Bool find_addr(VgHashNode* sh_ch, void* ap)
2155{
2156 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
2157 Addr a = *(Addr*)ap;
2158
2159 return VG_(addr_is_in_block)(a, m->data, m->size);
2160}
2161
2162static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2163{
2164 UInt i;
2165 /* VG_(printf)("try to identify %d\n", a); */
2166
2167 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002168 for (i = 0; i < cgb_used; i++) {
2169 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002170 continue;
njn695c16e2005-03-27 03:40:28 +00002171 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size)) {
nethercote8b76fe52004-11-08 19:20:09 +00002172 MAC_Mempool **d, *mp;
2173
2174 /* OK - maybe it's a mempool, too? */
2175 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
njn695c16e2005-03-27 03:40:28 +00002176 (UWord)cgbs[i].start,
nethercote8b76fe52004-11-08 19:20:09 +00002177 (void*)&d);
2178 if(mp != NULL) {
2179 if(mp->chunks != NULL) {
2180 MAC_Chunk *mc;
2181
2182 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
2183 if(mc != NULL) {
2184 ai->akind = UserG;
2185 ai->blksize = mc->size;
2186 ai->rwoffset = (Int)(a) - (Int)mc->data;
2187 ai->lastchange = mc->where;
2188 return True;
2189 }
2190 }
2191 ai->akind = Mempool;
njn695c16e2005-03-27 03:40:28 +00002192 ai->blksize = cgbs[i].size;
2193 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2194 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002195 return True;
2196 }
2197 ai->akind = UserG;
njn695c16e2005-03-27 03:40:28 +00002198 ai->blksize = cgbs[i].size;
2199 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2200 ai->lastchange = cgbs[i].where;
2201 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002202 return True;
2203 }
2204 }
2205 return False;
2206}
2207
njn26f02512004-11-22 18:33:15 +00002208Bool TL_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002209{
2210 Int i;
2211 Bool ok;
2212 Addr bad_addr;
2213
njnfc26ff92004-11-22 19:12:49 +00002214 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002215 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2216 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2217 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2218 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2219 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2220 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2221 return False;
2222
2223 switch (arg[0]) {
2224 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2225 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2226 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002227 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2228 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002229 *ret = ok ? (UWord)NULL : bad_addr;
2230 break;
2231
2232 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2233 MC_ReadResult res;
2234 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2235 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002236 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2237 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002238 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002239 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2240 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002241 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
2242 break;
2243 }
2244
2245 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002246 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00002247 *ret = 0; /* return value is meaningless */
2248 break;
2249
2250 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002251 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002252 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002253 break;
2254
2255 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002256 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002257 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002258 break;
2259
2260 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002261 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002262 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002263 break;
2264
sewardjedc75ab2005-03-15 23:30:32 +00002265 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
2266 if (arg[1] != 0 && arg[2] != 0) {
njn695c16e2005-03-27 03:40:28 +00002267 i = alloc_client_block();
2268 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2269 cgbs[i].start = arg[1];
2270 cgbs[i].size = arg[2];
2271 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2272 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002273
2274 *ret = i;
2275 } else
2276 *ret = -1;
2277 break;
2278
nethercote8b76fe52004-11-08 19:20:09 +00002279 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002280 if (cgbs == NULL
2281 || arg[2] >= cgb_used ||
2282 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002283 *ret = 1;
2284 } else {
njn695c16e2005-03-27 03:40:28 +00002285 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2286 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2287 VG_(free)(cgbs[arg[2]].desc);
2288 cgb_discards++;
sewardjedc75ab2005-03-15 23:30:32 +00002289 *ret = 0;
2290 }
nethercote8b76fe52004-11-08 19:20:09 +00002291 break;
2292
sewardj45d94cc2005-04-20 14:44:11 +00002293//zz case VG_USERREQ__GET_VBITS:
2294//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2295//zz error. */
2296//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2297//zz *ret = mc_get_or_set_vbits_for_client
2298//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2299//zz break;
2300//zz
2301//zz case VG_USERREQ__SET_VBITS:
2302//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2303//zz error. */
2304//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2305//zz *ret = mc_get_or_set_vbits_for_client
2306//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2307//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002308
2309 default:
2310 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2311 return True;
2312 } else {
2313 VG_(message)(Vg_UserMsg,
2314 "Warning: unknown memcheck client request code %llx",
2315 (ULong)arg[0]);
2316 return False;
2317 }
2318 }
2319 return True;
2320}
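
/* An illustrative example of how client code drives the requests handled
   above.  The macro names are assumed to be those provided by the
   memcheck.h of this era; what actually reaches this function is the
   VG_USERREQ__* code in arg[0]:

      char* buf = malloc(64);
      VALGRIND_MAKE_NOACCESS(buf, 64);  // VG_USERREQ__MAKE_NOACCESS
      VALGRIND_MAKE_WRITABLE(buf, 64);  // VG_USERREQ__MAKE_WRITABLE
      buf[0] = 'x';
      VALGRIND_MAKE_READABLE(buf, 1);   // VG_USERREQ__MAKE_READABLE
      VALGRIND_DO_LEAK_CHECK;           // VG_USERREQ__DO_LEAK_CHECK

   Each macro expands to a magic instruction sequence whose first
   argument word is the request code dispatched on in the switch above. */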
njn25e49d8e72002-09-23 09:36:25 +00002321
2322/*------------------------------------------------------------*/
2323/*--- Setup ---*/
2324/*------------------------------------------------------------*/
2325
njn26f02512004-11-22 18:33:15 +00002326void TL_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00002327{
njn810086f2002-11-14 12:42:47 +00002328 VG_(details_name) ("Memcheck");
2329 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00002330 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00002331 VG_(details_copyright_author)(
njn53612422005-03-12 16:22:54 +00002332 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
nethercote421281e2003-11-20 16:20:55 +00002333 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9ebf9fd2004-11-28 16:56:51 +00002334 VG_(details_avg_translation_sizeB) ( 370 );
njn25e49d8e72002-09-23 09:36:25 +00002335
njn8a97c6d2005-03-31 04:37:24 +00002336 VG_(basic_tool_funcs) (TL_(post_clo_init),
2337 TL_(instrument),
2338 TL_(fini));
2339
njn810086f2002-11-14 12:42:47 +00002340 VG_(needs_core_errors) ();
njn8a97c6d2005-03-31 04:37:24 +00002341 VG_(needs_tool_errors) (TL_(eq_Error),
2342 TL_(pp_Error),
2343 TL_(update_extra),
2344 TL_(recognised_suppression),
2345 TL_(read_extra_suppression_info),
2346 TL_(error_matches_suppression),
2347 TL_(get_error_name),
2348 TL_(print_extra_suppression_info));
njn810086f2002-11-14 12:42:47 +00002349 VG_(needs_libc_freeres) ();
njn8a97c6d2005-03-31 04:37:24 +00002350 VG_(needs_command_line_options)(TL_(process_cmd_line_option),
2351 TL_(print_usage),
2352 TL_(print_debug_usage));
2353 VG_(needs_client_requests) (TL_(handle_client_request));
2354 VG_(needs_sanity_checks) (TL_(cheap_sanity_check),
2355 TL_(expensive_sanity_check));
fitzhardinge98abfc72003-12-16 02:05:15 +00002356 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00002357
njn8a97c6d2005-03-31 04:37:24 +00002358 VG_(malloc_funcs) (TL_(malloc),
2359 TL_(__builtin_new),
2360 TL_(__builtin_vec_new),
2361 TL_(memalign),
2362 TL_(calloc),
2363 TL_(free),
2364 TL_(__builtin_delete),
2365 TL_(__builtin_vec_delete),
2366 TL_(realloc),
2367 MALLOC_REDZONE_SZB );
2368
njn3e884182003-04-15 13:03:23 +00002369 MAC_( new_mem_heap) = & mc_new_mem_heap;
nethercote8b76fe52004-11-08 19:20:09 +00002370 MAC_( ban_mem_heap) = & mc_make_noaccess;
njn3e884182003-04-15 13:03:23 +00002371 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
nethercote8b76fe52004-11-08 19:20:09 +00002372 MAC_( die_mem_heap) = & mc_make_noaccess;
2373 MAC_(check_noaccess) = & mc_check_noaccess;
njn3e884182003-04-15 13:03:23 +00002374
fitzhardinge98abfc72003-12-16 02:05:15 +00002375 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
nethercote8b76fe52004-11-08 19:20:09 +00002376 VG_(init_new_mem_stack_signal) ( & mc_make_writable );
2377 VG_(init_new_mem_brk) ( & mc_make_writable );
njnb8dca862005-03-14 02:42:44 +00002378 VG_(init_new_mem_mmap) ( & mc_new_mem_mmap );
njn25e49d8e72002-09-23 09:36:25 +00002379
fitzhardinge98abfc72003-12-16 02:05:15 +00002380 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
njn3e884182003-04-15 13:03:23 +00002381
nethercote8b76fe52004-11-08 19:20:09 +00002382 VG_(init_die_mem_stack_signal) ( & mc_make_noaccess );
2383 VG_(init_die_mem_brk) ( & mc_make_noaccess );
2384 VG_(init_die_mem_munmap) ( & mc_make_noaccess );
njn3e884182003-04-15 13:03:23 +00002385
fitzhardinge98abfc72003-12-16 02:05:15 +00002386 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2387 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2388 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2389 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2390 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2391 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002392
fitzhardinge98abfc72003-12-16 02:05:15 +00002393 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2394 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2395 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2396 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2397 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2398 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002399
nethercote8b76fe52004-11-08 19:20:09 +00002400 VG_(init_ban_mem_stack) ( & mc_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00002401
fitzhardinge98abfc72003-12-16 02:05:15 +00002402 VG_(init_pre_mem_read) ( & mc_check_is_readable );
2403 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2404 VG_(init_pre_mem_write) ( & mc_check_is_writable );
njncf45fd42004-11-24 16:30:22 +00002405 VG_(init_post_mem_write) ( & mc_post_mem_write );
nethercote8b76fe52004-11-08 19:20:09 +00002406
2407 VG_(init_pre_reg_read) ( & mc_pre_reg_read );
njn25e49d8e72002-09-23 09:36:25 +00002408
njncf45fd42004-11-24 16:30:22 +00002409 VG_(init_post_reg_write) ( & mc_post_reg_write );
fitzhardinge98abfc72003-12-16 02:05:15 +00002410 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00002411
njn31066fd2005-03-26 00:42:02 +00002412 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2413 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2414 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00002415
njn43c799e2003-04-08 00:08:52 +00002416 /* Additional block description for VG_(describe_addr)() */
nethercote8b76fe52004-11-08 19:20:09 +00002417 MAC_(describe_addr_supp) = client_perm_maybe_describe;
njn43c799e2003-04-08 00:08:52 +00002418
njnd04b7c62002-10-03 14:05:52 +00002419 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00002420 MAC_(common_pre_clo_init)();
sewardjc1a2cda2005-04-21 17:34:00 +00002421
2422 tl_assert( TL_(expensive_sanity_check)() );
njn5c004e42002-11-18 11:04:50 +00002423}
2424
njn26f02512004-11-22 18:33:15 +00002425void TL_(post_clo_init) ( void )
njn5c004e42002-11-18 11:04:50 +00002426{
2427}
2428
njn26f02512004-11-22 18:33:15 +00002429void TL_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002430{
nethercote8b76fe52004-11-08 19:20:09 +00002431 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002432
sewardj23eb2fd2005-04-22 16:29:19 +00002433 Int i, n_accessible_dist;
2434 SecMap* sm;
2435
sewardj45d94cc2005-04-20 14:44:11 +00002436 if (VG_(clo_verbosity) > 1) {
2437 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002438 " memcheck: sanity checks: %d cheap, %d expensive",
2439 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002440 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002441 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2442 auxmap_used,
2443 auxmap_used * 64,
2444 auxmap_used / 16 );
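      /* Each auxmap entry, like each primary_map entry, stands for one
         64KB secondary, hence the count*64 (KB) and count/16 (MB)
         figures here and in the "secondaries" message below. */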
2445 VG_(message)(Vg_DebugMsg,
2446 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002447 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002448 VG_(message)(Vg_DebugMsg,
2449 " memcheck: secondaries: %d issued (%dk, %dM)",
2450 n_secmaps_issued,
2451 n_secmaps_issued * 64,
2452 n_secmaps_issued / 16 );
2453
2454 n_accessible_dist = 0;
2455 for (i = 0; i < N_PRIMARY_MAP; i++) {
2456 sm = primary_map[i];
2457 if (is_distinguished_sm(sm)
2458 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2459 n_accessible_dist ++;
2460 }
2461 for (i = 0; i < auxmap_used; i++) {
2462 sm = auxmap[i].sm;
2463 if (is_distinguished_sm(sm)
2464 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2465 n_accessible_dist ++;
2466 }
2467
2468 VG_(message)(Vg_DebugMsg,
2469 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2470 n_accessible_dist,
2471 n_accessible_dist * 64,
2472 n_accessible_dist / 16 );
2473
sewardj45d94cc2005-04-20 14:44:11 +00002474 }
2475
njn5c004e42002-11-18 11:04:50 +00002476 if (0) {
2477 VG_(message)(Vg_DebugMsg,
2478 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002479 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002480 }
njn25e49d8e72002-09-23 09:36:25 +00002481}
2482
njn26f02512004-11-22 18:33:15 +00002483VG_DETERMINE_INTERFACE_VERSION(TL_(pre_clo_init), 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002484
njn25e49d8e72002-09-23 09:36:25 +00002485/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002486/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002487/*--------------------------------------------------------------------*/