blob: 2ab2987aadaa1b5a600fcceeba049162cf225a89 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjc859fbf2005-04-22 21:10:28 +000033/* TODO 22 Apr 05
sewardj45d94cc2005-04-20 14:44:11 +000034
sewardjc859fbf2005-04-22 21:10:28 +000035 test whether it would be faster, for LOADV4, to check
36 only for 8-byte validity on the fast path
sewardj45d94cc2005-04-20 14:44:11 +000037*/
38
njn25cac76cb2002-09-23 11:21:57 +000039#include "mc_include.h"
40#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000041
sewardj45d94cc2005-04-20 14:44:11 +000042
sewardjc1a2cda2005-04-21 17:34:00 +000043#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
44#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
45
46/* Define to debug the mem audit system. Set to:
47 0 no debugging, fast cases are used
48 1 some sanity checking, fast cases are used
49 2 max sanity checking, only slow cases are used
50*/
sewardj23eb2fd2005-04-22 16:29:19 +000051#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000052
njn25e49d8e72002-09-23 09:36:25 +000053#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
54
njn25e49d8e72002-09-23 09:36:25 +000055
njn25e49d8e72002-09-23 09:36:25 +000056/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000057/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000058/*------------------------------------------------------------*/
59
sewardjc859fbf2005-04-22 21:10:28 +000060/* TODO: fix this comment */
61//zz /* All reads and writes are checked against a memory map, which
62//zz records the state of all memory in the process. The memory map is
63//zz organised like this:
64//zz
65//zz The top 16 bits of an address are used to index into a top-level
66//zz map table, containing 65536 entries. Each entry is a pointer to a
67//zz second-level map, which records the accesibililty and validity
68//zz permissions for the 65536 bytes indexed by the lower 16 bits of the
69//zz address. Each byte is represented by nine bits, one indicating
70//zz accessibility, the other eight validity. So each second-level map
71//zz contains 73728 bytes. This two-level arrangement conveniently
72//zz divides the 4G address space into 64k lumps, each size 64k bytes.
73//zz
74//zz All entries in the primary (top-level) map must point to a valid
75//zz secondary (second-level) map. Since most of the 4G of address
76//zz space will not be in use -- ie, not mapped at all -- there is a
77//zz distinguished secondary map, which indicates `not addressible and
78//zz not valid' writeable for all bytes. Entries in the primary map for
79//zz which the entire 64k is not in use at all point at this
80//zz distinguished map.
81//zz
//zz    There are actually 3 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, or addressable+valid.
85//zz
86//zz [...] lots of stuff deleted due to out of date-ness
87//zz
88//zz As a final optimisation, the alignment and address checks for
89//zz 4-byte loads and stores are combined in a neat way. The primary
90//zz map is extended to have 262144 entries (2^18), rather than 2^16.
91//zz The top 3/4 of these entries are permanently set to the
92//zz distinguished secondary map. For a 4-byte load/store, the
93//zz top-level map is indexed not with (addr >> 16) but instead f(addr),
94//zz where
95//zz
96//zz f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
97//zz = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
98//zz = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
99//zz
100//zz ie the lowest two bits are placed above the 16 high address bits.
101//zz If either of these two bits are nonzero, the address is misaligned;
102//zz this will select a secondary map from the upper 3/4 of the primary
103//zz map. Because this is always the distinguished secondary map, a
104//zz (bogus) address check failure will result. The failure handling
105//zz code can then figure out whether this is a genuine addr check
106//zz failure or whether it is a possibly-legitimate access at a
107//zz misaligned address.
108//zz */
109
sewardj45d94cc2005-04-20 14:44:11 +0000110/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000111
sewardj23eb2fd2005-04-22 16:29:19 +0000112/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000113
sewardje4ccc012005-05-02 12:53:38 +0000114#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000115
116/* cover the entire address space */
117# define N_PRIMARY_BITS 16
118
119#else
120
121/* Just handle the first 16G fast and the rest via auxiliary
122 primaries. */
123# define N_PRIMARY_BITS 18
124
125#endif
126
sewardj45d94cc2005-04-20 14:44:11 +0000127
sewardjc1a2cda2005-04-21 17:34:00 +0000128/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000129#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000130
131/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000132#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
133
134
/* --------------- Stats maps --------------- */

static Int   n_secmaps_issued   = 0;  /* SecMaps cloned by copy_for_writing */
static ULong n_auxmap_searches  = 0;  /* auxmap lookups performed */
static ULong n_auxmap_cmps      = 0;  /* total auxmap entry comparisons */
static Int   n_sanity_cheap     = 0;  /* presumably counts cheap sanity runs -- used elsewhere in file */
static Int   n_sanity_expensive = 0;  /* presumably counts expensive sanity runs -- used elsewhere in file */
sewardj45d94cc2005-04-20 14:44:11 +0000142
143
144/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000145
/* A secondary map shadows one 64k chunk of address space: one
   addressibility (A) bit per byte (8192 * 8 == 65536 bits) and one
   validity/definedness (V) byte per byte. */
typedef
   struct {
      UChar abits[8192];    /* A bits: 1 bit per shadowed byte */
      UChar vbyte[65536];   /* V bits: 1 byte per shadowed byte */
   }
   SecMap;
152
/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

/* Shared read-only templates; primary/auxmap entries point here until
   a write forces copy_for_writing() to clone one. */
static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000162
sewardj45d94cc2005-04-20 14:44:11 +0000163static inline Bool is_distinguished_sm ( SecMap* sm ) {
164 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
165}
njnb8dca862005-03-14 02:42:44 +0000166
/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   /* Only the three shared templates may be cloned this way. */
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   /* Allocate fresh shadow memory and copy the template's A/V state. */
   new_sm = VG_(shadow_alloc)(sizeof(SecMap));
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;   /* stats only */
   return new_sm;
}
njnb8dca862005-03-14 02:42:44 +0000182
sewardj45d94cc2005-04-20 14:44:11 +0000183
184/* --------------- Primary maps --------------- */
185
186/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000187 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000188 handled using the auxiliary primary map.
189*/
sewardj23eb2fd2005-04-22 16:29:19 +0000190static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000191
192
/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;   /* 64k-aligned chunk base address */
      SecMap* sm;     /* secondary map shadowing [base, base+64k) */
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 500 /* HACK: fixed capacity; overflow trips tl_assert2 */
static AuxMapEnt hacky_auxmaps[N_AUXMAPS];
static Int auxmap_size = N_AUXMAPS;   /* capacity */
static Int auxmap_used = 0;           /* entries currently in use */
static AuxMapEnt* auxmap = &hacky_auxmaps[0];
211
sewardj45d94cc2005-04-20 14:44:11 +0000212
/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, return NULL.  Note carefully that because
   each call potentially rearranges the entries, each call to this
   function invalidates ALL AuxMapEnt*s previously obtained by calling
   this fn.
*/
static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   /* Only 64k-aligned chunk bases are stored in the map. */
   a &= ~(Addr)0xFFFF;

   /* Linear search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front (move-to-front
         heuristic, so hot entries are found quickly). */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   return NULL;
}
248
249
/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that because
   each call potentially rearranges the entries, each call to this
   function invalidates ALL AuxMapEnt*s previously obtained by calling
   this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);
   if (am)
      return am;

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   /* New chunks start as no-access; a writable copy of the secondary
      is made only when something is actually written. */
   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}
283
284
285/* --------------- SecMap fundamentals --------------- */
286
287/* Produce the secmap for 'a', either from the primary map or by
288 ensuring there is an entry for it in the aux primary map. The
289 secmap may be a distinguished one as the caller will only want to
290 be able to read it.
291*/
292static SecMap* get_secmap_readable ( Addr a )
293{
294 if (a <= MAX_PRIMARY_ADDRESS) {
295 UWord pm_off = a >> 16;
296 return primary_map[ pm_off ];
297 } else {
298 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
299 return am->sm;
300 }
301}
302
sewardj05fe85e2005-04-27 22:46:36 +0000303/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
304 allocate one if one doesn't already exist. This is used by the
305 leak checker.
306*/
307static SecMap* maybe_get_secmap_for ( Addr a )
308{
309 if (a <= MAX_PRIMARY_ADDRESS) {
310 UWord pm_off = a >> 16;
311 return primary_map[ pm_off ];
312 } else {
313 AuxMapEnt* am = maybe_find_in_auxmap(a);
314 return am ? am->sm : NULL;
315 }
316}
317
318
319
sewardj45d94cc2005-04-20 14:44:11 +0000320/* Produce the secmap for 'a', either from the primary map or by
321 ensuring there is an entry for it in the aux primary map. The
322 secmap may not be a distinguished one, since the caller will want
323 to be able to write it. If it is a distinguished secondary, make a
324 writable copy of it, install it, and return the copy instead. (COW
325 semantics).
326*/
327static SecMap* get_secmap_writable ( Addr a )
328{
329 if (a <= MAX_PRIMARY_ADDRESS) {
330 UWord pm_off = a >> 16;
331 if (is_distinguished_sm(primary_map[ pm_off ]))
332 primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
333 return primary_map[pm_off];
334 } else {
335 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
336 if (is_distinguished_sm(am->sm))
337 am->sm = copy_for_writing(am->sm);
338 return am->sm;
339 }
340}
341
342
343/* --------------- Endianness helpers --------------- */
344
345/* Returns the offset in memory of the byteno-th most significant byte
346 in a wordszB-sized word, given the specified endianness. */
347static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
348 UWord byteno ) {
349 return bigendian ? (wordszB-1-byteno) : byteno;
350}
351
352
353/* --------------- Fundamental functions --------------- */
354
/* Read both the A bit and the V byte for address 'a'.  May allocate
   an auxmap entry for a's chunk, but never writes shadow state. */
static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}
364
/* Read just the A (addressibility) bit for address 'a'. */
static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}
371
/* Write both the A bit and the V byte for address 'a'.  Goes via the
   writable (COW'd if necessary) secondary. */
static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}
379
/* Write just the V byte for address 'a', leaving its A bit alone. */
static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}
386
387
388/* --------------- Load/store slow cases. --------------- */
389
/* Generic (slow-path) load handler for all sizes: assemble the V bits
   for a szB-byte load from 'a', one shadow byte at a time, and report
   a single address error if any byte was inaccessible. */
static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      /* Address of the i-th most significant byte, per endianness. */
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      /* Inaccessible bytes are presented as Defined, so the address
         error below is not followed by spurious value errors. */
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   /* Report at most one address error covering the whole access. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}
425
426
/* Generic (slow-path) store handler for all sizes: write the V bits
   'vbytes' for a szB-byte store to 'a', one shadow byte at a time,
   and report a single address error if any byte was inaccessible. */
static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressibility of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      /* V bits are written even for unaddressible bytes. */
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}
457
458
sewardj45d94cc2005-04-20 14:44:11 +0000459//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
460//zz
461//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
462//zz {
463//zz SecMap* sm;
464//zz UInt sm_off;
465//zz UChar abits8;
466//zz PROF_EVENT(24);
467//zz # ifdef VG_DEBUG_MEMORY
468//zz tl_assert(VG_IS_4_ALIGNED(a));
469//zz # endif
470//zz sm = primary_map[PM_IDX(a)];
471//zz sm_off = SM_OFF(a);
472//zz abits8 = sm->abits[sm_off >> 3];
473//zz abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
474//zz abits8 &= 0x0F;
475//zz return abits8;
476//zz }
477//zz
478//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
479//zz {
480//zz SecMap* sm = primary_map[PM_IDX(a)];
481//zz UInt sm_off = SM_OFF(a);
482//zz PROF_EVENT(25);
483//zz # ifdef VG_DEBUG_MEMORY
484//zz tl_assert(VG_IS_4_ALIGNED(a));
485//zz # endif
486//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
487//zz }
488//zz
489//zz
490//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
491//zz {
492//zz SecMap* sm;
493//zz UInt sm_off;
494//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
495//zz sm = primary_map[PM_IDX(a)];
496//zz sm_off = SM_OFF(a);
497//zz PROF_EVENT(23);
498//zz # ifdef VG_DEBUG_MEMORY
499//zz tl_assert(VG_IS_4_ALIGNED(a));
500//zz # endif
501//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
502//zz }
sewardjee070842003-07-05 17:53:55 +0000503
504
njn25e49d8e72002-09-23 09:36:25 +0000505/*------------------------------------------------------------*/
506/*--- Setting permissions over address ranges. ---*/
507/*------------------------------------------------------------*/
508
sewardj23eb2fd2005-04-22 16:29:19 +0000509/* Given address 'a', find the place where the pointer to a's
510 secondary map lives. If a falls into the primary map, the returned
511 value points to one of the entries in primary_map[]. Otherwise,
512 the auxiliary primary map is searched for 'a', or an entry is
513 created for it; either way, the returned value points to the
514 relevant AuxMapEnt's .sm field.
515
516 The point of this is to enable set_address_range_perms to assign
517 secondary maps in a uniform way, without worrying about whether a
518 given secondary map is pointed to from the main or auxiliary
519 primary map.
520*/
521
522static SecMap** find_secmap_binder_for_addr ( Addr aA )
523{
524 if (aA > MAX_PRIMARY_ADDRESS) {
525 AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
526 return &am->sm;
527 } else {
528 UWord a = (UWord)aA;
529 UWord sec_no = (UWord)(a >> 16);
530# if VG_DEBUG_MEMORY >= 1
531 tl_assert(sec_no < N_PRIMARY_MAP);
532# endif
533 return &primary_map[sec_no];
534 }
535}
536
537
538static void set_address_range_perms ( Addr aA, SizeT len,
sewardj45d94cc2005-04-20 14:44:11 +0000539 UWord example_a_bit,
540 UWord example_v_bit )
njn25e49d8e72002-09-23 09:36:25 +0000541{
sewardj23eb2fd2005-04-22 16:29:19 +0000542 PROF_EVENT(150, "set_address_range_perms");
543
544 /* Check the permissions make sense. */
545 tl_assert(example_a_bit == VGM_BIT_VALID
546 || example_a_bit == VGM_BIT_INVALID);
547 tl_assert(example_v_bit == VGM_BIT_VALID
548 || example_v_bit == VGM_BIT_INVALID);
549 if (example_a_bit == VGM_BIT_INVALID)
550 tl_assert(example_v_bit == VGM_BIT_INVALID);
551
552 if (len == 0)
553 return;
554
555 if (VG_(clo_verbosity) > 0) {
556 if (len > 100 * 1000 * 1000) {
557 VG_(message)(Vg_UserMsg,
558 "Warning: set address range perms: "
559 "large range %u, a %d, v %d",
560 len, example_a_bit, example_v_bit );
561 }
562 }
563
564 UWord a = (UWord)aA;
565
566# if VG_DEBUG_MEMORY >= 2
567
568 /*------------------ debug-only case ------------------ */
sewardj45d94cc2005-04-20 14:44:11 +0000569 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000570
sewardj23eb2fd2005-04-22 16:29:19 +0000571 UWord example_vbyte = BIT_TO_BYTE(example_v_bit);
sewardj45d94cc2005-04-20 14:44:11 +0000572
573 tl_assert(sizeof(SizeT) == sizeof(Addr));
574
575 if (0 && len >= 4096)
576 VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
577 (ULong)a, len, example_a_bit, example_v_bit);
njn25e49d8e72002-09-23 09:36:25 +0000578
579 if (len == 0)
580 return;
581
sewardj45d94cc2005-04-20 14:44:11 +0000582 for (i = 0; i < len; i++) {
583 set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
njn25e49d8e72002-09-23 09:36:25 +0000584 }
njn25e49d8e72002-09-23 09:36:25 +0000585
sewardj23eb2fd2005-04-22 16:29:19 +0000586# else
587
588 /*------------------ standard handling ------------------ */
589 UWord vbits8, abits8, vbits32, v_off, a_off;
590 SecMap* sm;
591 SecMap** binder;
592 SecMap* example_dsm;
593
594 /* Decide on the distinguished secondary that we might want
595 to use (part of the space-compression scheme). */
596 if (example_a_bit == VGM_BIT_INVALID) {
597 example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
598 } else {
599 if (example_v_bit == VGM_BIT_VALID) {
600 example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
601 } else {
602 example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
603 }
604 }
605
606 /* Make various wider versions of the A/V values to use. */
607 vbits8 = BIT_TO_BYTE(example_v_bit);
608 abits8 = BIT_TO_BYTE(example_a_bit);
609 vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;
610
611 /* Slowly do parts preceding 8-byte alignment. */
612 while (True) {
613 if (len == 0) break;
614 PROF_EVENT(151, "set_address_range_perms-loop1-pre");
615 if (VG_IS_8_ALIGNED(a)) break;
616 set_abit_and_vbyte( a, example_a_bit, vbits8 );
617 a++;
618 len--;
619 }
620
621 if (len == 0)
622 return;
623
624 tl_assert(VG_IS_8_ALIGNED(a) && len > 0);
625
626 /* Now go in steps of 8 bytes. */
627 binder = find_secmap_binder_for_addr(a);
628
629 while (True) {
630
631 if (len < 8) break;
632
633 PROF_EVENT(152, "set_address_range_perms-loop8");
634
635 if ((a & SECONDARY_MASK) == 0) {
636 /* we just traversed a primary map boundary, so update the
637 binder. */
638 binder = find_secmap_binder_for_addr(a);
639 PROF_EVENT(153, "set_address_range_perms-update-binder");
640
641 /* Space-optimisation. If we are setting the entire
642 secondary map, just point this entry at one of our
643 distinguished secondaries. However, only do that if it
644 already points at a distinguished secondary, since doing
645 otherwise would leak the existing secondary. We could do
646 better and free up any pre-existing non-distinguished
647 secondary at this point, since we are guaranteed that each
648 non-dist secondary only has one pointer to it, and we have
649 that pointer right here. */
650 if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
651 PROF_EVENT(154, "set_address_range_perms-entire-secmap");
652 *binder = example_dsm;
653 len -= SECONDARY_SIZE;
654 a += SECONDARY_SIZE;
655 continue;
656 }
657 }
658
659 /* If the primary is already pointing to a distinguished map
660 with the same properties as we're trying to set, then leave
661 it that way. */
662 if (*binder == example_dsm) {
663 a += 8;
664 len -= 8;
665 continue;
666 }
667
668 /* Make sure it's OK to write the secondary. */
669 if (is_distinguished_sm(*binder))
670 *binder = copy_for_writing(*binder);
671
672 sm = *binder;
673 v_off = a & 0xFFFF;
674 a_off = v_off >> 3;
675 sm->abits[a_off] = (UChar)abits8;
676 ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
677 ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;
678
679 a += 8;
680 len -= 8;
681 }
682
683 if (len == 0)
684 return;
685
686 tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);
687
688 /* Finish the upper fragment. */
689 while (True) {
690 if (len == 0) break;
691 PROF_EVENT(155, "set_address_range_perms-loop1-post");
692 set_abit_and_vbyte ( a, example_a_bit, vbits8 );
693 a++;
694 len--;
695 }
696
697# endif
698}
sewardj45d94cc2005-04-20 14:44:11 +0000699
sewardjc859fbf2005-04-22 21:10:28 +0000700
701/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +0000702
nethercote8b76fe52004-11-08 19:20:09 +0000703static void mc_make_noaccess ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000704{
sewardjc1a2cda2005-04-21 17:34:00 +0000705 PROF_EVENT(40, "mc_make_noaccess");
nethercote8b76fe52004-11-08 19:20:09 +0000706 DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000707 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
708}
709
nethercote8b76fe52004-11-08 19:20:09 +0000710static void mc_make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000711{
sewardjc1a2cda2005-04-21 17:34:00 +0000712 PROF_EVENT(41, "mc_make_writable");
nethercote8b76fe52004-11-08 19:20:09 +0000713 DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000714 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
715}
716
nethercote8b76fe52004-11-08 19:20:09 +0000717static void mc_make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000718{
sewardjc1a2cda2005-04-21 17:34:00 +0000719 PROF_EVENT(42, "mc_make_readable");
nethercote8b76fe52004-11-08 19:20:09 +0000720 DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000721 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
722}
723
njn9b007f62003-04-07 14:40:25 +0000724
sewardjc859fbf2005-04-22 21:10:28 +0000725/* --- Block-copy permissions (needed for implementing realloc()). --- */
726
727static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
728{
729 SizeT i;
730 UWord abit, vbyte;
731
732 DEBUG("mc_copy_address_range_state\n");
733
734 PROF_EVENT(50, "mc_copy_address_range_state");
735 for (i = 0; i < len; i++) {
736 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
737 get_abit_and_vbyte( &abit, &vbyte, src+i );
738 set_abit_and_vbyte( dst+i, abit, vbyte );
739 }
740}
741
742
743/* --- Fast case permission setters, for dealing with stacks. --- */
744
/* Fast case: mark a 4-aligned word as addressible-but-undefined,
   going straight at the shadow state when the address is in the main
   primary map. */
static __inline__
void make_aligned_word32_writable ( Addr aA )
{
   PROF_EVENT(300, "make_aligned_word32_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 4);
#  else

   /* Addresses above the main primary map take the generic path. */
   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(301, "make_aligned_word32_writable-slow1");
      mc_make_writable(aA, 4);
      return;
   }

   UWord a = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   /* COW the secondary if it is still a shared distinguished one. */
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s). */
   sm->abits[a_off] &= ~mask;
#  endif
}
783
sewardj5d28efc2005-04-21 22:16:29 +0000784
/* Fast case: mark a 4-aligned word as unaddressible, going straight
   at the shadow state when the address is in the main primary map. */
static __inline__
void make_aligned_word32_noaccess ( Addr aA )
{
   PROF_EVENT(310, "make_aligned_word32_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 4);
#  else

   /* Addresses above the main primary map take the generic path. */
   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      mc_make_noaccess(aA, 4);
      return;
   }

   UWord a = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   /* COW the secondary if it is still a shared distinguished one. */
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid
      (1s). */
   sm->abits[a_off] |= mask;
#  endif
}
824
825
/* Nb: by "aligned" here we mean 8-byte aligned */
/* Fast case: mark an 8-aligned word as addressible-but-undefined. */
static __inline__
void make_aligned_word64_writable ( Addr aA )
{
   PROF_EVENT(320, "make_aligned_word64_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 8);
#  else

   /* Addresses above the main primary map take the generic path. */
   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(321, "make_aligned_word64_writable-slow1");
      mc_make_writable(aA, 8);
      return;
   }

   UWord a = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   /* COW the secondary if it is still a shared distinguished one. */
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the relevant area accessible.  Since a is 8-aligned, its
      8 A bits occupy exactly one abits[] byte. */
   sm->abits[a_off] = VGM_BYTE_VALID;
#  endif
}
862
sewardj23eb2fd2005-04-22 16:29:19 +0000863
njn9b007f62003-04-07 14:40:25 +0000864static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000865void make_aligned_word64_noaccess ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000866{
sewardj23eb2fd2005-04-22 16:29:19 +0000867 PROF_EVENT(330, "make_aligned_word64_noaccess");
868
869# if VG_DEBUG_MEMORY >= 2
870 mc_make_noaccess(aA, 8);
871# else
872
873 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
874 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
875 mc_make_noaccess(aA, 8);
876 return;
877 }
878
879 UWord a = (UWord)aA;
880 UWord sec_no = (UWord)(a >> 16);
881# if VG_DEBUG_MEMORY >= 1
882 tl_assert(sec_no < N_PRIMARY_MAP);
883# endif
884
885 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
886 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
887
888 SecMap* sm = primary_map[sec_no];
889 UWord v_off = a & 0xFFFF;
890 UWord a_off = v_off >> 3;
891
892 /* Paint the abandoned data as uninitialised. Probably not
893 necessary, but still .. */
894 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
895
896 /* Make the abandoned area inaccessible. */
897 sm->abits[a_off] = VGM_BYTE_INVALID;
898# endif
njn9b007f62003-04-07 14:40:25 +0000899}
900
sewardj23eb2fd2005-04-22 16:29:19 +0000901
/* The stack-pointer update handling functions.  This instantiates the
   handlers invoked when the guest stack pointer moves: aligned 4- and
   8-byte fast cases, plus the general byte-range writable/noaccess
   routines as fallbacks. */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );
njn9b007f62003-04-07 14:40:25 +0000910
sewardj45d94cc2005-04-20 14:44:11 +0000911
/* Mark [base, base+len) as writable: addressible, contents undefined
   (per the name, used to paint a fresh stack area as uninitialised).
   NOTE(review): the helperc_ naming suggests this is called from
   generated code -- confirm against the instrumenter. */
void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
{
   /* len arrives as UWord; mc_make_writable takes SizeT. */
   tl_assert(sizeof(UWord) == sizeof(SizeT));
   // VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
   mc_make_writable(base, len);
}
918
919
nethercote8b76fe52004-11-08 19:20:09 +0000920/*------------------------------------------------------------*/
921/*--- Checking memory ---*/
922/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000923
/* Result of a readability (A + V bit) check over a range.  The values
   are distinct small non-zero constants -- NOTE(review): presumably
   chosen so that accidental use as a Bool stands out; confirm. */
typedef
   enum {
      MC_Ok = 5,         /* range fully addressible and defined */
      MC_AddrErr = 6,    /* some byte is not addressible */
      MC_ValueErr = 7    /* addressible, but some byte is undefined */
   }
   MC_ReadResult;
931
932
njn25e49d8e72002-09-23 09:36:25 +0000933/* Check permissions for address range. If inadequate permissions
934 exist, *bad_addr is set to the offending address, so the caller can
935 know what it is. */
936
sewardjecf8e102003-07-12 12:11:39 +0000937/* Returns True if [a .. a+len) is not addressible. Otherwise,
938 returns False, and if bad_addr is non-NULL, sets *bad_addr to
939 indicate the lowest failing address. Functions below are
940 similar. */
nethercote8b76fe52004-11-08 19:20:09 +0000941static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +0000942{
nethercote451eae92004-11-02 13:06:32 +0000943 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +0000944 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +0000945 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +0000946 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000947 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +0000948 abit = get_abit(a);
949 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000950 if (bad_addr != NULL)
951 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +0000952 return False;
953 }
954 a++;
955 }
956 return True;
957}
958
nethercote8b76fe52004-11-08 19:20:09 +0000959static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000960{
nethercote451eae92004-11-02 13:06:32 +0000961 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +0000962 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +0000963 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +0000964 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000965 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +0000966 abit = get_abit(a);
967 if (abit == VGM_BIT_INVALID) {
968 if (bad_addr != NULL) *bad_addr = a;
969 return False;
970 }
971 a++;
972 }
973 return True;
974}
975
nethercote8b76fe52004-11-08 19:20:09 +0000976static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000977{
nethercote451eae92004-11-02 13:06:32 +0000978 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +0000979 UWord abit;
980 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +0000981
sewardjc1a2cda2005-04-21 17:34:00 +0000982 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +0000983 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +0000984 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000985 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000986 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +0000987 // Report addressability errors in preference to definedness errors
988 // by checking the A bits first.
989 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000990 if (bad_addr != NULL)
991 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000992 return MC_AddrErr;
993 }
994 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000995 if (bad_addr != NULL)
996 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000997 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +0000998 }
999 a++;
1000 }
nethercote8b76fe52004-11-08 19:20:09 +00001001 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001002}
1003
1004
1005/* Check a zero-terminated ascii string. Tricky -- don't want to
1006 examine the actual bytes, to find the end, until we're sure it is
1007 safe to do so. */
1008
njn9b007f62003-04-07 14:40:25 +00001009static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001010{
sewardj45d94cc2005-04-20 14:44:11 +00001011 UWord abit;
1012 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +00001013 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +00001014 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00001015 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +00001016 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001017 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001018 // As in mc_check_readable(), check A bits first
1019 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001020 if (bad_addr != NULL)
1021 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001022 return MC_AddrErr;
1023 }
1024 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001025 if (bad_addr != NULL)
1026 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001027 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001028 }
1029 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +00001030 if (* ((UChar*)a) == 0)
1031 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001032 a++;
1033 }
1034}
1035
1036
1037/*------------------------------------------------------------*/
1038/*--- Memory event handlers ---*/
1039/*------------------------------------------------------------*/
1040
njn25e49d8e72002-09-23 09:36:25 +00001041static
njn72718642003-07-24 08:45:32 +00001042void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001043 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001044{
1045 Bool ok;
1046 Addr bad_addr;
1047
1048 VGP_PUSHCC(VgpCheckMem);
1049
1050 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1051 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001052 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001053 if (!ok) {
1054 switch (part) {
1055 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001056 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1057 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001058 break;
1059
1060 case Vg_CorePThread:
1061 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001062 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001063 break;
1064
1065 default:
njn67993252004-11-22 18:02:32 +00001066 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001067 }
1068 }
1069
1070 VGP_POPCC(VgpCheckMem);
1071}
1072
1073static
njn72718642003-07-24 08:45:32 +00001074void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001075 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001076{
njn25e49d8e72002-09-23 09:36:25 +00001077 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001078 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001079
1080 VGP_PUSHCC(VgpCheckMem);
1081
1082 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
1083 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001084 res = mc_check_readable ( base, size, &bad_addr );
1085 if (MC_Ok != res) {
1086 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1087
njn25e49d8e72002-09-23 09:36:25 +00001088 switch (part) {
1089 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001090 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1091 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001092 break;
1093
1094 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001095 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001096 break;
1097
1098 /* If we're being asked to jump to a silly address, record an error
1099 message before potentially crashing the entire system. */
1100 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001101 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001102 break;
1103
1104 default:
njn67993252004-11-22 18:02:32 +00001105 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001106 }
1107 }
1108 VGP_POPCC(VgpCheckMem);
1109}
1110
1111static
njn72718642003-07-24 08:45:32 +00001112void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001113 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001114{
nethercote8b76fe52004-11-08 19:20:09 +00001115 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001116 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001117 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1118
1119 VGP_PUSHCC(VgpCheckMem);
1120
njnca82cc02004-11-22 17:18:48 +00001121 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001122 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1123 if (MC_Ok != res) {
1124 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1125 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001126 }
1127
1128 VGP_POPCC(VgpCheckMem);
1129}
1130
/* Handler for memory present at startup.  The stated rr/ww/xx
   permissions are ignored; the whole range is simply marked readable
   (addressible + defined). */
static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}
1139
1140static
nethercote451eae92004-11-02 13:06:32 +00001141void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001142{
1143 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001144 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001145 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001146 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001147 }
1148}
1149
1150static
njnb8dca862005-03-14 02:42:44 +00001151void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001152{
njnb8dca862005-03-14 02:42:44 +00001153 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001154}
1155
/* After the core writes to client memory (e.g. a syscall filling an
   output buffer), the written range is addressible and defined. */
static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}
njn25e49d8e72002-09-23 09:36:25 +00001161
sewardj45d94cc2005-04-20 14:44:11 +00001162
njn25e49d8e72002-09-23 09:36:25 +00001163/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001164/*--- Register event handlers ---*/
1165/*------------------------------------------------------------*/
1166
/* When some chunk of guest state is written, mark the corresponding
   shadow area as valid.  This is used to initialise arbitrarily large
   chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
   part is unused; offset/size locate the written region within the
   guest state. */
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   UChar area[1024];
   tl_assert(size <= 1024);
   /* Fill a scratch buffer with "valid" V bytes and install it as the
      shadow for the written region. */
   VG_(memset)(area, VGM_BYTE_VALID, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
}
1179
/* As mc_post_reg_write, for the client-call case.  The called
   function's address f is ignored here. */
static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}
1187
sewardj45d94cc2005-04-20 14:44:11 +00001188/* Look at the definedness of the guest's shadow state for
1189 [offset, offset+len). If any part of that is undefined, record
1190 a parameter error.
1191*/
1192static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1193 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001194{
sewardj45d94cc2005-04-20 14:44:11 +00001195 Int i;
1196 Bool bad;
1197
1198 UChar area[16];
1199 tl_assert(size <= 16);
1200
1201 VG_(get_shadow_regs_area)( tid, offset, size, area );
1202
1203 bad = False;
1204 for (i = 0; i < size; i++) {
1205 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001206 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001207 break;
1208 }
nethercote8b76fe52004-11-08 19:20:09 +00001209 }
1210
sewardj45d94cc2005-04-20 14:44:11 +00001211 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001212 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1213}
njnd3040452003-05-19 15:04:06 +00001214
njn25e49d8e72002-09-23 09:36:25 +00001215
sewardj6cf40ff2005-04-20 22:31:26 +00001216/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001217/*--- Printing errors ---*/
1218/*------------------------------------------------------------*/
1219
/* Pretty-print one Memcheck error.  Handles the error kinds specific
   to Memcheck (CoreMemErr, ValueErr, ParamErr, UserErr); anything
   else is delegated to the shared MAC printer. */
static void mc_pp_Error ( Error* err )
{
   MAC_Error* err_extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      /* Core touched client memory that was unaddressable or
         uninitialised. */
      case CoreMemErr: {
         Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         VG_(message)(Vg_UserMsg, "%s contains %s byte(s)",
                      VG_(get_error_string)(err), s);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      }

      /* Use of an undefined value; size==0 encodes the conditional
         jump/move case. */
      case ValueErr:
         if (err_extra->size == 0) {
            VG_(message)(Vg_UserMsg,
               "Conditional jump or move depends on uninitialised value(s)");
         } else {
            VG_(message)(Vg_UserMsg,
               "Use of uninitialised value of size %d",
               err_extra->size);
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      /* Bad syscall parameter, either in a register or pointed-to
         memory.  Register params can only be uninitialised, never
         unaddressable (asserted). */
      case ParamErr: {
         Bool isReg = ( Register == err_extra->addrinfo.akind );
         Char* s1 = ( isReg ? "contains" : "points to" );
         Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         if (isReg) tl_assert(!err_extra->isUnaddr);

         VG_(message)(Vg_UserMsg, "Syscall param %s %s %s byte(s)",
                      VG_(get_error_string)(err), s1, s2);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      /* Failure of a client-requested check. */
      case UserErr: {
         Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );

         VG_(message)(Vg_UserMsg,
            "%s byte(s) found during client check request", s);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      default:
         MAC_(pp_shared_Error)(err);
         break;
   }
}
1274
1275/*------------------------------------------------------------*/
1276/*--- Recording errors ---*/
1277/*------------------------------------------------------------*/
1278
1279/* Creates a copy of the `extra' part, updates the copy with address info if
1280 necessary, and returns the copy. */
1281/* This one called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00001282static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001283{
1284 MAC_Error err_extra;
1285
1286 MAC_(clear_MAC_Error)( &err_extra );
1287 err_extra.size = size;
1288 err_extra.isUnaddr = False;
1289 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1290}
1291
1292/* This called from non-generated code */
1293
njn96364822005-05-08 19:04:53 +00001294static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1295 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001296{
1297 MAC_Error err_extra;
1298
1299 tl_assert(VG_INVALID_THREADID != tid);
1300 MAC_(clear_MAC_Error)( &err_extra );
1301 err_extra.addrinfo.akind = Undescribed;
1302 err_extra.isUnaddr = isUnaddr;
1303 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1304}
1305
1306/*------------------------------------------------------------*/
1307/*--- Suppressions ---*/
1308/*------------------------------------------------------------*/
1309
njn51d827b2005-05-09 01:02:08 +00001310static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001311{
1312 SuppKind skind;
1313
1314 if (MAC_(shared_recognised_suppression)(name, su))
1315 return True;
1316
1317 /* Extra suppressions not used by Addrcheck */
1318 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1319 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1320 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1321 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1322 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1323 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1324 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1325 else
1326 return False;
1327
1328 VG_(set_supp_kind)(su, skind);
1329 return True;
1330}
1331
1332/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001333/*--- Functions called directly from generated code: ---*/
1334/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001335/*------------------------------------------------------------*/
1336
1337/* Types: LOADV4, LOADV2, LOADV1 are:
1338 UWord fn ( Addr a )
1339 so they return 32-bits on 32-bit machines and 64-bits on
1340 64-bit machines. Addr has the same size as a host word.
1341
1342 LOADV8 is always ULong fn ( Addr a )
1343
1344 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1345 are a UWord, and for STOREV8 they are a ULong.
1346*/
1347
sewardj95448072004-11-22 20:19:51 +00001348/* ------------------------ Size = 8 ------------------------ */
1349
/* Called from generated code: load the 8 V (validity) bytes for the
   8-byte-aligned address aA.  Fast path requires aA to be naturally
   aligned and within the primary map's range, and the whole word64 to
   be addressible; anything else goes through mc_LOADVn_slow. */
VGA_REGPARM(1)
ULong MC_(helperc_LOADV8) ( Addr aA )
{
   PROF_EVENT(200, "helperc_LOADV8");

# if VG_DEBUG_MEMORY >= 2
   return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
# else

   /* mask has 1s at: the low alignment bits, and all bits above the
      address range the primary map covers. */
   const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(201, "helperc_LOADV8-slow1");
      return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
   }

   UWord sec_no = (UWord)(a >> 16);   /* primary-map index */

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;          /* offset within secondary map */
   UWord   a_off = v_off >> 3;          /* abits byte: 1 A bit per address */
   UWord   abits = (UWord)(sm->abits[a_off]);

   if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
   } else {
      /* Slow but general case. */
      PROF_EVENT(202, "helperc_LOADV8-slow2");
      return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
   }

# endif
}
1393
/* Called from generated code: store the 8 V bytes 'vbytes' for the
   8-byte-aligned address aA.  Fast path as for LOADV8, but must also
   avoid writing into a distinguished (shared read-only) secondary
   map. */
VGA_REGPARM(1)
void MC_(helperc_STOREV8) ( Addr aA, ULong vbytes )
{
   PROF_EVENT(210, "helperc_STOREV8");

# if VG_DEBUG_MEMORY >= 2
   mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
# else

   /* 1s at the alignment bits and above the primary map's range. */
   const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(211, "helperc_STOREV8-slow1");
      mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
      return;
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;
   UWord   abits = (UWord)(sm->abits[a_off]);

   /* The distinguished-map check keeps shared secondaries immutable;
      the slow path handles copy-for-writing. */
   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
                      && abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes;
   } else {
      /* Slow but general case. */
      PROF_EVENT(212, "helperc_STOREV8-slow2");
      mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
   }
# endif
}
1438
1439/* ------------------------ Size = 4 ------------------------ */
1440
/* Called from generated code: load the 4 V bytes for the 4-byte-
   aligned address aA.  The A-bit check extracts the nibble of the
   abits byte covering these 4 addresses. */
VGA_REGPARM(1)
UWord MC_(helperc_LOADV4) ( Addr aA )
{
   PROF_EVENT(220, "helperc_LOADV4");

# if VG_DEBUG_MEMORY >= 2
   return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(221, "helperc_LOADV4-slow1");
      return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;
   UWord   abits = (UWord)(sm->abits[a_off]);
   /* Select the nibble of A bits for this word32: (a & 4) is 0 or 4. */
   abits >>= (a & 4);
   abits &= 15;
   if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      /* On a 32-bit platform, simply hoick the required 32 bits out of
         the vbyte array.  On a 64-bit platform, also set the upper 32
         bits to 1 ("undefined"), just in case.  This almost certainly
         isn't necessary, but be paranoid. */
      UWord ret = (UWord)0xFFFFFFFF00000000ULL;
      ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
      return ret;
   } else {
      /* Slow but general case. */
      PROF_EVENT(222, "helperc_LOADV4-slow2");
      return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
   }

# endif
}
1491
/* Called from generated code: store the 4 V bytes 'vbytes' for the
   4-byte-aligned address aA.  Nibble-granularity A-bit check, plus
   the distinguished-map guard as in STOREV8. */
VGA_REGPARM(2)
void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
{
   PROF_EVENT(230, "helperc_STOREV4");

# if VG_DEBUG_MEMORY >= 2
   mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(231, "helperc_STOREV4-slow1");
      mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
      return;
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;
   UWord   abits = (UWord)(sm->abits[a_off]);
   /* Select the nibble of A bits covering this word32. */
   abits >>= (a & 4);
   abits &= 15;
   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
                      && abits == VGM_NIBBLE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
   } else {
      /* Slow but general case. */
      PROF_EVENT(232, "helperc_STOREV4-slow2");
      mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
   }
# endif
}
1537
sewardj95448072004-11-22 20:19:51 +00001538/* ------------------------ Size = 2 ------------------------ */
1539
/* Called from generated code: load the 2 V bytes for the 2-byte-
   aligned address aA.  Note the fast path requires the whole abits
   byte to be valid, i.e. the entire containing word64 must be
   addressible; otherwise it falls back to the slow path. */
VGA_REGPARM(1)
UWord MC_(helperc_LOADV2) ( Addr aA )
{
   PROF_EVENT(240, "helperc_LOADV2");

# if VG_DEBUG_MEMORY >= 2
   return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(241, "helperc_LOADV2-slow1");
      return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;
   UWord   abits = (UWord)(sm->abits[a_off]);
   if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is mapped, and the entire
         word64 it lives in is addressible.  (The abits byte checked
         above covers 8 addresses, not just these 2.) */
      /* Set the upper 16/48 bits of the result to 1 ("undefined"),
         just in case.  This almost certainly isn't necessary, but be
         paranoid. */
      return (~(UWord)0xFFFF)
             |
             (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
   } else {
      /* Slow but general case. */
      PROF_EVENT(242, "helperc_LOADV2-slow2");
      return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
   }

# endif
}
1587
/* Called from generated code: store the 2 V bytes 'vbytes' for the
   2-byte-aligned address aA.  As with LOADV2, the fast path requires
   the whole containing word64 to be addressible, plus the
   distinguished-map guard before writing. */
VGA_REGPARM(2)
void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
{
   PROF_EVENT(250, "helperc_STOREV2");

# if VG_DEBUG_MEMORY >= 2
   mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(251, "helperc_STOREV2-slow1");
      mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
      return;
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;
   UWord   abits = (UWord)(sm->abits[a_off]);
   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
                      && abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly. */
      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
   } else {
      /* Slow but general case. */
      PROF_EVENT(252, "helperc_STOREV2-slow2");
      mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
   }
# endif
}
1630
sewardj95448072004-11-22 20:19:51 +00001631/* ------------------------ Size = 1 ------------------------ */
1632
/* Called from generated code: load the single V byte for address aA.
   No alignment requirement (mask only excludes out-of-range
   addresses); the fast path still requires the whole containing
   word64 to be addressible. */
VGA_REGPARM(1)
UWord MC_(helperc_LOADV1) ( Addr aA )
{
   PROF_EVENT(260, "helperc_LOADV1");

# if VG_DEBUG_MEMORY >= 2
   return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, it means 'a'
      exceeds the range covered by the primary map.  In which case we
      defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(261, "helperc_LOADV1-slow1");
      return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;
   UWord   abits = (UWord)(sm->abits[a_off]);
   if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is mapped, and the entire
         word64 it lives in is addressible (the abits byte covers 8
         addresses). */
      /* Set the upper 24/56 bits of the result to 1 ("undefined"),
         just in case.  This almost certainly isn't necessary, but be
         paranoid. */
      return (~(UWord)0xFF)
             |
             (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
   } else {
      /* Slow but general case. */
      PROF_EVENT(262, "helperc_LOADV1-slow2");
      return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
   }
# endif
}
1679
sewardjc1a2cda2005-04-21 17:34:00 +00001680
njn9fb73db2005-03-27 01:55:21 +00001681VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001682void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001683{
sewardjc1a2cda2005-04-21 17:34:00 +00001684 PROF_EVENT(270, "helperc_STOREV1");
1685
1686# if VG_DEBUG_MEMORY >= 2
1687 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1688# else
1689
sewardj23eb2fd2005-04-22 16:29:19 +00001690 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001691 UWord a = (UWord)aA;
1692 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1693 exceeds the range covered by the primary map. In which case we
1694 defer to the slow-path case. */
1695 if (EXPECTED_NOT_TAKEN(a & mask)) {
1696 PROF_EVENT(271, "helperc_STOREV1-slow1");
1697 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1698 return;
1699 }
1700
1701 UWord sec_no = (UWord)(a >> 16);
1702
1703# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001704 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001705# endif
1706
1707 SecMap* sm = primary_map[sec_no];
1708 UWord v_off = a & 0xFFFF;
1709 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001710 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001711 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1712 && abits == VGM_BYTE_VALID)) {
1713 /* Handle common case quickly: a is mapped, the entire word32 it
1714 lives in is addressible. */
1715 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1716 } else {
1717 PROF_EVENT(272, "helperc_STOREV1-slow2");
1718 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1719 }
1720
1721# endif
njn25e49d8e72002-09-23 09:36:25 +00001722}
1723
1724
sewardjc859fbf2005-04-22 21:10:28 +00001725/*------------------------------------------------------------*/
1726/*--- Functions called directly from generated code: ---*/
1727/*--- Value-check failure handlers. ---*/
1728/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001729
njn5c004e42002-11-18 11:04:50 +00001730void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001731{
njn9e63cb62005-05-08 18:34:59 +00001732 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001733}
1734
njn5c004e42002-11-18 11:04:50 +00001735void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001736{
njn9e63cb62005-05-08 18:34:59 +00001737 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001738}
1739
njn5c004e42002-11-18 11:04:50 +00001740void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001741{
njn9e63cb62005-05-08 18:34:59 +00001742 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001743}
1744
sewardj11bcc4e2005-04-23 22:38:38 +00001745void MC_(helperc_value_check8_fail) ( void )
1746{
njn9e63cb62005-05-08 18:34:59 +00001747 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001748}
1749
njn9fb73db2005-03-27 01:55:21 +00001750VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001751{
njn9e63cb62005-05-08 18:34:59 +00001752 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001753}
1754
njn25e49d8e72002-09-23 09:36:25 +00001755
sewardj45d94cc2005-04-20 14:44:11 +00001756//zz /*------------------------------------------------------------*/
1757//zz /*--- Metadata get/set functions, for client requests. ---*/
1758//zz /*------------------------------------------------------------*/
1759//zz
1760//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1761//zz error, 3 == addressing error. */
1762//zz static Int mc_get_or_set_vbits_for_client (
1763//zz ThreadId tid,
1764//zz Addr dataV,
1765//zz Addr vbitsV,
1766//zz SizeT size,
1767//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1768//zz )
1769//zz {
1770//zz Bool addressibleD = True;
1771//zz Bool addressibleV = True;
1772//zz UInt* data = (UInt*)dataV;
1773//zz UInt* vbits = (UInt*)vbitsV;
1774//zz SizeT szW = size / 4; /* sigh */
1775//zz SizeT i;
1776//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1777//zz UInt* vbitsP = NULL; /* ditto */
1778//zz
1779//zz /* Check alignment of args. */
1780//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1781//zz return 2;
1782//zz if ((size & 3) != 0)
1783//zz return 2;
1784//zz
1785//zz /* Check that arrays are addressible. */
1786//zz for (i = 0; i < szW; i++) {
1787//zz dataP = &data[i];
1788//zz vbitsP = &vbits[i];
1789//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1790//zz addressibleD = False;
1791//zz break;
1792//zz }
1793//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1794//zz addressibleV = False;
1795//zz break;
1796//zz }
1797//zz }
1798//zz if (!addressibleD) {
1799//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1800//zz setting ? True : False );
1801//zz return 3;
1802//zz }
1803//zz if (!addressibleV) {
1804//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1805//zz setting ? False : True );
1806//zz return 3;
1807//zz }
1808//zz
1809//zz /* Do the copy */
1810//zz if (setting) {
1811//zz /* setting */
1812//zz for (i = 0; i < szW; i++) {
1813//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00001814//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00001815//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1816//zz }
1817//zz } else {
1818//zz /* getting */
1819//zz for (i = 0; i < szW; i++) {
1820//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1821//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1822//zz }
1823//zz }
1824//zz
1825//zz return 1;
1826//zz }
sewardj05fe85e2005-04-27 22:46:36 +00001827
1828
1829/*------------------------------------------------------------*/
1830/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1831/*------------------------------------------------------------*/
1832
1833/* For the memory leak detector, say whether an entire 64k chunk of
1834 address space is possibly in use, or not. If in doubt return
1835 True.
1836*/
1837static
1838Bool mc_is_within_valid_secondary ( Addr a )
1839{
1840 SecMap* sm = maybe_get_secmap_for ( a );
1841 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
1842 /* Definitely not in use. */
1843 return False;
1844 } else {
1845 return True;
1846 }
1847}
1848
1849
1850/* For the memory leak detector, say whether or not a given word
1851 address is to be regarded as valid. */
1852static
1853Bool mc_is_valid_aligned_word ( Addr a )
1854{
1855 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
1856 if (sizeof(UWord) == 4) {
1857 tl_assert(VG_IS_4_ALIGNED(a));
1858 } else {
1859 tl_assert(VG_IS_8_ALIGNED(a));
1860 }
1861 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
1862 return True;
1863 } else {
1864 return False;
1865 }
1866}
sewardja4495682002-10-21 07:29:59 +00001867
1868
nethercote996901a2004-08-03 13:29:09 +00001869/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00001870 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00001871 tool. */
njnb8dca862005-03-14 02:42:44 +00001872static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001873{
sewardj05fe85e2005-04-27 22:46:36 +00001874 MAC_(do_detect_memory_leaks) (
1875 tid,
1876 mode,
1877 mc_is_within_valid_secondary,
1878 mc_is_valid_aligned_word
1879 );
njn25e49d8e72002-09-23 09:36:25 +00001880}
1881
1882
sewardjc859fbf2005-04-22 21:10:28 +00001883/*------------------------------------------------------------*/
1884/*--- Initialisation ---*/
1885/*------------------------------------------------------------*/
1886
1887static void init_shadow_memory ( void )
1888{
1889 Int i;
1890 SecMap* sm;
1891
1892 /* Build the 3 distinguished secondaries */
1893 tl_assert(VGM_BIT_INVALID == 1);
1894 tl_assert(VGM_BIT_VALID == 0);
1895 tl_assert(VGM_BYTE_INVALID == 0xFF);
1896 tl_assert(VGM_BYTE_VALID == 0);
1897
1898 /* Set A invalid, V invalid. */
1899 sm = &sm_distinguished[SM_DIST_NOACCESS];
1900 for (i = 0; i < 65536; i++)
1901 sm->vbyte[i] = VGM_BYTE_INVALID;
1902 for (i = 0; i < 8192; i++)
1903 sm->abits[i] = VGM_BYTE_INVALID;
1904
1905 /* Set A valid, V invalid. */
1906 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1907 for (i = 0; i < 65536; i++)
1908 sm->vbyte[i] = VGM_BYTE_INVALID;
1909 for (i = 0; i < 8192; i++)
1910 sm->abits[i] = VGM_BYTE_VALID;
1911
1912 /* Set A valid, V valid. */
1913 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1914 for (i = 0; i < 65536; i++)
1915 sm->vbyte[i] = VGM_BYTE_VALID;
1916 for (i = 0; i < 8192; i++)
1917 sm->abits[i] = VGM_BYTE_VALID;
1918
1919 /* Set up the primary map. */
1920 /* These entries gradually get overwritten as the used address
1921 space expands. */
1922 for (i = 0; i < N_PRIMARY_MAP; i++)
1923 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
1924
1925 /* auxmap_size = auxmap_used = 0;
1926 no ... these are statically initialised */
1927}
1928
1929
1930/*------------------------------------------------------------*/
1931/*--- Sanity check machinery (permanently engaged) ---*/
1932/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001933
njn51d827b2005-05-09 01:02:08 +00001934static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00001935{
jseward9800fd32004-01-04 23:08:04 +00001936 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00001937 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00001938 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00001939 return True;
njn25e49d8e72002-09-23 09:36:25 +00001940}
1941
njn51d827b2005-05-09 01:02:08 +00001942static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00001943{
sewardj23eb2fd2005-04-22 16:29:19 +00001944 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00001945 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00001946 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00001947
sewardj23eb2fd2005-04-22 16:29:19 +00001948 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00001949 PROF_EVENT(491, "expensive_sanity_check");
1950
sewardj23eb2fd2005-04-22 16:29:19 +00001951 /* Check that the 3 distinguished SMs are still as they should
1952 be. */
njn25e49d8e72002-09-23 09:36:25 +00001953
sewardj45d94cc2005-04-20 14:44:11 +00001954 /* Check A invalid, V invalid. */
1955 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00001956 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00001957 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001958 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001959 for (i = 0; i < 8192; i++)
1960 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001961 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00001962
sewardj45d94cc2005-04-20 14:44:11 +00001963 /* Check A valid, V invalid. */
1964 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1965 for (i = 0; i < 65536; i++)
1966 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001967 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001968 for (i = 0; i < 8192; i++)
1969 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001970 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001971
1972 /* Check A valid, V valid. */
1973 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1974 for (i = 0; i < 65536; i++)
1975 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001976 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001977 for (i = 0; i < 8192; i++)
1978 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001979 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001980
sewardj23eb2fd2005-04-22 16:29:19 +00001981 if (bad) {
1982 VG_(printf)("memcheck expensive sanity: "
1983 "distinguished_secondaries have changed\n");
1984 return False;
1985 }
1986
1987 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00001988 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00001989 bad = True;
1990
1991 if (bad) {
1992 VG_(printf)("memcheck expensive sanity: "
1993 "nonsensical auxmap sizing\n");
1994 return False;
1995 }
1996
1997 /* check that the number of secmaps issued matches the number that
1998 are reachable (iow, no secmap leaks) */
1999 n_secmaps_found = 0;
2000 for (i = 0; i < N_PRIMARY_MAP; i++) {
2001 if (primary_map[i] == NULL) {
2002 bad = True;
2003 } else {
2004 if (!is_distinguished_sm(primary_map[i]))
2005 n_secmaps_found++;
2006 }
2007 }
2008
2009 for (i = 0; i < auxmap_used; i++) {
2010 if (auxmap[i].sm == NULL) {
2011 bad = True;
2012 } else {
2013 if (!is_distinguished_sm(auxmap[i].sm))
2014 n_secmaps_found++;
2015 }
2016 }
2017
2018 if (n_secmaps_found != n_secmaps_issued)
2019 bad = True;
2020
2021 if (bad) {
2022 VG_(printf)("memcheck expensive sanity: "
2023 "apparent secmap leakage\n");
2024 return False;
2025 }
2026
2027 /* check that auxmap only covers address space that the primary
2028 doesn't */
2029
2030 for (i = 0; i < auxmap_used; i++)
2031 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2032 bad = True;
2033
2034 if (bad) {
2035 VG_(printf)("memcheck expensive sanity: "
2036 "auxmap covers wrong address space\n");
2037 return False;
2038 }
2039
2040 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002041
2042 return True;
2043}
sewardj45d94cc2005-04-20 14:44:11 +00002044
njn25e49d8e72002-09-23 09:36:25 +00002045
njn25e49d8e72002-09-23 09:36:25 +00002046/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002047/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002048/*------------------------------------------------------------*/
2049
njn51d827b2005-05-09 01:02:08 +00002050Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00002051
njn51d827b2005-05-09 01:02:08 +00002052static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002053{
njn45270a22005-03-27 01:00:11 +00002054 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00002055 else
njn43c799e2003-04-08 00:08:52 +00002056 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002057
2058 return True;
njn25e49d8e72002-09-23 09:36:25 +00002059}
2060
njn51d827b2005-05-09 01:02:08 +00002061static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002062{
njn3e884182003-04-15 13:03:23 +00002063 MAC_(print_common_usage)();
2064 VG_(printf)(
2065" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
2066 );
2067}
2068
njn51d827b2005-05-09 01:02:08 +00002069static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002070{
2071 MAC_(print_common_debug_usage)();
2072 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00002073" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00002074 );
njn25e49d8e72002-09-23 09:36:25 +00002075}
2076
nethercote8b76fe52004-11-08 19:20:09 +00002077/*------------------------------------------------------------*/
2078/*--- Client requests ---*/
2079/*------------------------------------------------------------*/
2080
2081/* Client block management:
2082
2083 This is managed as an expanding array of client block descriptors.
2084 Indices of live descriptors are issued to the client, so it can ask
2085 to free them later. Therefore we cannot slide live entries down
2086 over dead ones. Instead we must use free/inuse flags and scan for
2087 an empty slot at allocation time. This in turn means allocation is
2088 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002089
sewardjedc75ab2005-03-15 23:30:32 +00002090 An unused block has start == size == 0
2091*/
nethercote8b76fe52004-11-08 19:20:09 +00002092
typedef
   struct {
      Addr start;          /* base address given by the client; 0 (with
                              size 0) marks an unused slot */
      SizeT size;          /* size in bytes; 0 (with start 0) => unused */
      ExeContext* where;   /* context captured when the block was created */
      Char* desc;          /* client-supplied description (strdup'd;
                              freed on discard) */
   }
   CGenBlock;
2101
2102/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002103static UInt cgb_size = 0;
2104static UInt cgb_used = 0;
2105static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002106
2107/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002108static UInt cgb_used_MAX = 0; /* Max in use. */
2109static UInt cgb_allocs = 0; /* Number of allocs. */
2110static UInt cgb_discards = 0; /* Number of discards. */
2111static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002112
2113
2114static
njn695c16e2005-03-27 03:40:28 +00002115Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002116{
2117 UInt i, sz_new;
2118 CGenBlock* cgbs_new;
2119
njn695c16e2005-03-27 03:40:28 +00002120 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002121
njn695c16e2005-03-27 03:40:28 +00002122 for (i = 0; i < cgb_used; i++) {
2123 cgb_search++;
2124 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002125 return i;
2126 }
2127
2128 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002129 if (cgb_used < cgb_size) {
2130 cgb_used++;
2131 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002132 }
2133
2134 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002135 tl_assert(cgb_used == cgb_size);
2136 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002137
2138 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002139 for (i = 0; i < cgb_used; i++)
2140 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002141
njn695c16e2005-03-27 03:40:28 +00002142 if (cgbs != NULL)
2143 VG_(free)( cgbs );
2144 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002145
njn695c16e2005-03-27 03:40:28 +00002146 cgb_size = sz_new;
2147 cgb_used++;
2148 if (cgb_used > cgb_used_MAX)
2149 cgb_used_MAX = cgb_used;
2150 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002151}
2152
2153
/* Print the client-block subsystem counters (debug aid; invoked from
   mc_fini when enabled). */
static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
   );
}
2161
/* Hash-table match callback: does the chunk 'sh_ch' contain the
   address pointed to by 'ap', allowing for the malloc redzones? */
static Bool find_addr(VgHashNode* sh_ch, void* ap)
{
   MAC_Chunk *m = (MAC_Chunk*)sh_ch;
   Addr a = *(Addr*)ap;

   return VG_(addr_is_in_block)(a, m->data, m->size, MAC_MALLOC_REDZONE_SZB);
}
2169
/* Try to describe 'a' as lying inside a client-described block or a
   mempool chunk: if so, fill in *ai and return True, else return
   False.  Registered as MAC_(describe_addr_supp) in
   mc_pre_clo_init. */
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
   UInt i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      /* Skip recycled (discarded) slots. */
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         MAC_Mempool **d, *mp;

         /* OK - maybe it's a mempool, too? */
         mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
                                             (UWord)cgbs[i].start,
                                             (void*)&d);
         if(mp != NULL) {
            if(mp->chunks != NULL) {
               MAC_Chunk *mc;

               /* Within the pool, see if 'a' hits a specific chunk. */
               mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
               if(mc != NULL) {
                  ai->akind = UserG;
                  ai->blksize = mc->size;
                  ai->rwoffset = (Int)(a) - (Int)mc->data;
                  ai->lastchange = mc->where;
                  return True;
               }
            }
            /* In the pool's range but not in any chunk: describe the
               pool itself. */
            ai->akind = Mempool;
            ai->blksize = cgbs[i].size;
            ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
            ai->lastchange = cgbs[i].where;
            return True;
         }
         /* Plain client-described block. */
         ai->akind = UserG;
         ai->blksize = cgbs[i].size;
         ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
         ai->lastchange = cgbs[i].where;
         ai->desc = cgbs[i].desc;
         return True;
      }
   }
   return False;
}
2216
njn51d827b2005-05-09 01:02:08 +00002217static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002218{
2219 Int i;
2220 Bool ok;
2221 Addr bad_addr;
2222
njnfc26ff92004-11-22 19:12:49 +00002223 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002224 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2225 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2226 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2227 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2228 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2229 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2230 return False;
2231
2232 switch (arg[0]) {
2233 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2234 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2235 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002236 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2237 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002238 *ret = ok ? (UWord)NULL : bad_addr;
2239 break;
2240
2241 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2242 MC_ReadResult res;
2243 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2244 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002245 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2246 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002247 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002248 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2249 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002250 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
2251 break;
2252 }
2253
2254 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002255 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00002256 *ret = 0; /* return value is meaningless */
2257 break;
2258
2259 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002260 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002261 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002262 break;
2263
2264 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002265 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002266 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002267 break;
2268
2269 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002270 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002271 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002272 break;
2273
sewardjedc75ab2005-03-15 23:30:32 +00002274 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
2275 if (arg[1] != 0 && arg[2] != 0) {
njn695c16e2005-03-27 03:40:28 +00002276 i = alloc_client_block();
2277 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2278 cgbs[i].start = arg[1];
2279 cgbs[i].size = arg[2];
2280 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2281 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002282
2283 *ret = i;
2284 } else
2285 *ret = -1;
2286 break;
2287
nethercote8b76fe52004-11-08 19:20:09 +00002288 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002289 if (cgbs == NULL
2290 || arg[2] >= cgb_used ||
2291 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002292 *ret = 1;
2293 } else {
njn695c16e2005-03-27 03:40:28 +00002294 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2295 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2296 VG_(free)(cgbs[arg[2]].desc);
2297 cgb_discards++;
sewardjedc75ab2005-03-15 23:30:32 +00002298 *ret = 0;
2299 }
nethercote8b76fe52004-11-08 19:20:09 +00002300 break;
2301
sewardj45d94cc2005-04-20 14:44:11 +00002302//zz case VG_USERREQ__GET_VBITS:
2303//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2304//zz error. */
2305//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2306//zz *ret = mc_get_or_set_vbits_for_client
2307//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2308//zz break;
2309//zz
2310//zz case VG_USERREQ__SET_VBITS:
2311//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2312//zz error. */
2313//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2314//zz *ret = mc_get_or_set_vbits_for_client
2315//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2316//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002317
2318 default:
2319 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2320 return True;
2321 } else {
2322 VG_(message)(Vg_UserMsg,
2323 "Warning: unknown memcheck client request code %llx",
2324 (ULong)arg[0]);
2325 return False;
2326 }
2327 }
2328 return True;
2329}
njn25e49d8e72002-09-23 09:36:25 +00002330
2331/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002332/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002333/*------------------------------------------------------------*/
2334
njn51d827b2005-05-09 01:02:08 +00002335static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002336{
2337}
2338
njn51d827b2005-05-09 01:02:08 +00002339static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002340{
nethercote8b76fe52004-11-08 19:20:09 +00002341 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002342
sewardj23eb2fd2005-04-22 16:29:19 +00002343 Int i, n_accessible_dist;
2344 SecMap* sm;
2345
sewardj45d94cc2005-04-20 14:44:11 +00002346 if (VG_(clo_verbosity) > 1) {
2347 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002348 " memcheck: sanity checks: %d cheap, %d expensive",
2349 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002350 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002351 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2352 auxmap_used,
2353 auxmap_used * 64,
2354 auxmap_used / 16 );
2355 VG_(message)(Vg_DebugMsg,
2356 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002357 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002358 VG_(message)(Vg_DebugMsg,
2359 " memcheck: secondaries: %d issued (%dk, %dM)",
2360 n_secmaps_issued,
2361 n_secmaps_issued * 64,
2362 n_secmaps_issued / 16 );
2363
2364 n_accessible_dist = 0;
2365 for (i = 0; i < N_PRIMARY_MAP; i++) {
2366 sm = primary_map[i];
2367 if (is_distinguished_sm(sm)
2368 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2369 n_accessible_dist ++;
2370 }
2371 for (i = 0; i < auxmap_used; i++) {
2372 sm = auxmap[i].sm;
2373 if (is_distinguished_sm(sm)
2374 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2375 n_accessible_dist ++;
2376 }
2377
2378 VG_(message)(Vg_DebugMsg,
2379 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2380 n_accessible_dist,
2381 n_accessible_dist * 64,
2382 n_accessible_dist / 16 );
2383
sewardj45d94cc2005-04-20 14:44:11 +00002384 }
2385
njn5c004e42002-11-18 11:04:50 +00002386 if (0) {
2387 VG_(message)(Vg_DebugMsg,
2388 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002389 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002390 }
njn25e49d8e72002-09-23 09:36:25 +00002391}
2392
njn51d827b2005-05-09 01:02:08 +00002393static void mc_pre_clo_init(void)
2394{
2395 VG_(details_name) ("Memcheck");
2396 VG_(details_version) (NULL);
2397 VG_(details_description) ("a memory error detector");
2398 VG_(details_copyright_author)(
2399 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
2400 VG_(details_bug_reports_to) (VG_BUGS_TO);
2401 VG_(details_avg_translation_sizeB) ( 370 );
2402
2403 VG_(basic_tool_funcs) (mc_post_clo_init,
2404 MC_(instrument),
2405 mc_fini);
2406
2407 VG_(needs_core_errors) ();
2408 VG_(needs_tool_errors) (MAC_(eq_Error),
2409 mc_pp_Error,
2410 MAC_(update_extra),
2411 mc_recognised_suppression,
2412 MAC_(read_extra_suppression_info),
2413 MAC_(error_matches_suppression),
2414 MAC_(get_error_name),
2415 MAC_(print_extra_suppression_info));
2416 VG_(needs_libc_freeres) ();
2417 VG_(needs_command_line_options)(mc_process_cmd_line_option,
2418 mc_print_usage,
2419 mc_print_debug_usage);
2420 VG_(needs_client_requests) (mc_handle_client_request);
2421 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
2422 mc_expensive_sanity_check);
2423 VG_(needs_shadow_memory) ();
2424
2425 VG_(malloc_funcs) (MAC_(malloc),
2426 MAC_(__builtin_new),
2427 MAC_(__builtin_vec_new),
2428 MAC_(memalign),
2429 MAC_(calloc),
2430 MAC_(free),
2431 MAC_(__builtin_delete),
2432 MAC_(__builtin_vec_delete),
2433 MAC_(realloc),
2434 MAC_MALLOC_REDZONE_SZB );
2435
2436 MAC_( new_mem_heap) = & mc_new_mem_heap;
2437 MAC_( ban_mem_heap) = & mc_make_noaccess;
2438 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
2439 MAC_( die_mem_heap) = & mc_make_noaccess;
2440 MAC_(check_noaccess) = & mc_check_noaccess;
2441
2442 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
2443 VG_(track_new_mem_stack_signal)( & mc_make_writable );
2444 VG_(track_new_mem_brk) ( & mc_make_writable );
2445 VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );
2446
2447 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
2448
2449 VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
2450 VG_(track_die_mem_brk) ( & mc_make_noaccess );
2451 VG_(track_die_mem_munmap) ( & mc_make_noaccess );
2452
2453 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2454 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2455 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2456 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2457 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2458 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
2459
2460 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2461 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2462 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2463 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2464 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2465 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
2466
2467 VG_(track_ban_mem_stack) ( & mc_make_noaccess );
2468
2469 VG_(track_pre_mem_read) ( & mc_check_is_readable );
2470 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2471 VG_(track_pre_mem_write) ( & mc_check_is_writable );
2472 VG_(track_post_mem_write) ( & mc_post_mem_write );
2473
2474 VG_(track_pre_reg_read) ( & mc_pre_reg_read );
2475
2476 VG_(track_post_reg_write) ( & mc_post_reg_write );
2477 VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );
2478
2479 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2480 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2481 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
2482
2483 /* Additional block description for VG_(describe_addr)() */
2484 MAC_(describe_addr_supp) = client_perm_maybe_describe;
2485
2486 init_shadow_memory();
2487 MAC_(common_pre_clo_init)();
2488
2489 tl_assert( mc_expensive_sanity_check() );
2490}
2491
2492VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init, 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002493
njn25e49d8e72002-09-23 09:36:25 +00002494/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002495/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002496/*--------------------------------------------------------------------*/