
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO 22 Apr 05

   test whether it would be faster, for LOADV4, to check
   only for 8-byte validity on the fast path
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)

/* Define to debug the mem audit system.  Set to:
      0  no debugging, fast cases are used
      1  some sanity checking, fast cases are used
      2  max sanity checking, only slow cases are used
*/
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* TODO: fix this comment */
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates `not addressable and
//zz    not valid' for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 4 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, addressable+valid.
//zz
//zz    [...] lots of stuff deleted due to out of date-ness
//zz
//zz    As a final optimisation, the alignment and address checks for
//zz    4-byte loads and stores are combined in a neat way.  The primary
//zz    map is extended to have 262144 entries (2^18), rather than 2^16.
//zz    The top 3/4 of these entries are permanently set to the
//zz    distinguished secondary map.  For a 4-byte load/store, the
//zz    top-level map is indexed not with (addr >> 16) but instead f(addr),
//zz    where
//zz
//zz    f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
//zz       = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
//zz       = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
//zz
//zz    ie the lowest two bits are placed above the 16 high address bits.
//zz    If either of these two bits are nonzero, the address is misaligned;
//zz    this will select a secondary map from the upper 3/4 of the primary
//zz    map.  Because this is always the distinguished secondary map, a
//zz    (bogus) address check failure will result.  The failure handling
//zz    code can then figure out whether this is a genuine addr check
//zz    failure or whether it is a possibly-legitimate access at a
//zz    misaligned address.
//zz */

/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 16G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  18

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)

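/* Derived coverage, for orientation: each secondary shadows 64KB, so
   the main primary map spans N_PRIMARY_MAP * 64KB.  With
   N_PRIMARY_BITS == 16 (32-bit hosts) that is 2^32 bytes, i.e. the
   whole address space; with N_PRIMARY_BITS == 18 (64-bit hosts) it is
   2^34 bytes = 16GB, so MAX_PRIMARY_ADDRESS == 0x3FFFFFFFF and any
   address above that is handled via the auxiliary primary map. */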

/* --------------- Stats maps --------------- */

static Int   n_secmaps_issued   = 0;
static ULong n_auxmap_searches  = 0;
static ULong n_auxmap_cmps      = 0;
static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;


/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

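/* Size arithmetic, for reference (derived from the struct above): each
   SecMap shadows a 64KB chunk of address space.  abits[] holds
   8192 * 8 = 65536 A bits (one per byte) and vbyte[] holds 65536 V
   bytes (eight V bits per byte), so a secondary occupies
   8192 + 65536 = 73728 bytes. */
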
/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(shadow_alloc)(sizeof(SecMap));
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;
   return new_sm;
}


/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 500 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, return NULL.  Note carefully that because
   each call potentially rearranges the entries, each call to this
   function invalidates ALL AuxMapEnt*s previously obtained by calling
   this fn.
*/
static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   return NULL;
}


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that because
   each call potentially rearranges the entries, each call to this
   function invalidates ALL AuxMapEnt*s previously obtained by calling
   this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);
   if (am)
      return am;

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}

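/* Note (illustrative only, not part of the tool): because every lookup
   may swap an entry one step towards the front, caching an AuxMapEnt*
   across calls is unsafe:

      AuxMapEnt* e1 = find_or_alloc_in_auxmap(a1);
      AuxMapEnt* e2 = find_or_alloc_in_auxmap(a2);  -- may move e1's entry
      use(e1->sm);                                  -- e1 may be stale

   Always re-look-up rather than holding the pointer across a call. */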

/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}



/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}


/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byte with significance 'byteno'
   (0 = least significant) in a wordszB-sized word, given the specified
   endianness. */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}

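/* Worked example (derived from the code above): for a 4-byte word, on
   little-endian byte_offset_w(4, False, 0) == 0 (LSB at the lowest
   address) and byte_offset_w(4, False, 3) == 3, while on big-endian
   byte_offset_w(4, True, 0) == 3 and byte_offset_w(4, True, 3) == 0
   (MSB at the lowest address). */
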
/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}


/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded V bytes for
      validly-addressed bytes and Defined for invalid addresses.
      Iterate over the bytes in the word, from the most significant
      down to the least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}

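/* Worked example (derived from the loop above): a 4-byte little-endian
   load from 'a' where only a+2 is unaddressable yields, from most to
   least significant byte: vbyte(a+3), VGM_BYTE_VALID (forced, since
   a+2 is bad), vbyte(a+1), vbyte(a+0); one address error covering the
   whole access is then reported. */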

static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}


//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
//zz
//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   abits8;
//zz    PROF_EVENT(24);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    abits8 = sm->abits[sm_off >> 3];
//zz    abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    abits8 &= 0x0F;
//zz    return abits8;
//zz }
//zz
//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(25);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    return ((UInt*)(sm->vbyte))[sm_off >> 2];
//zz }
//zz
//zz
//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    PROF_EVENT(23);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
//zz }


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

/* Given address 'a', find the place where the pointer to a's
   secondary map lives.  If a falls into the primary map, the returned
   value points to one of the entries in primary_map[].  Otherwise,
   the auxiliary primary map is searched for 'a', or an entry is
   created for it; either way, the returned value points to the
   relevant AuxMapEnt's .sm field.

   The point of this is to enable set_address_range_perms to assign
   secondary maps in a uniform way, without worrying about whether a
   given secondary map is pointed to from the main or auxiliary
   primary map.
*/

static SecMap** find_secmap_binder_for_addr ( Addr aA )
{
   if (aA > MAX_PRIMARY_ADDRESS) {
      AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
      return &am->sm;
   } else {
      UWord a      = (UWord)aA;
      UWord sec_no = (UWord)(a >> 16);
#     if VG_DEBUG_MEMORY >= 1
      tl_assert(sec_no < N_PRIMARY_MAP);
#     endif
      return &primary_map[sec_no];
   }
}


static void set_address_range_perms ( Addr aA, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   PROF_EVENT(150, "set_address_range_perms");

   /* Check the permissions make sense. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %u, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   UWord a = (UWord)aA;

#  if VG_DEBUG_MEMORY >= 2

   /*------------------ debug-only case ------------------ */
   SizeT i;

   UWord example_vbyte = BIT_TO_BYTE(example_v_bit);

   tl_assert(sizeof(SizeT) == sizeof(Addr));

   if (0 && len >= 4096)
      VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                  (ULong)a, len, example_a_bit, example_v_bit);

   if (len == 0)
      return;

   for (i = 0; i < len; i++) {
      set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
   }

#  else

   /*------------------ standard handling ------------------ */
   UWord    vbits8, abits8, vbits32, v_off, a_off;
   SecMap*  sm;
   SecMap** binder;
   SecMap*  example_dsm;

   /* Decide on the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   if (example_a_bit == VGM_BIT_INVALID) {
      example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
   } else {
      if (example_v_bit == VGM_BIT_VALID) {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
      } else {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
      }
   }

   /* Make various wider versions of the A/V values to use. */
   vbits8  = BIT_TO_BYTE(example_v_bit);
   abits8  = BIT_TO_BYTE(example_a_bit);
   vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;

   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(151, "set_address_range_perms-loop1-pre");
      if (VG_IS_8_ALIGNED(a)) break;
      set_abit_and_vbyte( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0);

   /* Now go in steps of 8 bytes. */
   binder = find_secmap_binder_for_addr(a);

   while (True) {

      if (len < 8) break;

      PROF_EVENT(152, "set_address_range_perms-loop8");

      if ((a & SECONDARY_MASK) == 0) {
         /* we just traversed a primary map boundary, so update the
            binder. */
         binder = find_secmap_binder_for_addr(a);
         PROF_EVENT(153, "set_address_range_perms-update-binder");

         /* Space-optimisation.  If we are setting the entire
            secondary map, just point this entry at one of our
            distinguished secondaries.  However, only do that if it
            already points at a distinguished secondary, since doing
            otherwise would leak the existing secondary.  We could do
            better and free up any pre-existing non-distinguished
            secondary at this point, since we are guaranteed that each
            non-dist secondary only has one pointer to it, and we have
            that pointer right here. */
         if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
            PROF_EVENT(154, "set_address_range_perms-entire-secmap");
            *binder = example_dsm;
            len -= SECONDARY_SIZE;
            a += SECONDARY_SIZE;
            continue;
         }
      }

      /* If the primary is already pointing to a distinguished map
         with the same properties as we're trying to set, then leave
         it that way. */
      if (*binder == example_dsm) {
         a += 8;
         len -= 8;
         continue;
      }

      /* Make sure it's OK to write the secondary. */
      if (is_distinguished_sm(*binder))
         *binder = copy_for_writing(*binder);

      sm = *binder;
      v_off = a & 0xFFFF;
      a_off = v_off >> 3;
      sm->abits[a_off] = (UChar)abits8;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;

      a += 8;
      len -= 8;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(155, "set_address_range_perms-loop1-post");
      set_abit_and_vbyte ( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

#  endif
}

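/* Worked example (derived from the routine above): a request covering
   21 bytes starting at an address with a % 8 == 5 is handled as 3
   leading single bytes (to reach 8-alignment), then two aligned 8-byte
   steps, then 2 trailing single bytes: 3 + 16 + 2 == 21.  Only a range
   covering a whole aligned 64KB secondary can be satisfied by simply
   pointing the primary entry at a distinguished secondary, and then
   only if the existing entry is itself distinguished (so no real
   secondary is leaked). */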

/* --- Set permissions for arbitrary address ranges --- */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(40, "mc_make_noaccess");
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(41, "mc_make_writable");
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(42, "mc_make_readable");
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}


/* --- Block-copy permissions (needed for implementing realloc()). --- */

static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");

   PROF_EVENT(50, "mc_copy_address_range_state");
   for (i = 0; i < len; i++) {
      PROF_EVENT(51, "mc_copy_address_range_state(loop)");
      get_abit_and_vbyte( &abit, &vbyte, src+i );
      set_abit_and_vbyte( dst+i, abit, vbyte );
   }
}


/* --- Fast case permission setters, for dealing with stacks. --- */

static __inline__
void make_aligned_word32_writable ( Addr aA )
{
   PROF_EVENT(300, "make_aligned_word32_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(301, "make_aligned_word32_writable-slow1");
      mc_make_writable(aA, 4);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s). */
   sm->abits[a_off] &= ~mask;
#  endif
}


static __inline__
void make_aligned_word32_noaccess ( Addr aA )
{
   PROF_EVENT(310, "make_aligned_word32_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      mc_make_noaccess(aA, 4);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid
      (1s). */
   sm->abits[a_off] |= mask;
#  endif
}


/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_word64_writable ( Addr aA )
{
   PROF_EVENT(320, "make_aligned_word64_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(321, "make_aligned_word64_writable-slow1");
      mc_make_writable(aA, 8);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the relevant area accessible. */
   sm->abits[a_off] = VGM_BYTE_VALID;
#  endif
}


static __inline__
void make_aligned_word64_noaccess ( Addr aA )
{
   PROF_EVENT(330, "make_aligned_word64_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
      mc_make_noaccess(aA, 8);
      return;
   }

   UWord a      = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm    = primary_map[sec_no];
   UWord   v_off = a & 0xFFFF;
   UWord   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the abandoned area inaccessible. */
   sm->abits[a_off] = VGM_BYTE_INVALID;
#  endif
}


/* The stack-pointer update handling functions */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );


/*------------------------------------------------------------*/
/*--- Checking memory                                      ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;

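/* How the three results are consumed (see mc_check_is_readable below):
   MC_AddrErr is reported as an unaddressability error (isUnaddr ==
   True), MC_ValueErr as an undefined-value error, and MC_Ok means no
   report is made. */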

/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressable.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(60, "mc_check_noaccess");
   for (i = 0; i < len; i++) {
      PROF_EVENT(61, "mc_check_noaccess(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(62, "mc_check_writable");
   for (i = 0; i < len; i++) {
      PROF_EVENT(63, "mc_check_writable(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   UWord vbyte;

   PROF_EVENT(64, "mc_check_readable");
   DEBUG("mc_check_readable\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(65, "mc_check_readable(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // Report addressability errors in preference to definedness errors
      // by checking the A bits first.
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UWord abit;
   UWord vbyte;
   PROF_EVENT(66, "mc_check_readable_asciiz");
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // As in mc_check_readable(), check A bits first
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0)
         return MC_Ok;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

static
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = mc_check_writable ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    /*isUnaddr*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
         break;

      default:
         VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Addr bad_addr;
   MC_ReadResult res;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   res = mc_check_readable ( base, size, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );

      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    isUnaddr, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, isUnaddr, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   MC_ReadResult res;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   tl_assert(part == Vg_CoreSysCall);
   res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );
      MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}

static
void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
{
   if (is_inited) {
      mc_make_readable(a, len);
   } else {
      mc_make_writable(a, len);
   }
}

static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   mc_make_readable(a, len);
}

static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}


/*------------------------------------------------------------*/
/*--- Register event handlers                              ---*/
/*------------------------------------------------------------*/

/* When some chunk of guest state is written, mark the corresponding
   shadow area as valid.  This is used to initialise arbitrarily large
   chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
*/
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   UChar area[1024];
   tl_assert(size <= 1024);
   VG_(memset)(area, VGM_BYTE_VALID, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
}

static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}

/* Look at the definedness of the guest's shadow state for
   [offset, offset+len).  If any part of that is undefined, record
   a parameter error.
*/
static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
                              OffT offset, SizeT size)
{
   Int  i;
   Bool bad;

   UChar area[16];
   tl_assert(size <= 16);

   VG_(get_shadow_regs_area)( tid, offset, size, area );

   bad = False;
   for (i = 0; i < size; i++) {
      if (area[i] != VGM_BYTE_VALID) {
         bad = True;
         break;
      }
   }

   if (bad)
      MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
}


/*------------------------------------------------------------*/
/*--- Functions called directly from generated code:       ---*/
/*--- Load/store handlers.                                 ---*/
/*------------------------------------------------------------*/

/* Types: LOADV4, LOADV2, LOADV1 are:
       UWord fn ( Addr a )
   so they return 32-bits on 32-bit machines and 64-bits on
   64-bit machines.  Addr has the same size as a host word.

   LOADV8 is always  ULong fn ( Addr a )

   Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
   are a UWord, and for STOREV8 they are a ULong.
*/

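/* Note on the fast-path address check used by all the handlers below
   (derived from the definitions above): for an N-byte access the mask
   is ~((0x10000-N) | ((N_PRIMARY_MAP-1) << 16)), so on a 64-bit host
   with N_PRIMARY_BITS == 18 the 8-byte case has mask == ~0x3FFFFFFF8,
   and (a & mask) != 0 exactly when 'a' is not N-aligned or lies above
   MAX_PRIMARY_ADDRESS; either way the slow path is taken. */
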
sewardj95448072004-11-22 20:19:51 +00001224/* ------------------------ Size = 8 ------------------------ */
1225
njn9fb73db2005-03-27 01:55:21 +00001226VGA_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001227ULong MC_(helperc_LOADV8) ( Addr aA )
sewardj95448072004-11-22 20:19:51 +00001228{
sewardjf9d81612005-04-23 23:25:49 +00001229 PROF_EVENT(200, "helperc_LOADV8");
1230
1231# if VG_DEBUG_MEMORY >= 2
1232 return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1233# else
1234
1235 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1236 UWord a = (UWord)aA;
1237
1238 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1239 naturally aligned, or 'a' exceeds the range covered by the
1240 primary map. Either way we defer to the slow-path case. */
1241 if (EXPECTED_NOT_TAKEN(a & mask)) {
1242 PROF_EVENT(201, "helperc_LOADV8-slow1");
1243 return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1244 }
1245
1246 UWord sec_no = (UWord)(a >> 16);
1247
1248# if VG_DEBUG_MEMORY >= 1
1249 tl_assert(sec_no < N_PRIMARY_MAP);
1250# endif
1251
1252 SecMap* sm = primary_map[sec_no];
1253 UWord v_off = a & 0xFFFF;
1254 UWord a_off = v_off >> 3;
1255 UWord abits = (UWord)(sm->abits[a_off]);
1256
1257 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1258 /* Handle common case quickly: a is suitably aligned, is mapped,
1259 and is addressible. */
1260 return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
1261 } else {
1262 /* Slow but general case. */
1263 PROF_EVENT(202, "helperc_LOADV8-slow2");
1264 return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
1265 }
1266
1267# endif
sewardj95448072004-11-22 20:19:51 +00001268}
1269
njn9fb73db2005-03-27 01:55:21 +00001270VGA_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001271void MC_(helperc_STOREV8) ( Addr aA, ULong vbytes )
sewardj95448072004-11-22 20:19:51 +00001272{
sewardjf9d81612005-04-23 23:25:49 +00001273 PROF_EVENT(210, "helperc_STOREV8");
1274
1275# if VG_DEBUG_MEMORY >= 2
1276 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1277# else
1278
1279 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1280 UWord a = (UWord)aA;
1281
1282 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1283 naturally aligned, or 'a' exceeds the range covered by the
1284 primary map. Either way we defer to the slow-path case. */
1285 if (EXPECTED_NOT_TAKEN(a & mask)) {
1286 PROF_EVENT(211, "helperc_STOREV8-slow1");
1287 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1288 return;
1289 }
1290
1291 UWord sec_no = (UWord)(a >> 16);
1292
1293# if VG_DEBUG_MEMORY >= 1
1294 tl_assert(sec_no < N_PRIMARY_MAP);
1295# endif
1296
1297 SecMap* sm = primary_map[sec_no];
1298 UWord v_off = a & 0xFFFF;
1299 UWord a_off = v_off >> 3;
1300 UWord abits = (UWord)(sm->abits[a_off]);
1301
1302 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1303 && abits == VGM_BYTE_VALID)) {
1304 /* Handle common case quickly: a is suitably aligned, is mapped,
1305 and is addressible. */
1306 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes;
1307 } else {
1308 /* Slow but general case. */
1309 PROF_EVENT(212, "helperc_STOREV8-slow2");
1310 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1311 }
1312# endif
sewardj95448072004-11-22 20:19:51 +00001313}
1314
1315/* ------------------------ Size = 4 ------------------------ */
1316
njn9fb73db2005-03-27 01:55:21 +00001317VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001318UWord MC_(helperc_LOADV4) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001319{
sewardjc1a2cda2005-04-21 17:34:00 +00001320 PROF_EVENT(220, "helperc_LOADV4");
1321
1322# if VG_DEBUG_MEMORY >= 2
1323 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1324# else
1325
sewardj23eb2fd2005-04-22 16:29:19 +00001326 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001327 UWord a = (UWord)aA;
1328
1329 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1330 naturally aligned, or 'a' exceeds the range covered by the
1331 primary map. Either way we defer to the slow-path case. */
1332 if (EXPECTED_NOT_TAKEN(a & mask)) {
1333 PROF_EVENT(221, "helperc_LOADV4-slow1");
1334 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1335 }
1336
1337 UWord sec_no = (UWord)(a >> 16);
1338
1339# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001340 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001341# endif
1342
1343 SecMap* sm = primary_map[sec_no];
1344 UWord v_off = a & 0xFFFF;
1345 UWord a_off = v_off >> 3;
1346 UWord abits = (UWord)(sm->abits[a_off]);
1347 abits >>= (a & 4);
1348 abits &= 15;
1349 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
1350 /* Handle common case quickly: a is suitably aligned, is mapped,
1351 and is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001352 /* On a 32-bit platform, simply hoick the required 32 bits out of
1353 the vbyte array. On a 64-bit platform, also set the upper 32
1354 bits to 1 ("undefined"), just in case. This almost certainly
1355 isn't necessary, but be paranoid. */
1356 UWord ret = (UWord)0xFFFFFFFF00000000ULL;
1357 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
1358 return ret;
sewardjc1a2cda2005-04-21 17:34:00 +00001359 } else {
1360 /* Slow but general case. */
1361 PROF_EVENT(222, "helperc_LOADV4-slow2");
1362 return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
1363 }
1364
1365# endif
njn25e49d8e72002-09-23 09:36:25 +00001366}
1367
njn9fb73db2005-03-27 01:55:21 +00001368VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001369void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001370{
sewardjc1a2cda2005-04-21 17:34:00 +00001371 PROF_EVENT(230, "helperc_STOREV4");
1372
1373# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001374 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001375# else
1376
sewardj23eb2fd2005-04-22 16:29:19 +00001377 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001378 UWord a = (UWord)aA;
1379
1380 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1381 naturally aligned, or 'a' exceeds the range covered by the
1382 primary map. Either way we defer to the slow-path case. */
1383 if (EXPECTED_NOT_TAKEN(a & mask)) {
1384 PROF_EVENT(231, "helperc_STOREV4-slow1");
1385 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1386 return;
1387 }
1388
1389 UWord sec_no = (UWord)(a >> 16);
1390
1391# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001392 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001393# endif
1394
1395 SecMap* sm = primary_map[sec_no];
1396 UWord v_off = a & 0xFFFF;
1397 UWord a_off = v_off >> 3;
1398 UWord abits = (UWord)(sm->abits[a_off]);
1399 abits >>= (a & 4);
1400 abits &= 15;
1401 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1402 && abits == VGM_NIBBLE_VALID)) {
1403 /* Handle common case quickly: a is suitably aligned, is mapped,
1404 and is addressible. */
1405 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
1406 } else {
1407 /* Slow but general case. */
1408 PROF_EVENT(232, "helperc_STOREV4-slow2");
1409 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1410 }
1411# endif
njn25e49d8e72002-09-23 09:36:25 +00001412}
1413
sewardj95448072004-11-22 20:19:51 +00001414/* ------------------------ Size = 2 ------------------------ */
1415
njn9fb73db2005-03-27 01:55:21 +00001416VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001417UWord MC_(helperc_LOADV2) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001418{
sewardjc1a2cda2005-04-21 17:34:00 +00001419 PROF_EVENT(240, "helperc_LOADV2");
1420
1421# if VG_DEBUG_MEMORY >= 2
1422 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1423# else
1424
sewardj23eb2fd2005-04-22 16:29:19 +00001425 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001426 UWord a = (UWord)aA;
1427
1428 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1429 naturally aligned, or 'a' exceeds the range covered by the
1430 primary map. Either way we defer to the slow-path case. */
1431 if (EXPECTED_NOT_TAKEN(a & mask)) {
1432 PROF_EVENT(241, "helperc_LOADV2-slow1");
1433 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1434 }
1435
1436 UWord sec_no = (UWord)(a >> 16);
1437
1438# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001439 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001440# endif
1441
1442 SecMap* sm = primary_map[sec_no];
1443 UWord v_off = a & 0xFFFF;
1444 UWord a_off = v_off >> 3;
1445 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001446 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1447 /* Handle common case quickly: a is mapped, and the entire
1448 word32 it lives in is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001449 /* Set the upper 16/48 bits of the result to 1 ("undefined"),
1450 just in case. This almost certainly isn't necessary, but be
1451 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001452 return (~(UWord)0xFFFF)
1453 |
1454 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
1455 } else {
1456 /* Slow but general case. */
1457 PROF_EVENT(242, "helperc_LOADV2-slow2");
1458 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1459 }
1460
1461# endif
njn25e49d8e72002-09-23 09:36:25 +00001462}
1463
njn9fb73db2005-03-27 01:55:21 +00001464VGA_REGPARM(2)
sewardj5d28efc2005-04-21 22:16:29 +00001465void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001466{
sewardjc1a2cda2005-04-21 17:34:00 +00001467 PROF_EVENT(250, "helperc_STOREV2");
sewardj5d28efc2005-04-21 22:16:29 +00001468
1469# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001470 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001471# else
1472
sewardj23eb2fd2005-04-22 16:29:19 +00001473 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardj5d28efc2005-04-21 22:16:29 +00001474 UWord a = (UWord)aA;
1475
1476 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1477 naturally aligned, or 'a' exceeds the range covered by the
1478 primary map. Either way we defer to the slow-path case. */
1479 if (EXPECTED_NOT_TAKEN(a & mask)) {
1480 PROF_EVENT(251, "helperc_STOREV2-slow1");
1481 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1482 return;
1483 }
1484
1485 UWord sec_no = (UWord)(a >> 16);
1486
1487# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001488 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +00001489# endif
1490
1491 SecMap* sm = primary_map[sec_no];
1492 UWord v_off = a & 0xFFFF;
1493 UWord a_off = v_off >> 3;
1494 UWord abits = (UWord)(sm->abits[a_off]);
1495 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1496 && abits == VGM_BYTE_VALID)) {
1497 /* Handle common case quickly. */
1498 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
1499 } else {
1500 /* Slow but general case. */
1501 PROF_EVENT(252, "helperc_STOREV2-slow2");
1502 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1503 }
1504# endif
njn25e49d8e72002-09-23 09:36:25 +00001505}
1506
sewardj95448072004-11-22 20:19:51 +00001507/* ------------------------ Size = 1 ------------------------ */
1508
njn9fb73db2005-03-27 01:55:21 +00001509VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001510UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001511{
sewardjc1a2cda2005-04-21 17:34:00 +00001512 PROF_EVENT(260, "helperc_LOADV1");
1513
1514# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001515 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001516# else
1517
sewardj23eb2fd2005-04-22 16:29:19 +00001518 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001519 UWord a = (UWord)aA;
1520
1521 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1522      exceeds the range covered by the primary map, in which case we
1523      defer to the slow-path case. */
1524 if (EXPECTED_NOT_TAKEN(a & mask)) {
1525 PROF_EVENT(261, "helperc_LOADV1-slow1");
1526 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1527 }
1528
1529 UWord sec_no = (UWord)(a >> 16);
1530
1531# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001532 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001533# endif
1534
1535 SecMap* sm = primary_map[sec_no];
1536 UWord v_off = a & 0xFFFF;
1537 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001538 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001539 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1540      /* Handle common case quickly: a is mapped, and the entire
1541         8-byte group it lives in is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001542 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1543 just in case. This almost certainly isn't necessary, but be
1544 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001545 return (~(UWord)0xFF)
1546 |
1547 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1548 } else {
1549 /* Slow but general case. */
1550 PROF_EVENT(262, "helperc_LOADV1-slow2");
1551 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1552 }
1553# endif
njn25e49d8e72002-09-23 09:36:25 +00001554}
1555
sewardjc1a2cda2005-04-21 17:34:00 +00001556
njn9fb73db2005-03-27 01:55:21 +00001557VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001558void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001559{
sewardjc1a2cda2005-04-21 17:34:00 +00001560 PROF_EVENT(270, "helperc_STOREV1");
1561
1562# if VG_DEBUG_MEMORY >= 2
1563 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1564# else
1565
sewardj23eb2fd2005-04-22 16:29:19 +00001566 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001567 UWord a = (UWord)aA;
1568 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1569      exceeds the range covered by the primary map, in which case we
1570      defer to the slow-path case. */
1571 if (EXPECTED_NOT_TAKEN(a & mask)) {
1572 PROF_EVENT(271, "helperc_STOREV1-slow1");
1573 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1574 return;
1575 }
1576
1577 UWord sec_no = (UWord)(a >> 16);
1578
1579# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001580 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001581# endif
1582
1583 SecMap* sm = primary_map[sec_no];
1584 UWord v_off = a & 0xFFFF;
1585 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001586 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001587 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1588 && abits == VGM_BYTE_VALID)) {
1589 /* Handle common case quickly: a is mapped, the entire word32 it
1590 lives in is addressible. */
1591 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1592 } else {
1593 PROF_EVENT(272, "helperc_STOREV1-slow2");
1594 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1595 }
1596
1597# endif
njn25e49d8e72002-09-23 09:36:25 +00001598}
1599
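/* A minimal sketch (for reference only; 'example_decompose' is a
   hypothetical name, not a memcheck function) of how a fast-path
   address below MAX_PRIMARY_ADDRESS is split into the indices used
   above: the top 16 bits select the secondary map, the low 16 bits
   index its V bytes, and each byte of 'abits' covers an aligned group
   of 8 data bytes, one A bit per byte. */
static void example_decompose ( Addr a, /*OUT*/UWord* sec_no,
                                /*OUT*/UWord* v_off, /*OUT*/UWord* a_off )
{
   *sec_no = (UWord)(a >> 16);   /* index into primary_map[] */
   *v_off  = a & 0xFFFF;         /* index into sm->vbyte[]   */
   *a_off  = *v_off >> 3;        /* index into sm->abits[]   */
   /* For a 4-byte access, the relevant nibble of that A byte is
      (abits >> (a & 4)) & 15, as in helperc_STOREV4 above. */
}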
1600
sewardjc859fbf2005-04-22 21:10:28 +00001601/*------------------------------------------------------------*/
1602/*--- Functions called directly from generated code: ---*/
1603/*--- Value-check failure handlers. ---*/
1604/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001605
njn5c004e42002-11-18 11:04:50 +00001606void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001607{
njnb8dca862005-03-14 02:42:44 +00001608 MC_(record_value_error) ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001609}
1610
njn5c004e42002-11-18 11:04:50 +00001611void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001612{
njnb8dca862005-03-14 02:42:44 +00001613 MC_(record_value_error) ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001614}
1615
njn5c004e42002-11-18 11:04:50 +00001616void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001617{
njnb8dca862005-03-14 02:42:44 +00001618 MC_(record_value_error) ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001619}
1620
sewardj11bcc4e2005-04-23 22:38:38 +00001621void MC_(helperc_value_check8_fail) ( void )
1622{
1623 MC_(record_value_error) ( VG_(get_running_tid)(), 8 );
1624}
1625
njn9fb73db2005-03-27 01:55:21 +00001626VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001627{
njnb8dca862005-03-14 02:42:44 +00001628 MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001629}
1630
njn25e49d8e72002-09-23 09:36:25 +00001631
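/* For reference: a client fragment along these lines (hypothetical,
   not part of this file) is the kind of thing that leads the
   instrumented code to call the handlers above -- the branch depends
   on an undefined value, so a value error of the appropriate size is
   reported:

      int main ( void )
      {
         int x;            // never initialised
         if (x == 42)      // conditional jump depends on x
            return 1;
         return 0;
      }
*/
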
sewardj45d94cc2005-04-20 14:44:11 +00001632//zz /*------------------------------------------------------------*/
1633//zz /*--- Metadata get/set functions, for client requests. ---*/
1634//zz /*------------------------------------------------------------*/
1635//zz
1636//zz /* Copy V bits between 'data' and 'vbits' (direction per 'setting').
1637//zz    Returns: 1 == OK, 2 == alignment error, 3 == addressing error. */
1638//zz static Int mc_get_or_set_vbits_for_client (
1639//zz ThreadId tid,
1640//zz Addr dataV,
1641//zz Addr vbitsV,
1642//zz SizeT size,
1643//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1644//zz )
1645//zz {
1646//zz Bool addressibleD = True;
1647//zz Bool addressibleV = True;
1648//zz UInt* data = (UInt*)dataV;
1649//zz UInt* vbits = (UInt*)vbitsV;
1650//zz SizeT szW = size / 4; /* sigh */
1651//zz SizeT i;
1652//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1653//zz UInt* vbitsP = NULL; /* ditto */
1654//zz
1655//zz /* Check alignment of args. */
1656//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1657//zz return 2;
1658//zz if ((size & 3) != 0)
1659//zz return 2;
1660//zz
1661//zz /* Check that arrays are addressible. */
1662//zz for (i = 0; i < szW; i++) {
1663//zz dataP = &data[i];
1664//zz vbitsP = &vbits[i];
1665//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1666//zz addressibleD = False;
1667//zz break;
1668//zz }
1669//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1670//zz addressibleV = False;
1671//zz break;
1672//zz }
1673//zz }
1674//zz if (!addressibleD) {
1675//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1676//zz setting ? True : False );
1677//zz return 3;
1678//zz }
1679//zz if (!addressibleV) {
1680//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1681//zz setting ? False : True );
1682//zz return 3;
1683//zz }
1684//zz
1685//zz /* Do the copy */
1686//zz if (setting) {
1687//zz /* setting */
1688//zz for (i = 0; i < szW; i++) {
1689//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
1690//zz MC_(record_value_error)(tid, 4);
1691//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1692//zz }
1693//zz } else {
1694//zz /* getting */
1695//zz for (i = 0; i < szW; i++) {
1696//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1697//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1698//zz }
1699//zz }
1700//zz
1701//zz return 1;
1702//zz }
sewardj05fe85e2005-04-27 22:46:36 +00001703
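/* Client-side sketch for the (currently disabled) GET_VBITS /
   SET_VBITS machinery above.  The macro names are assumed to be the
   ones in this era's memcheck.h and may differ:

      #include "memcheck.h"

      unsigned int data[16], vbits[16];
      int res = VALGRIND_GET_VBITS(data, vbits, sizeof(data));
      // returns 1 == OK, 2 == alignment error, 3 == addressing error
      if (res == 1)
         (void)VALGRIND_SET_VBITS(data, vbits, sizeof(data));
*/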
1704
1705/*------------------------------------------------------------*/
1706/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1707/*------------------------------------------------------------*/
1708
1709/* For the memory leak detector, say whether an entire 64k chunk of
1710 address space is possibly in use, or not. If in doubt return
1711 True.
1712*/
1713static
1714Bool mc_is_within_valid_secondary ( Addr a )
1715{
1716 SecMap* sm = maybe_get_secmap_for ( a );
1717 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
1718 /* Definitely not in use. */
1719 return False;
1720 } else {
1721 return True;
1722 }
1723}
1724
1725
1726/* For the memory leak detector, say whether or not a given word
1727 address is to be regarded as valid. */
1728static
1729Bool mc_is_valid_aligned_word ( Addr a )
1730{
1731 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
1732 if (sizeof(UWord) == 4) {
1733 tl_assert(VG_IS_4_ALIGNED(a));
1734 } else {
1735 tl_assert(VG_IS_8_ALIGNED(a));
1736 }
1737 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
1738 return True;
1739 } else {
1740 return False;
1741 }
1742}
sewardja4495682002-10-21 07:29:59 +00001743
1744
nethercote996901a2004-08-03 13:29:09 +00001745/* Leak detector for this tool.  We do nothing tool-specific here; we
sewardja4495682002-10-21 07:29:59 +00001746   merely run the generic leak detector with parameters suitable for
nethercote996901a2004-08-03 13:29:09 +00001747   this tool. */
njnb8dca862005-03-14 02:42:44 +00001748static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001749{
sewardj05fe85e2005-04-27 22:46:36 +00001750 MAC_(do_detect_memory_leaks) (
1751 tid,
1752 mode,
1753 mc_is_within_valid_secondary,
1754 mc_is_valid_aligned_word
1755 );
njn25e49d8e72002-09-23 09:36:25 +00001756}
1757
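/* A minimal sketch (hypothetical; the real scanner lives in the shared
   MAC leak-check code, not here) of how the two predicates above are
   typically used by a conservative pointer scan: whole 64k chunks that
   are certainly unused are skipped, and only aligned words that are
   both addressible and defined are dereferenced. */
static void example_scan_range ( Addr start, SizeT len,
                                 void (*note_pointer)(Addr p) )
{
   SizeT wsz = sizeof(UWord);
   Addr  a   = (start + wsz - 1) & ~((Addr)wsz - 1);   /* align up */
   while (a + wsz <= start + len) {
      if (!mc_is_within_valid_secondary(a)) {
         /* nothing in this 64k chunk can be in use; skip past it */
         Addr next = ((a >> 16) + 1) << 16;
         if (next <= a) break;   /* wrapped at top of address space */
         a = next;
         continue;
      }
      if (mc_is_valid_aligned_word(a))
         note_pointer( *(Addr*)a );
      a += wsz;
   }
}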
1758
sewardjc859fbf2005-04-22 21:10:28 +00001759/*------------------------------------------------------------*/
1760/*--- Initialisation ---*/
1761/*------------------------------------------------------------*/
1762
1763static void init_shadow_memory ( void )
1764{
1765 Int i;
1766 SecMap* sm;
1767
1768 /* Build the 3 distinguished secondaries */
1769 tl_assert(VGM_BIT_INVALID == 1);
1770 tl_assert(VGM_BIT_VALID == 0);
1771 tl_assert(VGM_BYTE_INVALID == 0xFF);
1772 tl_assert(VGM_BYTE_VALID == 0);
1773
1774 /* Set A invalid, V invalid. */
1775 sm = &sm_distinguished[SM_DIST_NOACCESS];
1776 for (i = 0; i < 65536; i++)
1777 sm->vbyte[i] = VGM_BYTE_INVALID;
1778 for (i = 0; i < 8192; i++)
1779 sm->abits[i] = VGM_BYTE_INVALID;
1780
1781 /* Set A valid, V invalid. */
1782 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1783 for (i = 0; i < 65536; i++)
1784 sm->vbyte[i] = VGM_BYTE_INVALID;
1785 for (i = 0; i < 8192; i++)
1786 sm->abits[i] = VGM_BYTE_VALID;
1787
1788 /* Set A valid, V valid. */
1789 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1790 for (i = 0; i < 65536; i++)
1791 sm->vbyte[i] = VGM_BYTE_VALID;
1792 for (i = 0; i < 8192; i++)
1793 sm->abits[i] = VGM_BYTE_VALID;
1794
1795 /* Set up the primary map. */
1796 /* These entries gradually get overwritten as the used address
1797 space expands. */
1798 for (i = 0; i < N_PRIMARY_MAP; i++)
1799 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
1800
1801 /* auxmap_size = auxmap_used = 0;
1802 no ... these are statically initialised */
1803}
1804
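/* A minimal sketch (hypothetical helper; memcheck's real handling of
   this, which also maintains n_secmaps_issued, is not shown here) of
   the copy-on-write pattern the distinguished secondaries enable: a
   primary entry only acquires a private SecMap once its 64k range has
   to diverge from one of the three uniform states set up above. */
static SecMap* example_get_writable_secmap ( UWord sec_no )
{
   SecMap* sm = primary_map[sec_no];
   if (is_distinguished_sm(sm)) {
      Int i;
      SecMap* copy = (SecMap*)VG_(malloc)(sizeof(SecMap));
      for (i = 0; i < 65536; i++) copy->vbyte[i] = sm->vbyte[i];
      for (i = 0; i < 8192; i++)  copy->abits[i] = sm->abits[i];
      primary_map[sec_no] = copy;
      sm = copy;
   }
   return sm;
}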
1805
1806/*------------------------------------------------------------*/
1807/*--- Sanity check machinery (permanently engaged) ---*/
1808/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001809
njn26f02512004-11-22 18:33:15 +00001810Bool TL_(cheap_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001811{
jseward9800fd32004-01-04 23:08:04 +00001812 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00001813 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00001814 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00001815 return True;
njn25e49d8e72002-09-23 09:36:25 +00001816}
1817
njn26f02512004-11-22 18:33:15 +00001818Bool TL_(expensive_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001819{
sewardj23eb2fd2005-04-22 16:29:19 +00001820 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00001821 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00001822 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00001823
sewardj23eb2fd2005-04-22 16:29:19 +00001824 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00001825 PROF_EVENT(491, "expensive_sanity_check");
1826
sewardj23eb2fd2005-04-22 16:29:19 +00001827 /* Check that the 3 distinguished SMs are still as they should
1828 be. */
njn25e49d8e72002-09-23 09:36:25 +00001829
sewardj45d94cc2005-04-20 14:44:11 +00001830 /* Check A invalid, V invalid. */
1831 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00001832 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00001833 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001834 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001835 for (i = 0; i < 8192; i++)
1836 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001837 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00001838
sewardj45d94cc2005-04-20 14:44:11 +00001839 /* Check A valid, V invalid. */
1840 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1841 for (i = 0; i < 65536; i++)
1842 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001843 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001844 for (i = 0; i < 8192; i++)
1845 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001846 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001847
1848 /* Check A valid, V valid. */
1849 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1850 for (i = 0; i < 65536; i++)
1851 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001852 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001853 for (i = 0; i < 8192; i++)
1854 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001855 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001856
sewardj23eb2fd2005-04-22 16:29:19 +00001857 if (bad) {
1858 VG_(printf)("memcheck expensive sanity: "
1859 "distinguished_secondaries have changed\n");
1860 return False;
1861 }
1862
1863 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00001864 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00001865 bad = True;
1866
1867 if (bad) {
1868 VG_(printf)("memcheck expensive sanity: "
1869 "nonsensical auxmap sizing\n");
1870 return False;
1871 }
1872
1873 /* check that the number of secmaps issued matches the number that
1874 are reachable (iow, no secmap leaks) */
1875 n_secmaps_found = 0;
1876 for (i = 0; i < N_PRIMARY_MAP; i++) {
1877 if (primary_map[i] == NULL) {
1878 bad = True;
1879 } else {
1880 if (!is_distinguished_sm(primary_map[i]))
1881 n_secmaps_found++;
1882 }
1883 }
1884
1885 for (i = 0; i < auxmap_used; i++) {
1886 if (auxmap[i].sm == NULL) {
1887 bad = True;
1888 } else {
1889 if (!is_distinguished_sm(auxmap[i].sm))
1890 n_secmaps_found++;
1891 }
1892 }
1893
1894 if (n_secmaps_found != n_secmaps_issued)
1895 bad = True;
1896
1897 if (bad) {
1898 VG_(printf)("memcheck expensive sanity: "
1899 "apparent secmap leakage\n");
1900 return False;
1901 }
1902
1903 /* check that auxmap only covers address space that the primary
1904 doesn't */
1905
1906 for (i = 0; i < auxmap_used; i++)
1907 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
1908 bad = True;
1909
1910 if (bad) {
1911 VG_(printf)("memcheck expensive sanity: "
1912 "auxmap covers wrong address space\n");
1913 return False;
1914 }
1915
1916   /* TODO: check that there is only one pointer to each secmap
1917      (expensive; not currently done). */
njn25e49d8e72002-09-23 09:36:25 +00001917
1918 return True;
1919}
sewardj45d94cc2005-04-20 14:44:11 +00001920
njn25e49d8e72002-09-23 09:36:25 +00001921
njn25e49d8e72002-09-23 09:36:25 +00001922/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001923/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001924/*------------------------------------------------------------*/
1925
njn43c799e2003-04-08 00:08:52 +00001926Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00001927
njn26f02512004-11-22 18:33:15 +00001928Bool TL_(process_cmd_line_option)(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00001929{
njn45270a22005-03-27 01:00:11 +00001930 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00001931 else
njn43c799e2003-04-08 00:08:52 +00001932 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001933
1934 return True;
njn25e49d8e72002-09-23 09:36:25 +00001935}
1936
njn26f02512004-11-22 18:33:15 +00001937void TL_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001938{
njn3e884182003-04-15 13:03:23 +00001939 MAC_(print_common_usage)();
1940 VG_(printf)(
1941" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1942 );
1943}
1944
njn26f02512004-11-22 18:33:15 +00001945void TL_(print_debug_usage)(void)
njn3e884182003-04-15 13:03:23 +00001946{
1947 MAC_(print_common_debug_usage)();
1948 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00001949" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00001950 );
njn25e49d8e72002-09-23 09:36:25 +00001951}
1952
nethercote8b76fe52004-11-08 19:20:09 +00001953/*------------------------------------------------------------*/
1954/*--- Client requests ---*/
1955/*------------------------------------------------------------*/
1956
1957/* Client block management:
1958
1959 This is managed as an expanding array of client block descriptors.
1960 Indices of live descriptors are issued to the client, so it can ask
1961 to free them later. Therefore we cannot slide live entries down
1962 over dead ones. Instead we must use free/inuse flags and scan for
1963 an empty slot at allocation time. This in turn means allocation is
1964 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00001965
sewardjedc75ab2005-03-15 23:30:32 +00001966 An unused block has start == size == 0
1967*/
nethercote8b76fe52004-11-08 19:20:09 +00001968
1969typedef
1970 struct {
1971 Addr start;
1972 SizeT size;
1973 ExeContext* where;
sewardjedc75ab2005-03-15 23:30:32 +00001974 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00001975 }
1976 CGenBlock;
1977
1978/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00001979static UInt cgb_size = 0;
1980static UInt cgb_used = 0;
1981static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00001982
1983/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00001984static UInt cgb_used_MAX = 0; /* Max in use. */
1985static UInt cgb_allocs = 0; /* Number of allocs. */
1986static UInt cgb_discards = 0; /* Number of discards. */
1987static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00001988
1989
1990static
njn695c16e2005-03-27 03:40:28 +00001991Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00001992{
1993 UInt i, sz_new;
1994 CGenBlock* cgbs_new;
1995
njn695c16e2005-03-27 03:40:28 +00001996 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00001997
njn695c16e2005-03-27 03:40:28 +00001998 for (i = 0; i < cgb_used; i++) {
1999 cgb_search++;
2000 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002001 return i;
2002 }
2003
2004 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002005 if (cgb_used < cgb_size) {
2006 cgb_used++;
2007 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002008 }
2009
2010 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002011 tl_assert(cgb_used == cgb_size);
2012 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002013
2014 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002015 for (i = 0; i < cgb_used; i++)
2016 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002017
njn695c16e2005-03-27 03:40:28 +00002018 if (cgbs != NULL)
2019 VG_(free)( cgbs );
2020 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002021
njn695c16e2005-03-27 03:40:28 +00002022 cgb_size = sz_new;
2023 cgb_used++;
2024 if (cgb_used > cgb_used_MAX)
2025 cgb_used_MAX = cgb_used;
2026 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002027}
2028
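/* A minimal sketch (hypothetical wrappers, mirroring what the
   CREATE_BLOCK and DISCARD cases of TL_(handle_client_request) below
   actually do) of the descriptor lifecycle: the index returned by
   alloc_client_block() is the stable handle given back to the client,
   and a slot is recycled simply by zeroing its start/size. */
static Int example_describe_block ( ThreadId tid, Addr start, SizeT size,
                                    Char* desc )
{
   Int i = alloc_client_block();
   cgbs[i].start = start;
   cgbs[i].size  = size;
   cgbs[i].desc  = VG_(strdup)(desc);
   cgbs[i].where = VG_(record_ExeContext)( tid );
   return i;
}

static void example_discard_block ( Int i )
{
   VG_(free)(cgbs[i].desc);
   cgbs[i].start = cgbs[i].size = 0;   /* slot becomes reusable */
}
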
2029
2030static void show_client_block_stats ( void )
2031{
2032 VG_(message)(Vg_DebugMsg,
2033      "general CBs: %d allocs, %d discards, %d maxinuse, %d searches",
njn695c16e2005-03-27 03:40:28 +00002034 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002035 );
2036}
2037
2038static Bool find_addr(VgHashNode* sh_ch, void* ap)
2039{
2040 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
2041 Addr a = *(Addr*)ap;
2042
2043 return VG_(addr_is_in_block)(a, m->data, m->size);
2044}
2045
2046static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2047{
2048 UInt i;
2049 /* VG_(printf)("try to identify %d\n", a); */
2050
2051 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002052 for (i = 0; i < cgb_used; i++) {
2053 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002054 continue;
njn695c16e2005-03-27 03:40:28 +00002055 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size)) {
nethercote8b76fe52004-11-08 19:20:09 +00002056 MAC_Mempool **d, *mp;
2057
2058 /* OK - maybe it's a mempool, too? */
2059 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
njn695c16e2005-03-27 03:40:28 +00002060 (UWord)cgbs[i].start,
nethercote8b76fe52004-11-08 19:20:09 +00002061 (void*)&d);
2062 if(mp != NULL) {
2063 if(mp->chunks != NULL) {
2064 MAC_Chunk *mc;
2065
2066 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
2067 if(mc != NULL) {
2068 ai->akind = UserG;
2069 ai->blksize = mc->size;
2070 ai->rwoffset = (Int)(a) - (Int)mc->data;
2071 ai->lastchange = mc->where;
2072 return True;
2073 }
2074 }
2075 ai->akind = Mempool;
njn695c16e2005-03-27 03:40:28 +00002076 ai->blksize = cgbs[i].size;
2077 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2078 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002079 return True;
2080 }
2081 ai->akind = UserG;
njn695c16e2005-03-27 03:40:28 +00002082 ai->blksize = cgbs[i].size;
2083 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2084 ai->lastchange = cgbs[i].where;
2085 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002086 return True;
2087 }
2088 }
2089 return False;
2090}
2091
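/* Client-side sketch of how the block descriptions consulted above
   come into existence.  The macro names are assumed to be the ones in
   this era's memcheck.h; the handle is the index into cgbs[]:

      #include <stdlib.h>
      #include "memcheck.h"

      char* arena = malloc(1000);
      int h = VALGRIND_CREATE_BLOCK(arena, 1000, "my custom arena");
      // ... errors on addresses inside the block are now reported
      // with the description "my custom arena" attached ...
      VALGRIND_DISCARD(h);
*/
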
njn26f02512004-11-22 18:33:15 +00002092Bool TL_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002093{
2094 Int i;
2095 Bool ok;
2096 Addr bad_addr;
2097
njnfc26ff92004-11-22 19:12:49 +00002098 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002099 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2100 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2101 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2102 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2103 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2104 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2105 return False;
2106
2107 switch (arg[0]) {
2108 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2109 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2110 if (!ok)
2111 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/True,
2112 /*isUnaddr*/True );
2113 *ret = ok ? (UWord)NULL : bad_addr;
2114 break;
2115
2116 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2117 MC_ReadResult res;
2118 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2119 if (MC_AddrErr == res)
2120 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
2121 /*isUnaddr*/True );
2122 else if (MC_ValueErr == res)
2123 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
2124 /*isUnaddr*/False );
2125 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
2126 break;
2127 }
2128
2129 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002130 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00002131 *ret = 0; /* return value is meaningless */
2132 break;
2133
2134 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002135 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002136 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002137 break;
2138
2139 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002140 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002141 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002142 break;
2143
2144 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002145 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002146 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002147 break;
2148
sewardjedc75ab2005-03-15 23:30:32 +00002149 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
2150 if (arg[1] != 0 && arg[2] != 0) {
njn695c16e2005-03-27 03:40:28 +00002151 i = alloc_client_block();
2152 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2153 cgbs[i].start = arg[1];
2154 cgbs[i].size = arg[2];
2155 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2156 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002157
2158 *ret = i;
2159 } else
2160 *ret = -1;
2161 break;
2162
nethercote8b76fe52004-11-08 19:20:09 +00002163 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002164 if (cgbs == NULL
2165 || arg[2] >= cgb_used ||
2166 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002167 *ret = 1;
2168 } else {
njn695c16e2005-03-27 03:40:28 +00002169 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2170 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2171 VG_(free)(cgbs[arg[2]].desc);
2172 cgb_discards++;
sewardjedc75ab2005-03-15 23:30:32 +00002173 *ret = 0;
2174 }
nethercote8b76fe52004-11-08 19:20:09 +00002175 break;
2176
sewardj45d94cc2005-04-20 14:44:11 +00002177//zz case VG_USERREQ__GET_VBITS:
2178//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2179//zz error. */
2180//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2181//zz *ret = mc_get_or_set_vbits_for_client
2182//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2183//zz break;
2184//zz
2185//zz case VG_USERREQ__SET_VBITS:
2186//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2187//zz error. */
2188//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2189//zz *ret = mc_get_or_set_vbits_for_client
2190//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2191//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002192
2193 default:
2194 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2195 return True;
2196 } else {
2197 VG_(message)(Vg_UserMsg,
2198 "Warning: unknown memcheck client request code %llx",
2199 (ULong)arg[0]);
2200 return False;
2201 }
2202 }
2203 return True;
2204}
njn25e49d8e72002-09-23 09:36:25 +00002205
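/* Client-side sketch of the memory-state requests handled above.  The
   macro names are assumed to be the ones in this era's memcheck.h; a
   nonzero result from the CHECK macros is the first offending address:

      #include "memcheck.h"

      char buf[64];
      VALGRIND_MAKE_NOACCESS(buf, 64);   // neither readable nor writable
      VALGRIND_MAKE_WRITABLE(buf, 64);   // addressible, contents undefined
      VALGRIND_MAKE_READABLE(buf, 64);   // addressible and defined
      // zero means OK; otherwise the first offending address is returned
      long bad = VALGRIND_CHECK_WRITABLE(buf, 64);
      VALGRIND_DO_LEAK_CHECK;            // run a full leak check now
*/
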
2206/*------------------------------------------------------------*/
2207/*--- Setup ---*/
2208/*------------------------------------------------------------*/
2209
njn26f02512004-11-22 18:33:15 +00002210void TL_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00002211{
njn810086f2002-11-14 12:42:47 +00002212 VG_(details_name) ("Memcheck");
2213 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00002214 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00002215 VG_(details_copyright_author)(
njn53612422005-03-12 16:22:54 +00002216 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
nethercote421281e2003-11-20 16:20:55 +00002217 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9ebf9fd2004-11-28 16:56:51 +00002218 VG_(details_avg_translation_sizeB) ( 370 );
njn25e49d8e72002-09-23 09:36:25 +00002219
njn8a97c6d2005-03-31 04:37:24 +00002220 VG_(basic_tool_funcs) (TL_(post_clo_init),
2221 TL_(instrument),
2222 TL_(fini));
2223
njn810086f2002-11-14 12:42:47 +00002224 VG_(needs_core_errors) ();
njn8a97c6d2005-03-31 04:37:24 +00002225 VG_(needs_tool_errors) (TL_(eq_Error),
2226 TL_(pp_Error),
2227 TL_(update_extra),
2228 TL_(recognised_suppression),
2229 TL_(read_extra_suppression_info),
2230 TL_(error_matches_suppression),
2231 TL_(get_error_name),
2232 TL_(print_extra_suppression_info));
njn810086f2002-11-14 12:42:47 +00002233 VG_(needs_libc_freeres) ();
njn8a97c6d2005-03-31 04:37:24 +00002234 VG_(needs_command_line_options)(TL_(process_cmd_line_option),
2235 TL_(print_usage),
2236 TL_(print_debug_usage));
2237 VG_(needs_client_requests) (TL_(handle_client_request));
2238 VG_(needs_sanity_checks) (TL_(cheap_sanity_check),
2239 TL_(expensive_sanity_check));
fitzhardinge98abfc72003-12-16 02:05:15 +00002240 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00002241
njn8a97c6d2005-03-31 04:37:24 +00002242 VG_(malloc_funcs) (TL_(malloc),
2243 TL_(__builtin_new),
2244 TL_(__builtin_vec_new),
2245 TL_(memalign),
2246 TL_(calloc),
2247 TL_(free),
2248 TL_(__builtin_delete),
2249 TL_(__builtin_vec_delete),
2250 TL_(realloc),
2251 MALLOC_REDZONE_SZB );
2252
njn3e884182003-04-15 13:03:23 +00002253 MAC_( new_mem_heap) = & mc_new_mem_heap;
nethercote8b76fe52004-11-08 19:20:09 +00002254 MAC_( ban_mem_heap) = & mc_make_noaccess;
njn3e884182003-04-15 13:03:23 +00002255 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
nethercote8b76fe52004-11-08 19:20:09 +00002256 MAC_( die_mem_heap) = & mc_make_noaccess;
2257 MAC_(check_noaccess) = & mc_check_noaccess;
njn3e884182003-04-15 13:03:23 +00002258
fitzhardinge98abfc72003-12-16 02:05:15 +00002259 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
nethercote8b76fe52004-11-08 19:20:09 +00002260 VG_(init_new_mem_stack_signal) ( & mc_make_writable );
2261 VG_(init_new_mem_brk) ( & mc_make_writable );
njnb8dca862005-03-14 02:42:44 +00002262 VG_(init_new_mem_mmap) ( & mc_new_mem_mmap );
njn25e49d8e72002-09-23 09:36:25 +00002263
fitzhardinge98abfc72003-12-16 02:05:15 +00002264 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
njn3e884182003-04-15 13:03:23 +00002265
nethercote8b76fe52004-11-08 19:20:09 +00002266 VG_(init_die_mem_stack_signal) ( & mc_make_noaccess );
2267 VG_(init_die_mem_brk) ( & mc_make_noaccess );
2268 VG_(init_die_mem_munmap) ( & mc_make_noaccess );
njn3e884182003-04-15 13:03:23 +00002269
fitzhardinge98abfc72003-12-16 02:05:15 +00002270 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2271 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2272 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2273 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2274 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2275 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002276
fitzhardinge98abfc72003-12-16 02:05:15 +00002277 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2278 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2279 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2280 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2281 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2282 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002283
nethercote8b76fe52004-11-08 19:20:09 +00002284 VG_(init_ban_mem_stack) ( & mc_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00002285
fitzhardinge98abfc72003-12-16 02:05:15 +00002286 VG_(init_pre_mem_read) ( & mc_check_is_readable );
2287 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2288 VG_(init_pre_mem_write) ( & mc_check_is_writable );
njncf45fd42004-11-24 16:30:22 +00002289 VG_(init_post_mem_write) ( & mc_post_mem_write );
nethercote8b76fe52004-11-08 19:20:09 +00002290
2291 VG_(init_pre_reg_read) ( & mc_pre_reg_read );
njn25e49d8e72002-09-23 09:36:25 +00002292
njncf45fd42004-11-24 16:30:22 +00002293 VG_(init_post_reg_write) ( & mc_post_reg_write );
fitzhardinge98abfc72003-12-16 02:05:15 +00002294 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00002295
njn31066fd2005-03-26 00:42:02 +00002296 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2297 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2298 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00002299
njn43c799e2003-04-08 00:08:52 +00002300 /* Additional block description for VG_(describe_addr)() */
nethercote8b76fe52004-11-08 19:20:09 +00002301 MAC_(describe_addr_supp) = client_perm_maybe_describe;
njn43c799e2003-04-08 00:08:52 +00002302
njnd04b7c62002-10-03 14:05:52 +00002303 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00002304 MAC_(common_pre_clo_init)();
sewardjc1a2cda2005-04-21 17:34:00 +00002305
2306 tl_assert( TL_(expensive_sanity_check)() );
njn5c004e42002-11-18 11:04:50 +00002307}
2308
njn26f02512004-11-22 18:33:15 +00002309void TL_(post_clo_init) ( void )
njn5c004e42002-11-18 11:04:50 +00002310{
2311}
2312
njn26f02512004-11-22 18:33:15 +00002313void TL_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002314{
nethercote8b76fe52004-11-08 19:20:09 +00002315 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002316
sewardj23eb2fd2005-04-22 16:29:19 +00002317 Int i, n_accessible_dist;
2318 SecMap* sm;
2319
sewardj45d94cc2005-04-20 14:44:11 +00002320 if (VG_(clo_verbosity) > 1) {
2321 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002322 " memcheck: sanity checks: %d cheap, %d expensive",
2323 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002324 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002325 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2326 auxmap_used,
2327 auxmap_used * 64,
2328 auxmap_used / 16 );
2329 VG_(message)(Vg_DebugMsg,
2330 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002331 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002332 VG_(message)(Vg_DebugMsg,
2333 " memcheck: secondaries: %d issued (%dk, %dM)",
2334 n_secmaps_issued,
2335 n_secmaps_issued * 64,
2336 n_secmaps_issued / 16 );
2337
2338 n_accessible_dist = 0;
2339 for (i = 0; i < N_PRIMARY_MAP; i++) {
2340 sm = primary_map[i];
2341 if (is_distinguished_sm(sm)
2342 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2343 n_accessible_dist ++;
2344 }
2345 for (i = 0; i < auxmap_used; i++) {
2346 sm = auxmap[i].sm;
2347 if (is_distinguished_sm(sm)
2348 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2349 n_accessible_dist ++;
2350 }
2351
2352 VG_(message)(Vg_DebugMsg,
2353 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2354 n_accessible_dist,
2355 n_accessible_dist * 64,
2356 n_accessible_dist / 16 );
2357
sewardj45d94cc2005-04-20 14:44:11 +00002358 }
2359
njn5c004e42002-11-18 11:04:50 +00002360 if (0) {
2361 VG_(message)(Vg_DebugMsg,
2362 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002363 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002364 }
njn25e49d8e72002-09-23 09:36:25 +00002365}
2366
njn26f02512004-11-22 18:33:15 +00002367VG_DETERMINE_INTERFACE_VERSION(TL_(pre_clo_init), 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002368
njn25e49d8e72002-09-23 09:36:25 +00002369/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002370/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002371/*--------------------------------------------------------------------*/