njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjc859fbf2005-04-22 21:10:28 +000033/* TODO 22 Apr 05
sewardj45d94cc2005-04-20 14:44:11 +000034
sewardjc859fbf2005-04-22 21:10:28 +000035 test whether it would be faster, for LOADV4, to check
36 only for 8-byte validity on the fast path
sewardj45d94cc2005-04-20 14:44:11 +000037*/
38
njn25cac76cb2002-09-23 11:21:57 +000039#include "mc_include.h"
40#include "memcheck.h" /* for client requests */
njn4802b382005-06-11 04:58:29 +000041#include "pub_tool_aspacemgr.h"
njn97405b22005-06-02 03:39:33 +000042#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000043#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000044#include "pub_tool_libcprint.h"
njnf536bbb2005-06-13 04:21:38 +000045#include "pub_tool_machine.h"
njn25e49d8e72002-09-23 09:36:25 +000046
sewardj45d94cc2005-04-20 14:44:11 +000047
sewardjc1a2cda2005-04-21 17:34:00 +000048#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
49#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
50
51/* Define to debug the mem audit system. Set to:
52 0 no debugging, fast cases are used
53 1 some sanity checking, fast cases are used
54 2 max sanity checking, only slow cases are used
55*/
sewardj23eb2fd2005-04-22 16:29:19 +000056#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000057
njn25e49d8e72002-09-23 09:36:25 +000058#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
59
njn25e49d8e72002-09-23 09:36:25 +000060
njn25e49d8e72002-09-23 09:36:25 +000061/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000062/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000063/*------------------------------------------------------------*/
64
sewardjc859fbf2005-04-22 21:10:28 +000065/* TODO: fix this comment */
66//zz /* All reads and writes are checked against a memory map, which
67//zz records the state of all memory in the process. The memory map is
68//zz organised like this:
69//zz
70//zz The top 16 bits of an address are used to index into a top-level
71//zz map table, containing 65536 entries. Each entry is a pointer to a
72//zz second-level map, which records the accessibility and validity
73//zz permissions for the 65536 bytes indexed by the lower 16 bits of the
74//zz address. Each byte is represented by nine bits, one indicating
75//zz accessibility, the other eight validity. So each second-level map
76//zz contains 73728 bytes. This two-level arrangement conveniently
77//zz divides the 4G address space into 64k lumps, each size 64k bytes.
78//zz
79//zz All entries in the primary (top-level) map must point to a valid
80//zz secondary (second-level) map. Since most of the 4G of address
81//zz space will not be in use -- ie, not mapped at all -- there is a
njn02bc4b82005-05-15 17:28:26 +000082//zz distinguished secondary map, which indicates 'not addressable and
sewardjc859fbf2005-04-22 21:10:28 +000083//zz not valid' writeable for all bytes. Entries in the primary map for
84//zz which the entire 64k is not in use at all point at this
85//zz distinguished map.
86//zz
87//zz There are actually 4 distinguished secondaries. These are used to
88//zz represent a memory range which is either not addressable (validity
89//zz doesn't matter), addressable+not valid, addressable+valid.
90//zz
91//zz [...] lots of stuff deleted due to out of date-ness
92//zz
93//zz As a final optimisation, the alignment and address checks for
94//zz 4-byte loads and stores are combined in a neat way. The primary
95//zz map is extended to have 262144 entries (2^18), rather than 2^16.
96//zz The top 3/4 of these entries are permanently set to the
97//zz distinguished secondary map. For a 4-byte load/store, the
98//zz top-level map is indexed not with (addr >> 16) but instead f(addr),
99//zz where
100//zz
101//zz f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
102//zz = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
103//zz = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
104//zz
105//zz ie the lowest two bits are placed above the 16 high address bits.
106//zz If either of these two bits are nonzero, the address is misaligned;
107//zz this will select a secondary map from the upper 3/4 of the primary
108//zz map. Because this is always the distinguished secondary map, a
109//zz (bogus) address check failure will result. The failure handling
110//zz code can then figure out whether this is a genuine addr check
111//zz failure or whether it is a possibly-legitimate access at a
112//zz misaligned address.
113//zz */
114
sewardj45d94cc2005-04-20 14:44:11 +0000115/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000116
sewardj23eb2fd2005-04-22 16:29:19 +0000117/* Change only this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000118
sewardje4ccc012005-05-02 12:53:38 +0000119#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000120
121/* cover the entire address space */
122# define N_PRIMARY_BITS 16
123
124#else
125
126/* Just handle the first 16G fast and the rest via auxiliary
127 primaries. */
128# define N_PRIMARY_BITS 18
129
130#endif
131
sewardj45d94cc2005-04-20 14:44:11 +0000132
sewardjc1a2cda2005-04-21 17:34:00 +0000133/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000134#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000135
136/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000137#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
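/* To make the numbers above concrete: on a 32-bit build N_PRIMARY_BITS
   is 16, so the primary map has 65536 entries, each shadowing 64KB,
   which covers the whole 4GB address space and makes
   MAX_PRIMARY_ADDRESS 0xFFFFFFFF. On a 64-bit build N_PRIMARY_BITS is
   18, so the primary map covers 262144 * 64KB == 16GB and
   MAX_PRIMARY_ADDRESS is 0x3FFFFFFFF; addresses above that are handled
   through the auxiliary primary map below. */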
138
139
140/* --------------- Stats maps --------------- */
141
142static Int n_secmaps_issued = 0;
143static ULong n_auxmap_searches = 0;
144static ULong n_auxmap_cmps = 0;
145static Int n_sanity_cheap = 0;
146static Int n_sanity_expensive = 0;
sewardj45d94cc2005-04-20 14:44:11 +0000147
148
149/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000150
151typedef
152 struct {
sewardj45d94cc2005-04-20 14:44:11 +0000153 UChar abits[8192];
154 UChar vbyte[65536];
njn25e49d8e72002-09-23 09:36:25 +0000155 }
156 SecMap;
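/* So one SecMap shadows one 64KB-aligned chunk of address space:
   vbyte[] holds one V (validity) byte per client byte, and abits[]
   holds one A (accessibility) bit per client byte (8192 * 8 == 65536
   bits). For an address 'a' inside the chunk, the V byte is
   sm->vbyte[a & 0xFFFF], the A bit is fetched with
   read_bit_array(sm->abits, a & 0xFFFF), and sm->abits[(a & 0xFFFF) >> 3]
   is the byte of A bits covering the aligned 8-byte group holding 'a'
   (see the fast-path helpers further below). */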
157
sewardj45d94cc2005-04-20 14:44:11 +0000158/* 3 distinguished secondary maps, one for no-access, one for
159 accessible but undefined, and one for accessible and defined.
160 Distinguished secondaries may never be modified.
161*/
162#define SM_DIST_NOACCESS 0
163#define SM_DIST_ACCESS_UNDEFINED 1
164#define SM_DIST_ACCESS_DEFINED 2
njnb8dca862005-03-14 02:42:44 +0000165
sewardj45d94cc2005-04-20 14:44:11 +0000166static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000167
sewardj45d94cc2005-04-20 14:44:11 +0000168static inline Bool is_distinguished_sm ( SecMap* sm ) {
169 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
170}
njnb8dca862005-03-14 02:42:44 +0000171
sewardj45d94cc2005-04-20 14:44:11 +0000172/* dist_sm points to one of our three distinguished secondaries. Make
173 a copy of it so that we can write to it.
174*/
175static SecMap* copy_for_writing ( SecMap* dist_sm )
176{
177 SecMap* new_sm;
178 tl_assert(dist_sm == &sm_distinguished[0]
179 || dist_sm == &sm_distinguished[1]
180 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000181
sewardj45d94cc2005-04-20 14:44:11 +0000182 new_sm = VG_(shadow_alloc)(sizeof(SecMap));
183 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
sewardj23eb2fd2005-04-22 16:29:19 +0000184 n_secmaps_issued++;
sewardj45d94cc2005-04-20 14:44:11 +0000185 return new_sm;
186}
njnb8dca862005-03-14 02:42:44 +0000187
sewardj45d94cc2005-04-20 14:44:11 +0000188
189/* --------------- Primary maps --------------- */
190
191/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000192 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000193 handled using the auxiliary primary map.
194*/
sewardj23eb2fd2005-04-22 16:29:19 +0000195static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000196
197
198/* An entry in the auxiliary primary map. base must be a 64k-aligned
199 value, and sm points at the relevant secondary map. As with the
200 main primary map, the secondary may be either a real secondary, or
201 one of the three distinguished secondaries.
202*/
203typedef
204 struct {
sewardj23eb2fd2005-04-22 16:29:19 +0000205 Addr base;
sewardj45d94cc2005-04-20 14:44:11 +0000206 SecMap* sm;
207 }
208 AuxMapEnt;
209
210/* An expanding array of AuxMapEnts. */
sewardjaba741d2005-06-09 13:56:07 +0000211#define N_AUXMAPS 20000 /* HACK */
sewardj45d94cc2005-04-20 14:44:11 +0000212static AuxMapEnt hacky_auxmaps[N_AUXMAPS];
213static Int auxmap_size = N_AUXMAPS;
214static Int auxmap_used = 0;
215static AuxMapEnt* auxmap = &hacky_auxmaps[0];
216
sewardj45d94cc2005-04-20 14:44:11 +0000217
218/* Find an entry in the auxiliary map. If an entry is found, move it
219 one step closer to the front of the array, then return its address.
sewardj05fe85e2005-04-27 22:46:36 +0000220 If an entry is not found, return NULL. Note carefully that
sewardj45d94cc2005-04-20 14:44:11 +0000221 because each call potentially rearranges the entries, each call
222 to this function invalidates ALL AuxMapEnt*s previously obtained by
223 calling this fn.
224*/
sewardj05fe85e2005-04-27 22:46:36 +0000225static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000226{
227 UWord i;
228 tl_assert(a > MAX_PRIMARY_ADDRESS);
229
230 a &= ~(Addr)0xFFFF;
231
232 /* Search .. */
233 n_auxmap_searches++;
234 for (i = 0; i < auxmap_used; i++) {
235 if (auxmap[i].base == a)
236 break;
237 }
238 n_auxmap_cmps += (ULong)(i+1);
239
240 if (i < auxmap_used) {
241 /* Found it. Nudge it a bit closer to the front. */
242 if (i > 0) {
243 AuxMapEnt tmp = auxmap[i-1];
244 auxmap[i-1] = auxmap[i];
245 auxmap[i] = tmp;
246 i--;
247 }
248 return &auxmap[i];
249 }
250
sewardj05fe85e2005-04-27 22:46:36 +0000251 return NULL;
252}
253
254
255/* Find an entry in the auxiliary map. If an entry is found, move it
256 one step closer to the front of the array, then return its address.
257 If an entry is not found, allocate one. Note carefully that
258 because each call potentially rearranges the entries, each call
259 to this function invalidates ALL AuxMapEnt*s previously obtained by
260 calling this fn.
261*/
262static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
263{
264 AuxMapEnt* am = maybe_find_in_auxmap(a);
265 if (am)
266 return am;
267
sewardj45d94cc2005-04-20 14:44:11 +0000268 /* We didn't find it. Hmm. This is a new piece of address space.
269 We'll need to allocate a new AuxMap entry for it. */
270 if (auxmap_used >= auxmap_size) {
271 tl_assert(auxmap_used == auxmap_size);
272 /* Out of auxmap entries. */
273 tl_assert2(0, "failed to expand the auxmap table");
274 }
275
276 tl_assert(auxmap_used < auxmap_size);
277
278 auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
279 auxmap[auxmap_used].sm = &sm_distinguished[SM_DIST_NOACCESS];
280
281 if (0)
282 VG_(printf)("new auxmap, base = 0x%llx\n",
283 (ULong)auxmap[auxmap_used].base );
284
285 auxmap_used++;
286 return &auxmap[auxmap_used-1];
287}
288
289
290/* --------------- SecMap fundamentals --------------- */
291
292/* Produce the secmap for 'a', either from the primary map or by
293 ensuring there is an entry for it in the aux primary map. The
294 secmap may be a distinguished one as the caller will only want to
295 be able to read it.
296*/
297static SecMap* get_secmap_readable ( Addr a )
298{
299 if (a <= MAX_PRIMARY_ADDRESS) {
300 UWord pm_off = a >> 16;
301 return primary_map[ pm_off ];
302 } else {
303 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
304 return am->sm;
305 }
306}
307
sewardj05fe85e2005-04-27 22:46:36 +0000308/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
309 allocate one if one doesn't already exist. This is used by the
310 leak checker.
311*/
312static SecMap* maybe_get_secmap_for ( Addr a )
313{
314 if (a <= MAX_PRIMARY_ADDRESS) {
315 UWord pm_off = a >> 16;
316 return primary_map[ pm_off ];
317 } else {
318 AuxMapEnt* am = maybe_find_in_auxmap(a);
319 return am ? am->sm : NULL;
320 }
321}
322
323
324
sewardj45d94cc2005-04-20 14:44:11 +0000325/* Produce the secmap for 'a', either from the primary map or by
326 ensuring there is an entry for it in the aux primary map. The
327 secmap may not be a distinguished one, since the caller will want
328 to be able to write it. If it is a distinguished secondary, make a
329 writable copy of it, install it, and return the copy instead. (COW
330 semantics).
331*/
332static SecMap* get_secmap_writable ( Addr a )
333{
334 if (a <= MAX_PRIMARY_ADDRESS) {
335 UWord pm_off = a >> 16;
336 if (is_distinguished_sm(primary_map[ pm_off ]))
337 primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
338 return primary_map[pm_off];
339 } else {
340 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
341 if (is_distinguished_sm(am->sm))
342 am->sm = copy_for_writing(am->sm);
343 return am->sm;
344 }
345}
346
347
348/* --------------- Endianness helpers --------------- */
349
350/* Returns the offset in memory of the byteno-th least significant byte
351 in a wordszB-sized word, given the specified endianness. */
352static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
353 UWord byteno ) {
354 return bigendian ? (wordszB-1-byteno) : byteno;
355}
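/* Example: for a 4-byte word at address 'a', byteno 0 is the least
   significant byte. On a little-endian host it lives at offset 0
   (byte_offset_w(4, False, 0) == 0 and byte_offset_w(4, False, 3) == 3),
   whereas on a big-endian host the least significant byte lives at the
   highest offset (byte_offset_w(4, True, 0) == 3 and
   byte_offset_w(4, True, 3) == 0). */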
356
357
358/* --------------- Fundamental functions --------------- */
359
360static
361void get_abit_and_vbyte ( /*OUT*/UWord* abit,
362 /*OUT*/UWord* vbyte,
363 Addr a )
364{
365 SecMap* sm = get_secmap_readable(a);
366 *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
367 *abit = read_bit_array(sm->abits, a & 0xFFFF);
368}
369
370static
371UWord get_abit ( Addr a )
372{
373 SecMap* sm = get_secmap_readable(a);
374 return read_bit_array(sm->abits, a & 0xFFFF);
375}
376
377static
378void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
379{
380 SecMap* sm = get_secmap_writable(a);
381 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
382 write_bit_array(sm->abits, a & 0xFFFF, abit);
383}
384
385static
386void set_vbyte ( Addr a, UWord vbyte )
387{
388 SecMap* sm = get_secmap_writable(a);
389 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
390}
391
392
393/* --------------- Load/store slow cases. --------------- */
394
395static
396ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
397{
398 /* Make up a result V word, which contains the loaded data for
sewardjf3d57dd2005-04-22 20:23:27 +0000399 valid addresses and Defined for invalid addresses. Iterate over
400 the bytes in the word, from the most significant down to the
401 least. */
sewardj45d94cc2005-04-20 14:44:11 +0000402 ULong vw = VGM_WORD64_INVALID;
403 SizeT i = szB-1;
404 SizeT n_addrs_bad = 0;
405 Addr ai;
406 Bool aok;
407 UWord abit, vbyte;
408
sewardjc1a2cda2005-04-21 17:34:00 +0000409 PROF_EVENT(30, "mc_LOADVn_slow");
sewardj45d94cc2005-04-20 14:44:11 +0000410 tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
411
412 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +0000413 PROF_EVENT(31, "mc_LOADVn_slow(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000414 ai = a+byte_offset_w(szB,bigendian,i);
415 get_abit_and_vbyte(&abit, &vbyte, ai);
416 aok = abit == VGM_BIT_VALID;
417 if (!aok)
418 n_addrs_bad++;
419 vw <<= 8;
sewardjf3d57dd2005-04-22 20:23:27 +0000420 vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
sewardj45d94cc2005-04-20 14:44:11 +0000421 if (i == 0) break;
422 i--;
423 }
424
425 if (n_addrs_bad > 0)
426 MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
427
sewardj45d94cc2005-04-20 14:44:11 +0000428 return vw;
429}
430
431
432static
433void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
434{
435 SizeT i;
436 SizeT n_addrs_bad = 0;
437 UWord abit;
438 Bool aok;
439 Addr ai;
440
sewardjc1a2cda2005-04-21 17:34:00 +0000441 PROF_EVENT(35, "mc_STOREVn_slow");
sewardj45d94cc2005-04-20 14:44:11 +0000442 tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
443
444 /* Dump vbytes in memory, iterating from least to most significant
445 byte. At the same time establish addressability of the
446 location. */
447 for (i = 0; i < szB; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000448 PROF_EVENT(36, "mc_STOREVn_slow(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000449 ai = a+byte_offset_w(szB,bigendian,i);
450 abit = get_abit(ai);
451 aok = abit == VGM_BIT_VALID;
452 if (!aok)
453 n_addrs_bad++;
454 set_vbyte(ai, vbytes & 0xFF );
455 vbytes >>= 8;
456 }
457
458 /* If an address error has happened, report it. */
459 if (n_addrs_bad > 0)
460 MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
461}
462
463
sewardj45d94cc2005-04-20 14:44:11 +0000464//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
465//zz
466//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
467//zz {
468//zz SecMap* sm;
469//zz UInt sm_off;
470//zz UChar abits8;
471//zz PROF_EVENT(24);
472//zz # ifdef VG_DEBUG_MEMORY
473//zz tl_assert(VG_IS_4_ALIGNED(a));
474//zz # endif
475//zz sm = primary_map[PM_IDX(a)];
476//zz sm_off = SM_OFF(a);
477//zz abits8 = sm->abits[sm_off >> 3];
478//zz abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
479//zz abits8 &= 0x0F;
480//zz return abits8;
481//zz }
482//zz
483//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
484//zz {
485//zz SecMap* sm = primary_map[PM_IDX(a)];
486//zz UInt sm_off = SM_OFF(a);
487//zz PROF_EVENT(25);
488//zz # ifdef VG_DEBUG_MEMORY
489//zz tl_assert(VG_IS_4_ALIGNED(a));
490//zz # endif
491//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
492//zz }
493//zz
494//zz
495//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
496//zz {
497//zz SecMap* sm;
498//zz UInt sm_off;
499//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
500//zz sm = primary_map[PM_IDX(a)];
501//zz sm_off = SM_OFF(a);
502//zz PROF_EVENT(23);
503//zz # ifdef VG_DEBUG_MEMORY
504//zz tl_assert(VG_IS_4_ALIGNED(a));
505//zz # endif
506//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
507//zz }
sewardjee070842003-07-05 17:53:55 +0000508
509
njn25e49d8e72002-09-23 09:36:25 +0000510/*------------------------------------------------------------*/
511/*--- Setting permissions over address ranges. ---*/
512/*------------------------------------------------------------*/
513
sewardj23eb2fd2005-04-22 16:29:19 +0000514/* Given address 'a', find the place where the pointer to a's
515 secondary map lives. If a falls into the primary map, the returned
516 value points to one of the entries in primary_map[]. Otherwise,
517 the auxiliary primary map is searched for 'a', or an entry is
518 created for it; either way, the returned value points to the
519 relevant AuxMapEnt's .sm field.
520
521 The point of this is to enable set_address_range_perms to assign
522 secondary maps in a uniform way, without worrying about whether a
523 given secondary map is pointed to from the main or auxiliary
524 primary map.
525*/
526
527static SecMap** find_secmap_binder_for_addr ( Addr aA )
528{
529 if (aA > MAX_PRIMARY_ADDRESS) {
530 AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
531 return &am->sm;
532 } else {
533 UWord a = (UWord)aA;
534 UWord sec_no = (UWord)(a >> 16);
535# if VG_DEBUG_MEMORY >= 1
536 tl_assert(sec_no < N_PRIMARY_MAP);
537# endif
538 return &primary_map[sec_no];
539 }
540}
541
542
543static void set_address_range_perms ( Addr aA, SizeT len,
sewardj45d94cc2005-04-20 14:44:11 +0000544 UWord example_a_bit,
545 UWord example_v_bit )
njn25e49d8e72002-09-23 09:36:25 +0000546{
sewardj23eb2fd2005-04-22 16:29:19 +0000547 PROF_EVENT(150, "set_address_range_perms");
548
549 /* Check the permissions make sense. */
550 tl_assert(example_a_bit == VGM_BIT_VALID
551 || example_a_bit == VGM_BIT_INVALID);
552 tl_assert(example_v_bit == VGM_BIT_VALID
553 || example_v_bit == VGM_BIT_INVALID);
554 if (example_a_bit == VGM_BIT_INVALID)
555 tl_assert(example_v_bit == VGM_BIT_INVALID);
556
557 if (len == 0)
558 return;
559
sewardj1fa7d2c2005-06-13 18:22:17 +0000560 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000561 if (len > 100 * 1000 * 1000) {
562 VG_(message)(Vg_UserMsg,
563 "Warning: set address range perms: "
564 "large range %u, a %d, v %d",
565 len, example_a_bit, example_v_bit );
566 }
567 }
568
569 UWord a = (UWord)aA;
570
571# if VG_DEBUG_MEMORY >= 2
572
573 /*------------------ debug-only case ------------------ */
sewardj45d94cc2005-04-20 14:44:11 +0000574 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000575
sewardj23eb2fd2005-04-22 16:29:19 +0000576 UWord example_vbyte = BIT_TO_BYTE(example_v_bit);
sewardj45d94cc2005-04-20 14:44:11 +0000577
578 tl_assert(sizeof(SizeT) == sizeof(Addr));
579
580 if (0 && len >= 4096)
581 VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
582 (ULong)a, len, example_a_bit, example_v_bit);
njn25e49d8e72002-09-23 09:36:25 +0000583
584 if (len == 0)
585 return;
586
sewardj45d94cc2005-04-20 14:44:11 +0000587 for (i = 0; i < len; i++) {
588 set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
njn25e49d8e72002-09-23 09:36:25 +0000589 }
njn25e49d8e72002-09-23 09:36:25 +0000590
sewardj23eb2fd2005-04-22 16:29:19 +0000591# else
592
593 /*------------------ standard handling ------------------ */
594 UWord vbits8, abits8, vbits32, v_off, a_off;
595 SecMap* sm;
596 SecMap** binder;
597 SecMap* example_dsm;
598
599 /* Decide on the distinguished secondary that we might want
600 to use (part of the space-compression scheme). */
601 if (example_a_bit == VGM_BIT_INVALID) {
602 example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
603 } else {
604 if (example_v_bit == VGM_BIT_VALID) {
605 example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
606 } else {
607 example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
608 }
609 }
610
611 /* Make various wider versions of the A/V values to use. */
612 vbits8 = BIT_TO_BYTE(example_v_bit);
613 abits8 = BIT_TO_BYTE(example_a_bit);
614 vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;
615
616 /* Slowly do parts preceding 8-byte alignment. */
617 while (True) {
618 if (len == 0) break;
619 PROF_EVENT(151, "set_address_range_perms-loop1-pre");
620 if (VG_IS_8_ALIGNED(a)) break;
621 set_abit_and_vbyte( a, example_a_bit, vbits8 );
622 a++;
623 len--;
624 }
625
626 if (len == 0)
627 return;
628
629 tl_assert(VG_IS_8_ALIGNED(a) && len > 0);
630
631 /* Now go in steps of 8 bytes. */
632 binder = find_secmap_binder_for_addr(a);
633
634 while (True) {
635
636 if (len < 8) break;
637
638 PROF_EVENT(152, "set_address_range_perms-loop8");
639
640 if ((a & SECONDARY_MASK) == 0) {
641 /* we just traversed a primary map boundary, so update the
642 binder. */
643 binder = find_secmap_binder_for_addr(a);
644 PROF_EVENT(153, "set_address_range_perms-update-binder");
645
646 /* Space-optimisation. If we are setting the entire
647 secondary map, just point this entry at one of our
648 distinguished secondaries. However, only do that if it
649 already points at a distinguished secondary, since doing
650 otherwise would leak the existing secondary. We could do
651 better and free up any pre-existing non-distinguished
652 secondary at this point, since we are guaranteed that each
653 non-dist secondary only has one pointer to it, and we have
654 that pointer right here. */
655 if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
656 PROF_EVENT(154, "set_address_range_perms-entire-secmap");
657 *binder = example_dsm;
658 len -= SECONDARY_SIZE;
659 a += SECONDARY_SIZE;
660 continue;
661 }
662 }
663
664 /* If the primary is already pointing to a distinguished map
665 with the same properties as we're trying to set, then leave
666 it that way. */
667 if (*binder == example_dsm) {
668 a += 8;
669 len -= 8;
670 continue;
671 }
672
673 /* Make sure it's OK to write the secondary. */
674 if (is_distinguished_sm(*binder))
675 *binder = copy_for_writing(*binder);
676
677 sm = *binder;
678 v_off = a & 0xFFFF;
679 a_off = v_off >> 3;
680 sm->abits[a_off] = (UChar)abits8;
681 ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
682 ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;
683
684 a += 8;
685 len -= 8;
686 }
687
688 if (len == 0)
689 return;
690
691 tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);
692
693 /* Finish the upper fragment. */
694 while (True) {
695 if (len == 0) break;
696 PROF_EVENT(155, "set_address_range_perms-loop1-post");
697 set_abit_and_vbyte ( a, example_a_bit, vbits8 );
698 a++;
699 len--;
700 }
701
702# endif
703}
sewardj45d94cc2005-04-20 14:44:11 +0000704
sewardjc859fbf2005-04-22 21:10:28 +0000705
706/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +0000707
nethercote8b76fe52004-11-08 19:20:09 +0000708static void mc_make_noaccess ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000709{
sewardjc1a2cda2005-04-21 17:34:00 +0000710 PROF_EVENT(40, "mc_make_noaccess");
nethercote8b76fe52004-11-08 19:20:09 +0000711 DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000712 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
713}
714
nethercote8b76fe52004-11-08 19:20:09 +0000715static void mc_make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000716{
sewardjc1a2cda2005-04-21 17:34:00 +0000717 PROF_EVENT(41, "mc_make_writable");
nethercote8b76fe52004-11-08 19:20:09 +0000718 DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000719 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
720}
721
nethercote8b76fe52004-11-08 19:20:09 +0000722static void mc_make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000723{
sewardjc1a2cda2005-04-21 17:34:00 +0000724 PROF_EVENT(42, "mc_make_readable");
nethercote8b76fe52004-11-08 19:20:09 +0000725 DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000726 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
727}
728
njn9b007f62003-04-07 14:40:25 +0000729
sewardjc859fbf2005-04-22 21:10:28 +0000730/* --- Block-copy permissions (needed for implementing realloc()). --- */
731
732static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
733{
734 SizeT i;
735 UWord abit, vbyte;
736
737 DEBUG("mc_copy_address_range_state\n");
738
739 PROF_EVENT(50, "mc_copy_address_range_state");
740 for (i = 0; i < len; i++) {
741 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
742 get_abit_and_vbyte( &abit, &vbyte, src+i );
743 set_abit_and_vbyte( dst+i, abit, vbyte );
744 }
745}
746
747
748/* --- Fast case permission setters, for dealing with stacks. --- */
749
njn9b007f62003-04-07 14:40:25 +0000750static __inline__
sewardj5d28efc2005-04-21 22:16:29 +0000751void make_aligned_word32_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000752{
sewardj5d28efc2005-04-21 22:16:29 +0000753 PROF_EVENT(300, "make_aligned_word32_writable");
754
755# if VG_DEBUG_MEMORY >= 2
756 mc_make_writable(aA, 4);
757# else
758
759 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000760 PROF_EVENT(301, "make_aligned_word32_writable-slow1");
sewardj5d28efc2005-04-21 22:16:29 +0000761 mc_make_writable(aA, 4);
762 return;
763 }
764
765 UWord a = (UWord)aA;
766 UWord sec_no = (UWord)(a >> 16);
767# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000768 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000769# endif
770
771 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
772 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
773
774 SecMap* sm = primary_map[sec_no];
775 UWord v_off = a & 0xFFFF;
776 UWord a_off = v_off >> 3;
777
778 /* Paint the new area as uninitialised. */
779 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
780
781 UWord mask = 0x0F;
782 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
783 /* mask now contains 1s where we wish to make address bits valid
784 (0s). */
785 sm->abits[a_off] &= ~mask;
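   /* Example: if a == 0x5008 then a & 4 == 0, mask stays 0x0F and the
      low four A bits of abits[a_off] are cleared (bytes 0x5008..0x500B);
      if a == 0x500C then a & 4 == 4, mask becomes 0xF0 and the high
      four A bits are cleared (bytes 0x500C..0x500F). A bits of 0 mean
      'valid', ie addressable. */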
786# endif
njn9b007f62003-04-07 14:40:25 +0000787}
788
sewardj5d28efc2005-04-21 22:16:29 +0000789
790static __inline__
791void make_aligned_word32_noaccess ( Addr aA )
792{
793 PROF_EVENT(310, "make_aligned_word32_noaccess");
794
795# if VG_DEBUG_MEMORY >= 2
796 mc_make_noaccess(aA, 4);
797# else
798
799 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
800 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
801 mc_make_noaccess(aA, 4);
802 return;
803 }
804
805 UWord a = (UWord)aA;
806 UWord sec_no = (UWord)(a >> 16);
807# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000808 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000809# endif
810
811 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
812 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
813
814 SecMap* sm = primary_map[sec_no];
815 UWord v_off = a & 0xFFFF;
816 UWord a_off = v_off >> 3;
817
818 /* Paint the abandoned data as uninitialised. Probably not
819 necessary, but still .. */
820 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
821
822 UWord mask = 0x0F;
823 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
824 /* mask now contains 1s where we wish to make address bits invalid
825 (1s). */
826 sm->abits[a_off] |= mask;
827# endif
828}
829
830
njn9b007f62003-04-07 14:40:25 +0000831/* Nb: by "aligned" here we mean 8-byte aligned */
832static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000833void make_aligned_word64_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000834{
sewardj23eb2fd2005-04-22 16:29:19 +0000835 PROF_EVENT(320, "make_aligned_word64_writable");
836
837# if VG_DEBUG_MEMORY >= 2
838 mc_make_writable(aA, 8);
839# else
840
841 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
842 PROF_EVENT(321, "make_aligned_word64_writable-slow1");
843 mc_make_writable(aA, 8);
844 return;
845 }
846
847 UWord a = (UWord)aA;
848 UWord sec_no = (UWord)(a >> 16);
849# if VG_DEBUG_MEMORY >= 1
850 tl_assert(sec_no < N_PRIMARY_MAP);
851# endif
852
853 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
854 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
855
856 SecMap* sm = primary_map[sec_no];
857 UWord v_off = a & 0xFFFF;
858 UWord a_off = v_off >> 3;
859
860 /* Paint the new area as uninitialised. */
861 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
862
863 /* Make the relevant area accessible. */
864 sm->abits[a_off] = VGM_BYTE_VALID;
865# endif
njn9b007f62003-04-07 14:40:25 +0000866}
867
sewardj23eb2fd2005-04-22 16:29:19 +0000868
njn9b007f62003-04-07 14:40:25 +0000869static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000870void make_aligned_word64_noaccess ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000871{
sewardj23eb2fd2005-04-22 16:29:19 +0000872 PROF_EVENT(330, "make_aligned_word64_noaccess");
873
874# if VG_DEBUG_MEMORY >= 2
875 mc_make_noaccess(aA, 8);
876# else
877
878 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
879 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
880 mc_make_noaccess(aA, 8);
881 return;
882 }
883
884 UWord a = (UWord)aA;
885 UWord sec_no = (UWord)(a >> 16);
886# if VG_DEBUG_MEMORY >= 1
887 tl_assert(sec_no < N_PRIMARY_MAP);
888# endif
889
890 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
891 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
892
893 SecMap* sm = primary_map[sec_no];
894 UWord v_off = a & 0xFFFF;
895 UWord a_off = v_off >> 3;
896
897 /* Paint the abandoned data as uninitialised. Probably not
898 necessary, but still .. */
899 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
900
901 /* Make the abandoned area inaccessible. */
902 sm->abits[a_off] = VGM_BYTE_INVALID;
903# endif
njn9b007f62003-04-07 14:40:25 +0000904}
905
sewardj23eb2fd2005-04-22 16:29:19 +0000906
sewardj45d94cc2005-04-20 14:44:11 +0000907/* The stack-pointer update handling functions */
908SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
909 make_aligned_word32_noaccess,
910 make_aligned_word64_writable,
911 make_aligned_word64_noaccess,
912 mc_make_writable,
913 mc_make_noaccess
914 );
njn9b007f62003-04-07 14:40:25 +0000915
sewardj45d94cc2005-04-20 14:44:11 +0000916
sewardj826ec492005-05-12 18:05:00 +0000917void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
918{
919 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +0000920 if (0)
921 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
922
923# if 0
924 /* Really slow version */
925 mc_make_writable(base, len);
926# endif
927
928# if 0
929 /* Slow(ish) version, which is fairly easily seen to be correct.
930 */
931 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
932 make_aligned_word64_writable(base + 0);
933 make_aligned_word64_writable(base + 8);
934 make_aligned_word64_writable(base + 16);
935 make_aligned_word64_writable(base + 24);
936
937 make_aligned_word64_writable(base + 32);
938 make_aligned_word64_writable(base + 40);
939 make_aligned_word64_writable(base + 48);
940 make_aligned_word64_writable(base + 56);
941
942 make_aligned_word64_writable(base + 64);
943 make_aligned_word64_writable(base + 72);
944 make_aligned_word64_writable(base + 80);
945 make_aligned_word64_writable(base + 88);
946
947 make_aligned_word64_writable(base + 96);
948 make_aligned_word64_writable(base + 104);
949 make_aligned_word64_writable(base + 112);
950 make_aligned_word64_writable(base + 120);
951 } else {
952 mc_make_writable(base, len);
953 }
954# endif
955
956 /* Idea is: go fast when
957 * 8-aligned and length is 128
958 * the sm is available in the main primary map
959 * the address range falls entirely within a single
960 secondary map
961 * the SM is modifiable
962 If all those conditions hold, just update the V bits
963 by writing directly on the v-bit array. We don't care
964 about A bits; if the address range is marked invalid,
965 any attempt to access it will elicit an addressing error,
966 and that's good enough.
967 */
968 if (EXPECTED_TAKEN( len == 128
969 && VG_IS_8_ALIGNED(base)
970 )) {
971 /* Now we know the address range is suitably sized and
972 aligned. */
973 UWord a_lo = (UWord)base;
974 UWord a_hi = (UWord)(base + 127);
975 UWord sec_lo = a_lo >> 16;
976 UWord sec_hi = a_hi >> 16;
977
978 if (EXPECTED_TAKEN( sec_lo == sec_hi
979 && sec_lo < N_PRIMARY_MAP
980 )) {
981 /* Now we know that the entire address range falls within a
982 single secondary map, and that that secondary 'lives' in
983 the main primary map. */
984 SecMap* sm = primary_map[sec_lo];
985
986 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
987 /* And finally, now we know that the secondary in question
988 is modifiable. */
989 UWord v_off = a_lo & 0xFFFF;
990 ULong* p = (ULong*)(&sm->vbyte[v_off]);
991 p[ 0] = VGM_WORD64_INVALID;
992 p[ 1] = VGM_WORD64_INVALID;
993 p[ 2] = VGM_WORD64_INVALID;
994 p[ 3] = VGM_WORD64_INVALID;
995 p[ 4] = VGM_WORD64_INVALID;
996 p[ 5] = VGM_WORD64_INVALID;
997 p[ 6] = VGM_WORD64_INVALID;
998 p[ 7] = VGM_WORD64_INVALID;
999 p[ 8] = VGM_WORD64_INVALID;
1000 p[ 9] = VGM_WORD64_INVALID;
1001 p[10] = VGM_WORD64_INVALID;
1002 p[11] = VGM_WORD64_INVALID;
1003 p[12] = VGM_WORD64_INVALID;
1004 p[13] = VGM_WORD64_INVALID;
1005 p[14] = VGM_WORD64_INVALID;
1006 p[15] = VGM_WORD64_INVALID;
1007 return;
1008 }
1009 }
1010 }
1011
1012 /* else fall into slow case */
sewardj826ec492005-05-12 18:05:00 +00001013 mc_make_writable(base, len);
1014}
1015
1016
nethercote8b76fe52004-11-08 19:20:09 +00001017/*------------------------------------------------------------*/
1018/*--- Checking memory ---*/
1019/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001020
sewardje4ccc012005-05-02 12:53:38 +00001021typedef
1022 enum {
1023 MC_Ok = 5,
1024 MC_AddrErr = 6,
1025 MC_ValueErr = 7
1026 }
1027 MC_ReadResult;
1028
1029
njn25e49d8e72002-09-23 09:36:25 +00001030/* Check permissions for address range. If inadequate permissions
1031 exist, *bad_addr is set to the offending address, so the caller can
1032 know what it is. */
1033
sewardjecf8e102003-07-12 12:11:39 +00001034/* Returns True if [a .. a+len) is not addressable. Otherwise,
1035 returns False, and if bad_addr is non-NULL, sets *bad_addr to
1036 indicate the lowest failing address. Functions below are
1037 similar. */
nethercote8b76fe52004-11-08 19:20:09 +00001038static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00001039{
nethercote451eae92004-11-02 13:06:32 +00001040 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001041 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001042 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00001043 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001044 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +00001045 abit = get_abit(a);
1046 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001047 if (bad_addr != NULL)
1048 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00001049 return False;
1050 }
1051 a++;
1052 }
1053 return True;
1054}
1055
nethercote8b76fe52004-11-08 19:20:09 +00001056static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001057{
nethercote451eae92004-11-02 13:06:32 +00001058 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001059 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001060 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +00001061 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001062 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +00001063 abit = get_abit(a);
1064 if (abit == VGM_BIT_INVALID) {
1065 if (bad_addr != NULL) *bad_addr = a;
1066 return False;
1067 }
1068 a++;
1069 }
1070 return True;
1071}
1072
nethercote8b76fe52004-11-08 19:20:09 +00001073static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001074{
nethercote451eae92004-11-02 13:06:32 +00001075 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001076 UWord abit;
1077 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +00001078
sewardjc1a2cda2005-04-21 17:34:00 +00001079 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +00001080 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +00001081 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001082 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001083 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001084 // Report addressability errors in preference to definedness errors
1085 // by checking the A bits first.
1086 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001087 if (bad_addr != NULL)
1088 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001089 return MC_AddrErr;
1090 }
1091 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001092 if (bad_addr != NULL)
1093 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001094 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001095 }
1096 a++;
1097 }
nethercote8b76fe52004-11-08 19:20:09 +00001098 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001099}
1100
1101
1102/* Check a zero-terminated ascii string. Tricky -- don't want to
1103 examine the actual bytes, to find the end, until we're sure it is
1104 safe to do so. */
1105
njn9b007f62003-04-07 14:40:25 +00001106static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001107{
sewardj45d94cc2005-04-20 14:44:11 +00001108 UWord abit;
1109 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +00001110 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +00001111 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00001112 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +00001113 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001114 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001115 // As in mc_check_readable(), check A bits first
1116 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001117 if (bad_addr != NULL)
1118 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001119 return MC_AddrErr;
1120 }
1121 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001122 if (bad_addr != NULL)
1123 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001124 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001125 }
1126 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +00001127 if (* ((UChar*)a) == 0)
1128 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001129 a++;
1130 }
1131}
1132
1133
1134/*------------------------------------------------------------*/
1135/*--- Memory event handlers ---*/
1136/*------------------------------------------------------------*/
1137
njn25e49d8e72002-09-23 09:36:25 +00001138static
njn72718642003-07-24 08:45:32 +00001139void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001140 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001141{
1142 Bool ok;
1143 Addr bad_addr;
1144
1145 VGP_PUSHCC(VgpCheckMem);
1146
1147 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1148 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001149 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001150 if (!ok) {
1151 switch (part) {
1152 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001153 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1154 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001155 break;
1156
1157 case Vg_CorePThread:
1158 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001159 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001160 break;
1161
1162 default:
njn67993252004-11-22 18:02:32 +00001163 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001164 }
1165 }
1166
1167 VGP_POPCC(VgpCheckMem);
1168}
1169
1170static
njn72718642003-07-24 08:45:32 +00001171void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001172 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001173{
njn25e49d8e72002-09-23 09:36:25 +00001174 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001175 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001176
1177 VGP_PUSHCC(VgpCheckMem);
1178
1179 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
1180 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001181 res = mc_check_readable ( base, size, &bad_addr );
1182 if (MC_Ok != res) {
1183 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1184
njn25e49d8e72002-09-23 09:36:25 +00001185 switch (part) {
1186 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001187 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1188 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001189 break;
1190
1191 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001192 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001193 break;
1194
1195 /* If we're being asked to jump to a silly address, record an error
1196 message before potentially crashing the entire system. */
1197 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001198 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001199 break;
1200
1201 default:
njn67993252004-11-22 18:02:32 +00001202 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001203 }
1204 }
1205 VGP_POPCC(VgpCheckMem);
1206}
1207
1208static
njn72718642003-07-24 08:45:32 +00001209void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001210 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001211{
nethercote8b76fe52004-11-08 19:20:09 +00001212 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001213 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001214 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1215
1216 VGP_PUSHCC(VgpCheckMem);
1217
njnca82cc02004-11-22 17:18:48 +00001218 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001219 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1220 if (MC_Ok != res) {
1221 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1222 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001223 }
1224
1225 VGP_POPCC(VgpCheckMem);
1226}
1227
njn25e49d8e72002-09-23 09:36:25 +00001228static
nethercote451eae92004-11-02 13:06:32 +00001229void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001230{
njn1f3a9092002-10-04 09:22:30 +00001231 /* Ignore the permissions, just make it readable. Seems to work... */
nethercote451eae92004-11-02 13:06:32 +00001232 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
1233 a,(ULong)len,rr,ww,xx);
nethercote8b76fe52004-11-08 19:20:09 +00001234 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001235}
1236
1237static
nethercote451eae92004-11-02 13:06:32 +00001238void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001239{
1240 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001241 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001242 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001243 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001244 }
1245}
1246
1247static
njnb8dca862005-03-14 02:42:44 +00001248void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001249{
njnb8dca862005-03-14 02:42:44 +00001250 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001251}
1252
njncf45fd42004-11-24 16:30:22 +00001253static
1254void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
1255{
1256 mc_make_readable(a, len);
1257}
njn25e49d8e72002-09-23 09:36:25 +00001258
sewardj45d94cc2005-04-20 14:44:11 +00001259
njn25e49d8e72002-09-23 09:36:25 +00001260/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001261/*--- Register event handlers ---*/
1262/*------------------------------------------------------------*/
1263
sewardj45d94cc2005-04-20 14:44:11 +00001264/* When some chunk of guest state is written, mark the corresponding
1265 shadow area as valid. This is used to initialise arbitrarily large
sewardj2c27f702005-05-03 18:19:05 +00001266 chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
sewardj45d94cc2005-04-20 14:44:11 +00001267*/
1268static void mc_post_reg_write ( CorePart part, ThreadId tid,
1269 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001270{
sewardj6cf40ff2005-04-20 22:31:26 +00001271 UChar area[1024];
1272 tl_assert(size <= 1024);
njncf45fd42004-11-24 16:30:22 +00001273 VG_(memset)(area, VGM_BYTE_VALID, size);
1274 VG_(set_shadow_regs_area)( tid, offset, size, area );
njnd3040452003-05-19 15:04:06 +00001275}
1276
sewardj45d94cc2005-04-20 14:44:11 +00001277static
1278void mc_post_reg_write_clientcall ( ThreadId tid,
1279 OffT offset, SizeT size,
1280 Addr f)
njnd3040452003-05-19 15:04:06 +00001281{
njncf45fd42004-11-24 16:30:22 +00001282 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00001283}
1284
sewardj45d94cc2005-04-20 14:44:11 +00001285/* Look at the definedness of the guest's shadow state for
1286 [offset, offset+len). If any part of that is undefined, record
1287 a parameter error.
1288*/
1289static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1290 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001291{
sewardj45d94cc2005-04-20 14:44:11 +00001292 Int i;
1293 Bool bad;
1294
1295 UChar area[16];
1296 tl_assert(size <= 16);
1297
1298 VG_(get_shadow_regs_area)( tid, offset, size, area );
1299
1300 bad = False;
1301 for (i = 0; i < size; i++) {
1302 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001303 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001304 break;
1305 }
nethercote8b76fe52004-11-08 19:20:09 +00001306 }
1307
sewardj45d94cc2005-04-20 14:44:11 +00001308 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001309 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1310}
njnd3040452003-05-19 15:04:06 +00001311
njn25e49d8e72002-09-23 09:36:25 +00001312
sewardj6cf40ff2005-04-20 22:31:26 +00001313/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001314/*--- Printing errors ---*/
1315/*------------------------------------------------------------*/
1316
njn51d827b2005-05-09 01:02:08 +00001317static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00001318{
1319 MAC_Error* err_extra = VG_(get_error_extra)(err);
1320
sewardj71bc3cb2005-05-19 00:25:45 +00001321 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
1322 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
1323
njn9e63cb62005-05-08 18:34:59 +00001324 switch (VG_(get_error_kind)(err)) {
1325 case CoreMemErr: {
1326 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00001327 if (VG_(clo_xml))
1328 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
1329 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
1330 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
1331 xpre, VG_(get_error_string)(err), s, xpost);
1332
njn9e63cb62005-05-08 18:34:59 +00001333 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1334 break;
1335
1336 }
1337
1338 case ValueErr:
1339 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00001340 if (VG_(clo_xml))
1341 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
1342 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
1343 " on uninitialised value(s)%s",
1344 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00001345 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00001346 if (VG_(clo_xml))
1347 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
1348 VG_(message)(Vg_UserMsg,
1349 "%sUse of uninitialised value of size %d%s",
1350 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00001351 }
1352 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1353 break;
1354
1355 case ParamErr: {
1356 Bool isReg = ( Register == err_extra->addrinfo.akind );
1357 Char* s1 = ( isReg ? "contains" : "points to" );
1358 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
1359 if (isReg) tl_assert(!err_extra->isUnaddr);
1360
sewardj71bc3cb2005-05-19 00:25:45 +00001361 if (VG_(clo_xml))
1362 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
1363 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
1364 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00001365
1366 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1367 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1368 break;
1369 }
1370 case UserErr: {
1371 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
1372
sewardj71bc3cb2005-05-19 00:25:45 +00001373 if (VG_(clo_xml))
1374 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00001375 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00001376 "%s%s byte(s) found during client check request%s",
1377 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00001378
1379 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1380 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1381 break;
1382 }
1383 default:
1384 MAC_(pp_shared_Error)(err);
1385 break;
1386 }
1387}
1388
1389/*------------------------------------------------------------*/
1390/*--- Recording errors ---*/
1391/*------------------------------------------------------------*/
1392
njn02bc4b82005-05-15 17:28:26 +00001393/* Creates a copy of the 'extra' part, updates the copy with address info if
njn9e63cb62005-05-08 18:34:59 +00001394 necessary, and returns the copy. */
1395/* This one is called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00001396static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001397{
1398 MAC_Error err_extra;
1399
1400 MAC_(clear_MAC_Error)( &err_extra );
1401 err_extra.size = size;
1402 err_extra.isUnaddr = False;
1403 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1404}
1405
1406/* This is called from non-generated code */
1407
njn96364822005-05-08 19:04:53 +00001408static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1409 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001410{
1411 MAC_Error err_extra;
1412
1413 tl_assert(VG_INVALID_THREADID != tid);
1414 MAC_(clear_MAC_Error)( &err_extra );
1415 err_extra.addrinfo.akind = Undescribed;
1416 err_extra.isUnaddr = isUnaddr;
1417 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1418}
1419
1420/*------------------------------------------------------------*/
1421/*--- Suppressions ---*/
1422/*------------------------------------------------------------*/
1423
njn51d827b2005-05-09 01:02:08 +00001424static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001425{
1426 SuppKind skind;
1427
1428 if (MAC_(shared_recognised_suppression)(name, su))
1429 return True;
1430
1431 /* Extra suppressions not used by Addrcheck */
1432 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1433 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1434 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1435 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1436 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1437 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1438 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1439 else
1440 return False;
1441
1442 VG_(set_supp_kind)(su, skind);
1443 return True;
1444}
1445
1446/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001447/*--- Functions called directly from generated code: ---*/
1448/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001449/*------------------------------------------------------------*/
1450
1451/* Types: LOADV4, LOADV2, LOADV1 are:
1452 UWord fn ( Addr a )
1453 so they return 32-bits on 32-bit machines and 64-bits on
1454 64-bit machines. Addr has the same size as a host word.
1455
1456 LOADV8 is always ULong fn ( Addr a )
1457
1458 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1459 are a UWord, and for STOREV8 they are a ULong.
1460*/
1461
sewardj95448072004-11-22 20:19:51 +00001462/* ------------------------ Size = 8 ------------------------ */
1463
njn9fb73db2005-03-27 01:55:21 +00001464VGA_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001465ULong MC_(helperc_LOADV8) ( Addr aA )
sewardj95448072004-11-22 20:19:51 +00001466{
sewardjf9d81612005-04-23 23:25:49 +00001467 PROF_EVENT(200, "helperc_LOADV8");
1468
1469# if VG_DEBUG_MEMORY >= 2
1470 return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1471# else
1472
1473 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1474 UWord a = (UWord)aA;
1475
1476 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1477 naturally aligned, or 'a' exceeds the range covered by the
1478 primary map. Either way we defer to the slow-path case. */
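   /* Worked example (illustrative; assumes N_PRIMARY_MAP == 1 << 16,
      i.e. the primary map covers the low 4GB):
         mask == ~( (0x10000-8) | 0xFFFF0000 ) == ~0xFFFFFFF8
      On a 32-bit host that is 0x7, so (a & mask) is nonzero exactly
      when 'a' is not 8-aligned.  On a 64-bit host the upper bits of
      the mask additionally catch addresses beyond the primary map. */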
1479 if (EXPECTED_NOT_TAKEN(a & mask)) {
1480 PROF_EVENT(201, "helperc_LOADV8-slow1");
1481 return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1482 }
1483
1484 UWord sec_no = (UWord)(a >> 16);
1485
1486# if VG_DEBUG_MEMORY >= 1
1487 tl_assert(sec_no < N_PRIMARY_MAP);
1488# endif
1489
1490 SecMap* sm = primary_map[sec_no];
1491 UWord v_off = a & 0xFFFF;
1492 UWord a_off = v_off >> 3;
1493 UWord abits = (UWord)(sm->abits[a_off]);
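   /* Illustrative decomposition: for a == 0x12345678,
         sec_no == 0x1234   selects the secondary map for that 64KB chunk
         v_off  == 0x5678   byte offset of 'a' within the chunk
         a_off  == 0x0ACF   index of the A-bit byte covering the aligned
                            8-byte group 0x12345678 .. 0x1234567F
                            (one A bit per data byte). */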
1494
1495 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1496 /* Handle common case quickly: a is suitably aligned, is mapped,
 1497         and is addressable. */
1498 return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
1499 } else {
1500 /* Slow but general case. */
1501 PROF_EVENT(202, "helperc_LOADV8-slow2");
1502 return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
1503 }
1504
1505# endif
sewardj95448072004-11-22 20:19:51 +00001506}
1507
njn9fb73db2005-03-27 01:55:21 +00001508VGA_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001509void MC_(helperc_STOREV8) ( Addr aA, ULong vbytes )
sewardj95448072004-11-22 20:19:51 +00001510{
sewardjf9d81612005-04-23 23:25:49 +00001511 PROF_EVENT(210, "helperc_STOREV8");
1512
1513# if VG_DEBUG_MEMORY >= 2
1514 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1515# else
1516
1517 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1518 UWord a = (UWord)aA;
1519
1520 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1521 naturally aligned, or 'a' exceeds the range covered by the
1522 primary map. Either way we defer to the slow-path case. */
1523 if (EXPECTED_NOT_TAKEN(a & mask)) {
1524 PROF_EVENT(211, "helperc_STOREV8-slow1");
1525 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1526 return;
1527 }
1528
1529 UWord sec_no = (UWord)(a >> 16);
1530
1531# if VG_DEBUG_MEMORY >= 1
1532 tl_assert(sec_no < N_PRIMARY_MAP);
1533# endif
1534
1535 SecMap* sm = primary_map[sec_no];
1536 UWord v_off = a & 0xFFFF;
1537 UWord a_off = v_off >> 3;
1538 UWord abits = (UWord)(sm->abits[a_off]);
1539
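   /* Unlike the LOADV8 fast path, a store must also exclude the
      distinguished secondaries: they are shared, read-only templates,
      so writing into one in place would (it is assumed here) corrupt
      every address range still mapped to it.  Such stores take the
      slow path instead. */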
1540 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1541 && abits == VGM_BYTE_VALID)) {
1542 /* Handle common case quickly: a is suitably aligned, is mapped,
 1543         and is addressable. */
1544 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes;
1545 } else {
1546 /* Slow but general case. */
1547 PROF_EVENT(212, "helperc_STOREV8-slow2");
1548 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1549 }
1550# endif
sewardj95448072004-11-22 20:19:51 +00001551}
1552
1553/* ------------------------ Size = 4 ------------------------ */
1554
njn9fb73db2005-03-27 01:55:21 +00001555VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001556UWord MC_(helperc_LOADV4) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001557{
sewardjc1a2cda2005-04-21 17:34:00 +00001558 PROF_EVENT(220, "helperc_LOADV4");
1559
1560# if VG_DEBUG_MEMORY >= 2
1561 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1562# else
1563
sewardj23eb2fd2005-04-22 16:29:19 +00001564 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001565 UWord a = (UWord)aA;
1566
1567 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1568 naturally aligned, or 'a' exceeds the range covered by the
1569 primary map. Either way we defer to the slow-path case. */
1570 if (EXPECTED_NOT_TAKEN(a & mask)) {
1571 PROF_EVENT(221, "helperc_LOADV4-slow1");
1572 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1573 }
1574
1575 UWord sec_no = (UWord)(a >> 16);
1576
1577# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001578 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001579# endif
1580
1581 SecMap* sm = primary_map[sec_no];
1582 UWord v_off = a & 0xFFFF;
1583 UWord a_off = v_off >> 3;
1584 UWord abits = (UWord)(sm->abits[a_off]);
1585 abits >>= (a & 4);
1586 abits &= 15;
1587 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
1588 /* Handle common case quickly: a is suitably aligned, is mapped,
 1589         and is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001590 /* On a 32-bit platform, simply hoick the required 32 bits out of
1591 the vbyte array. On a 64-bit platform, also set the upper 32
1592 bits to 1 ("undefined"), just in case. This almost certainly
1593 isn't necessary, but be paranoid. */
1594 UWord ret = (UWord)0xFFFFFFFF00000000ULL;
1595 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
1596 return ret;
sewardjc1a2cda2005-04-21 17:34:00 +00001597 } else {
1598 /* Slow but general case. */
1599 PROF_EVENT(222, "helperc_LOADV4-slow2");
1600 return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
1601 }
1602
1603# endif
njn25e49d8e72002-09-23 09:36:25 +00001604}
1605
njn9fb73db2005-03-27 01:55:21 +00001606VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001607void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001608{
sewardjc1a2cda2005-04-21 17:34:00 +00001609 PROF_EVENT(230, "helperc_STOREV4");
1610
1611# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001612 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001613# else
1614
sewardj23eb2fd2005-04-22 16:29:19 +00001615 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001616 UWord a = (UWord)aA;
1617
1618 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1619 naturally aligned, or 'a' exceeds the range covered by the
1620 primary map. Either way we defer to the slow-path case. */
1621 if (EXPECTED_NOT_TAKEN(a & mask)) {
1622 PROF_EVENT(231, "helperc_STOREV4-slow1");
1623 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1624 return;
1625 }
1626
1627 UWord sec_no = (UWord)(a >> 16);
1628
1629# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001630 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001631# endif
1632
1633 SecMap* sm = primary_map[sec_no];
1634 UWord v_off = a & 0xFFFF;
1635 UWord a_off = v_off >> 3;
1636 UWord abits = (UWord)(sm->abits[a_off]);
1637 abits >>= (a & 4);
1638 abits &= 15;
1639 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1640 && abits == VGM_NIBBLE_VALID)) {
1641 /* Handle common case quickly: a is suitably aligned, is mapped,
 1642         and is addressable. */
1643 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
1644 } else {
1645 /* Slow but general case. */
1646 PROF_EVENT(232, "helperc_STOREV4-slow2");
1647 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1648 }
1649# endif
njn25e49d8e72002-09-23 09:36:25 +00001650}
1651
sewardj95448072004-11-22 20:19:51 +00001652/* ------------------------ Size = 2 ------------------------ */
1653
njn9fb73db2005-03-27 01:55:21 +00001654VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001655UWord MC_(helperc_LOADV2) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001656{
sewardjc1a2cda2005-04-21 17:34:00 +00001657 PROF_EVENT(240, "helperc_LOADV2");
1658
1659# if VG_DEBUG_MEMORY >= 2
1660 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1661# else
1662
sewardj23eb2fd2005-04-22 16:29:19 +00001663 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001664 UWord a = (UWord)aA;
1665
1666 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1667 naturally aligned, or 'a' exceeds the range covered by the
1668 primary map. Either way we defer to the slow-path case. */
1669 if (EXPECTED_NOT_TAKEN(a & mask)) {
1670 PROF_EVENT(241, "helperc_LOADV2-slow1");
1671 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1672 }
1673
1674 UWord sec_no = (UWord)(a >> 16);
1675
1676# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001677 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001678# endif
1679
1680 SecMap* sm = primary_map[sec_no];
1681 UWord v_off = a & 0xFFFF;
1682 UWord a_off = v_off >> 3;
1683 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001684 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
 1685      /* Handle common case quickly: a is mapped, and the entire
 1686         naturally-aligned 8-byte group it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001687 /* Set the upper 16/48 bits of the result to 1 ("undefined"),
1688 just in case. This almost certainly isn't necessary, but be
1689 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001690 return (~(UWord)0xFFFF)
1691 |
1692 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
1693 } else {
1694 /* Slow but general case. */
1695 PROF_EVENT(242, "helperc_LOADV2-slow2");
1696 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1697 }
1698
1699# endif
njn25e49d8e72002-09-23 09:36:25 +00001700}
1701
njn9fb73db2005-03-27 01:55:21 +00001702VGA_REGPARM(2)
sewardj5d28efc2005-04-21 22:16:29 +00001703void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001704{
sewardjc1a2cda2005-04-21 17:34:00 +00001705 PROF_EVENT(250, "helperc_STOREV2");
sewardj5d28efc2005-04-21 22:16:29 +00001706
1707# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001708 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001709# else
1710
sewardj23eb2fd2005-04-22 16:29:19 +00001711 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardj5d28efc2005-04-21 22:16:29 +00001712 UWord a = (UWord)aA;
1713
1714 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1715 naturally aligned, or 'a' exceeds the range covered by the
1716 primary map. Either way we defer to the slow-path case. */
1717 if (EXPECTED_NOT_TAKEN(a & mask)) {
1718 PROF_EVENT(251, "helperc_STOREV2-slow1");
1719 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1720 return;
1721 }
1722
1723 UWord sec_no = (UWord)(a >> 16);
1724
1725# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001726 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +00001727# endif
1728
1729 SecMap* sm = primary_map[sec_no];
1730 UWord v_off = a & 0xFFFF;
1731 UWord a_off = v_off >> 3;
1732 UWord abits = (UWord)(sm->abits[a_off]);
1733 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1734 && abits == VGM_BYTE_VALID)) {
1735 /* Handle common case quickly. */
1736 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
1737 } else {
1738 /* Slow but general case. */
1739 PROF_EVENT(252, "helperc_STOREV2-slow2");
1740 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1741 }
1742# endif
njn25e49d8e72002-09-23 09:36:25 +00001743}
1744
sewardj95448072004-11-22 20:19:51 +00001745/* ------------------------ Size = 1 ------------------------ */
1746
njn9fb73db2005-03-27 01:55:21 +00001747VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001748UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001749{
sewardjc1a2cda2005-04-21 17:34:00 +00001750 PROF_EVENT(260, "helperc_LOADV1");
1751
1752# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001753 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001754# else
1755
sewardj23eb2fd2005-04-22 16:29:19 +00001756 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001757 UWord a = (UWord)aA;
1758
1759 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1760 exceeds the range covered by the primary map. In which case we
1761 defer to the slow-path case. */
1762 if (EXPECTED_NOT_TAKEN(a & mask)) {
1763 PROF_EVENT(261, "helperc_LOADV1-slow1");
1764 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1765 }
1766
1767 UWord sec_no = (UWord)(a >> 16);
1768
1769# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001770 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001771# endif
1772
1773 SecMap* sm = primary_map[sec_no];
1774 UWord v_off = a & 0xFFFF;
1775 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001776 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001777 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
 1778      /* Handle common case quickly: a is mapped, and the entire
 1779         naturally-aligned 8-byte group it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001780 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1781 just in case. This almost certainly isn't necessary, but be
1782 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001783 return (~(UWord)0xFF)
1784 |
1785 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1786 } else {
1787 /* Slow but general case. */
1788 PROF_EVENT(262, "helperc_LOADV1-slow2");
1789 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1790 }
1791# endif
njn25e49d8e72002-09-23 09:36:25 +00001792}
1793
sewardjc1a2cda2005-04-21 17:34:00 +00001794
njn9fb73db2005-03-27 01:55:21 +00001795VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001796void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001797{
sewardjc1a2cda2005-04-21 17:34:00 +00001798 PROF_EVENT(270, "helperc_STOREV1");
1799
1800# if VG_DEBUG_MEMORY >= 2
1801 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1802# else
1803
sewardj23eb2fd2005-04-22 16:29:19 +00001804 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001805 UWord a = (UWord)aA;
1806 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1807 exceeds the range covered by the primary map. In which case we
1808 defer to the slow-path case. */
1809 if (EXPECTED_NOT_TAKEN(a & mask)) {
1810 PROF_EVENT(271, "helperc_STOREV1-slow1");
1811 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1812 return;
1813 }
1814
1815 UWord sec_no = (UWord)(a >> 16);
1816
1817# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001818 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001819# endif
1820
1821 SecMap* sm = primary_map[sec_no];
1822 UWord v_off = a & 0xFFFF;
1823 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001824 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001825 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1826 && abits == VGM_BYTE_VALID)) {
 1827      /* Handle common case quickly: a is mapped, and the entire
 1828         naturally-aligned 8-byte group it lives in is addressable. */
1829 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1830 } else {
1831 PROF_EVENT(272, "helperc_STOREV1-slow2");
1832 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1833 }
1834
1835# endif
njn25e49d8e72002-09-23 09:36:25 +00001836}
1837
1838
sewardjc859fbf2005-04-22 21:10:28 +00001839/*------------------------------------------------------------*/
1840/*--- Functions called directly from generated code: ---*/
1841/*--- Value-check failure handlers. ---*/
1842/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001843
njn5c004e42002-11-18 11:04:50 +00001844void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001845{
njn9e63cb62005-05-08 18:34:59 +00001846 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001847}
1848
njn5c004e42002-11-18 11:04:50 +00001849void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001850{
njn9e63cb62005-05-08 18:34:59 +00001851 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001852}
1853
njn5c004e42002-11-18 11:04:50 +00001854void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001855{
njn9e63cb62005-05-08 18:34:59 +00001856 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001857}
1858
sewardj11bcc4e2005-04-23 22:38:38 +00001859void MC_(helperc_value_check8_fail) ( void )
1860{
njn9e63cb62005-05-08 18:34:59 +00001861 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001862}
1863
njn9fb73db2005-03-27 01:55:21 +00001864VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001865{
njn9e63cb62005-05-08 18:34:59 +00001866 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001867}
1868
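/* Note added for clarity: a size of 0 denotes an uninitialised value
   used in a conditional jump or move; a nonzero size denotes an
   ordinary use of an uninitialised value of that many bytes. */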
njn25e49d8e72002-09-23 09:36:25 +00001869
sewardj45d94cc2005-04-20 14:44:11 +00001870//zz /*------------------------------------------------------------*/
1871//zz /*--- Metadata get/set functions, for client requests. ---*/
1872//zz /*------------------------------------------------------------*/
1873//zz
1874//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1875//zz error, 3 == addressing error. */
1876//zz static Int mc_get_or_set_vbits_for_client (
1877//zz ThreadId tid,
1878//zz Addr dataV,
1879//zz Addr vbitsV,
1880//zz SizeT size,
1881//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1882//zz )
1883//zz {
1884//zz Bool addressibleD = True;
1885//zz Bool addressibleV = True;
1886//zz UInt* data = (UInt*)dataV;
1887//zz UInt* vbits = (UInt*)vbitsV;
1888//zz SizeT szW = size / 4; /* sigh */
1889//zz SizeT i;
1890//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1891//zz UInt* vbitsP = NULL; /* ditto */
1892//zz
1893//zz /* Check alignment of args. */
1894//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1895//zz return 2;
1896//zz if ((size & 3) != 0)
1897//zz return 2;
1898//zz
1899//zz /* Check that arrays are addressible. */
1900//zz for (i = 0; i < szW; i++) {
1901//zz dataP = &data[i];
1902//zz vbitsP = &vbits[i];
1903//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1904//zz addressibleD = False;
1905//zz break;
1906//zz }
1907//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1908//zz addressibleV = False;
1909//zz break;
1910//zz }
1911//zz }
1912//zz if (!addressibleD) {
1913//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1914//zz setting ? True : False );
1915//zz return 3;
1916//zz }
1917//zz if (!addressibleV) {
1918//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1919//zz setting ? False : True );
1920//zz return 3;
1921//zz }
1922//zz
1923//zz /* Do the copy */
1924//zz if (setting) {
1925//zz /* setting */
1926//zz for (i = 0; i < szW; i++) {
1927//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00001928//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00001929//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1930//zz }
1931//zz } else {
1932//zz /* getting */
1933//zz for (i = 0; i < szW; i++) {
1934//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1935//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1936//zz }
1937//zz }
1938//zz
1939//zz return 1;
1940//zz }
sewardj05fe85e2005-04-27 22:46:36 +00001941
1942
1943/*------------------------------------------------------------*/
1944/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1945/*------------------------------------------------------------*/
1946
1947/* For the memory leak detector, say whether an entire 64k chunk of
1948 address space is possibly in use, or not. If in doubt return
1949 True.
1950*/
1951static
1952Bool mc_is_within_valid_secondary ( Addr a )
1953{
1954 SecMap* sm = maybe_get_secmap_for ( a );
1955 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
1956 /* Definitely not in use. */
1957 return False;
1958 } else {
1959 return True;
1960 }
1961}
1962
1963
1964/* For the memory leak detector, say whether or not a given word
1965 address is to be regarded as valid. */
1966static
1967Bool mc_is_valid_aligned_word ( Addr a )
1968{
1969 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
1970 if (sizeof(UWord) == 4) {
1971 tl_assert(VG_IS_4_ALIGNED(a));
1972 } else {
1973 tl_assert(VG_IS_8_ALIGNED(a));
1974 }
1975 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
1976 return True;
1977 } else {
1978 return False;
1979 }
1980}
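/* Roughly speaking (descriptive sketch only): the generic leak checker
   scans only those 64KB chunks for which mc_is_within_valid_secondary()
   returns True, and within them treats as potential pointers only the
   aligned words for which mc_is_valid_aligned_word() returns True. */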
sewardja4495682002-10-21 07:29:59 +00001981
1982
nethercote996901a2004-08-03 13:29:09 +00001983/* Leak detector for this tool.  There is nothing tool-specific to do
sewardja4495682002-10-21 07:29:59 +00001984   here; we merely run the generic leak detector with parameters
nethercote996901a2004-08-03 13:29:09 +00001985   suitable for this tool. */
njnb8dca862005-03-14 02:42:44 +00001986static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001987{
sewardj05fe85e2005-04-27 22:46:36 +00001988 MAC_(do_detect_memory_leaks) (
1989 tid,
1990 mode,
1991 mc_is_within_valid_secondary,
1992 mc_is_valid_aligned_word
1993 );
njn25e49d8e72002-09-23 09:36:25 +00001994}
1995
1996
sewardjc859fbf2005-04-22 21:10:28 +00001997/*------------------------------------------------------------*/
1998/*--- Initialisation ---*/
1999/*------------------------------------------------------------*/
2000
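/* For reference, as implied by the loops below: each SecMap shadows a
   64KB chunk of address space, with vbyte[65536] holding one V byte
   (8 validity bits) per data byte, and abits[8192] holding one
   addressability bit per data byte. */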
2001static void init_shadow_memory ( void )
2002{
2003 Int i;
2004 SecMap* sm;
2005
2006 /* Build the 3 distinguished secondaries */
2007 tl_assert(VGM_BIT_INVALID == 1);
2008 tl_assert(VGM_BIT_VALID == 0);
2009 tl_assert(VGM_BYTE_INVALID == 0xFF);
2010 tl_assert(VGM_BYTE_VALID == 0);
2011
2012 /* Set A invalid, V invalid. */
2013 sm = &sm_distinguished[SM_DIST_NOACCESS];
2014 for (i = 0; i < 65536; i++)
2015 sm->vbyte[i] = VGM_BYTE_INVALID;
2016 for (i = 0; i < 8192; i++)
2017 sm->abits[i] = VGM_BYTE_INVALID;
2018
2019 /* Set A valid, V invalid. */
2020 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2021 for (i = 0; i < 65536; i++)
2022 sm->vbyte[i] = VGM_BYTE_INVALID;
2023 for (i = 0; i < 8192; i++)
2024 sm->abits[i] = VGM_BYTE_VALID;
2025
2026 /* Set A valid, V valid. */
2027 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2028 for (i = 0; i < 65536; i++)
2029 sm->vbyte[i] = VGM_BYTE_VALID;
2030 for (i = 0; i < 8192; i++)
2031 sm->abits[i] = VGM_BYTE_VALID;
2032
2033 /* Set up the primary map. */
2034 /* These entries gradually get overwritten as the used address
2035 space expands. */
2036 for (i = 0; i < N_PRIMARY_MAP; i++)
2037 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2038
 2039    /* No need to set auxmap_size = auxmap_used = 0 here;
 2040       they are statically initialised to zero. */
2041}
2042
2043
2044/*------------------------------------------------------------*/
2045/*--- Sanity check machinery (permanently engaged) ---*/
2046/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002047
njn51d827b2005-05-09 01:02:08 +00002048static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002049{
jseward9800fd32004-01-04 23:08:04 +00002050 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002051 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002052 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002053 return True;
njn25e49d8e72002-09-23 09:36:25 +00002054}
2055
njn51d827b2005-05-09 01:02:08 +00002056static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002057{
sewardj23eb2fd2005-04-22 16:29:19 +00002058 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002059 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002060 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002061
sewardj23eb2fd2005-04-22 16:29:19 +00002062 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002063 PROF_EVENT(491, "expensive_sanity_check");
2064
sewardj23eb2fd2005-04-22 16:29:19 +00002065 /* Check that the 3 distinguished SMs are still as they should
2066 be. */
njn25e49d8e72002-09-23 09:36:25 +00002067
sewardj45d94cc2005-04-20 14:44:11 +00002068 /* Check A invalid, V invalid. */
2069 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002070 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002071 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002072 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002073 for (i = 0; i < 8192; i++)
2074 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002075 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002076
sewardj45d94cc2005-04-20 14:44:11 +00002077 /* Check A valid, V invalid. */
2078 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2079 for (i = 0; i < 65536; i++)
2080 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002081 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002082 for (i = 0; i < 8192; i++)
2083 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002084 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002085
2086 /* Check A valid, V valid. */
2087 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2088 for (i = 0; i < 65536; i++)
2089 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002090 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002091 for (i = 0; i < 8192; i++)
2092 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002093 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002094
sewardj23eb2fd2005-04-22 16:29:19 +00002095 if (bad) {
2096 VG_(printf)("memcheck expensive sanity: "
2097 "distinguished_secondaries have changed\n");
2098 return False;
2099 }
2100
2101 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002102 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002103 bad = True;
2104
2105 if (bad) {
2106 VG_(printf)("memcheck expensive sanity: "
2107 "nonsensical auxmap sizing\n");
2108 return False;
2109 }
2110
2111 /* check that the number of secmaps issued matches the number that
2112 are reachable (iow, no secmap leaks) */
2113 n_secmaps_found = 0;
2114 for (i = 0; i < N_PRIMARY_MAP; i++) {
2115 if (primary_map[i] == NULL) {
2116 bad = True;
2117 } else {
2118 if (!is_distinguished_sm(primary_map[i]))
2119 n_secmaps_found++;
2120 }
2121 }
2122
2123 for (i = 0; i < auxmap_used; i++) {
2124 if (auxmap[i].sm == NULL) {
2125 bad = True;
2126 } else {
2127 if (!is_distinguished_sm(auxmap[i].sm))
2128 n_secmaps_found++;
2129 }
2130 }
2131
2132 if (n_secmaps_found != n_secmaps_issued)
2133 bad = True;
2134
2135 if (bad) {
2136 VG_(printf)("memcheck expensive sanity: "
2137 "apparent secmap leakage\n");
2138 return False;
2139 }
2140
2141 /* check that auxmap only covers address space that the primary
2142 doesn't */
2143
2144 for (i = 0; i < auxmap_used; i++)
2145 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2146 bad = True;
2147
2148 if (bad) {
2149 VG_(printf)("memcheck expensive sanity: "
2150 "auxmap covers wrong address space\n");
2151 return False;
2152 }
2153
 2154    /* TODO: check that there is only one pointer to each secmap (expensive). */
njn25e49d8e72002-09-23 09:36:25 +00002155
2156 return True;
2157}
sewardj45d94cc2005-04-20 14:44:11 +00002158
njn25e49d8e72002-09-23 09:36:25 +00002159
njn25e49d8e72002-09-23 09:36:25 +00002160/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002161/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002162/*------------------------------------------------------------*/
2163
njn51d827b2005-05-09 01:02:08 +00002164Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00002165
njn51d827b2005-05-09 01:02:08 +00002166static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002167{
njn45270a22005-03-27 01:00:11 +00002168 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00002169 else
njn43c799e2003-04-08 00:08:52 +00002170 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002171
2172 return True;
njn25e49d8e72002-09-23 09:36:25 +00002173}
2174
njn51d827b2005-05-09 01:02:08 +00002175static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002176{
njn3e884182003-04-15 13:03:23 +00002177 MAC_(print_common_usage)();
2178 VG_(printf)(
2179" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
2180 );
2181}
2182
njn51d827b2005-05-09 01:02:08 +00002183static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002184{
2185 MAC_(print_common_debug_usage)();
2186 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00002187" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00002188 );
njn25e49d8e72002-09-23 09:36:25 +00002189}
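/* Example invocation exercising the option above (illustrative only;
   --leak-check is one of the common MAC options):

      valgrind --tool=memcheck --avoid-strlen-errors=no \
               --leak-check=full ./a.out
*/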
2190
nethercote8b76fe52004-11-08 19:20:09 +00002191/*------------------------------------------------------------*/
2192/*--- Client requests ---*/
2193/*------------------------------------------------------------*/
2194
2195/* Client block management:
2196
2197 This is managed as an expanding array of client block descriptors.
2198 Indices of live descriptors are issued to the client, so it can ask
2199 to free them later. Therefore we cannot slide live entries down
2200 over dead ones. Instead we must use free/inuse flags and scan for
2201 an empty slot at allocation time. This in turn means allocation is
2202 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002203
sewardjedc75ab2005-03-15 23:30:32 +00002204 An unused block has start == size == 0
2205*/
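/* Client-side view (sketch; the macro names are assumed to match the
   memcheck.h shipped with this version -- check that header):

      int id = VALGRIND_CREATE_BLOCK(buf, 64, "my parser state");
      ...
      VALGRIND_DISCARD(id);

   CREATE_BLOCK hands out an index into the cgbs[] array managed below;
   DISCARD marks that slot unused again. */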
nethercote8b76fe52004-11-08 19:20:09 +00002206
2207typedef
2208 struct {
2209 Addr start;
2210 SizeT size;
2211 ExeContext* where;
sewardjedc75ab2005-03-15 23:30:32 +00002212 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002213 }
2214 CGenBlock;
2215
2216/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002217static UInt cgb_size = 0;
2218static UInt cgb_used = 0;
2219static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002220
2221/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002222static UInt cgb_used_MAX = 0; /* Max in use. */
2223static UInt cgb_allocs = 0; /* Number of allocs. */
2224static UInt cgb_discards = 0; /* Number of discards. */
2225static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002226
2227
2228static
njn695c16e2005-03-27 03:40:28 +00002229Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002230{
2231 UInt i, sz_new;
2232 CGenBlock* cgbs_new;
2233
njn695c16e2005-03-27 03:40:28 +00002234 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002235
njn695c16e2005-03-27 03:40:28 +00002236 for (i = 0; i < cgb_used; i++) {
2237 cgb_search++;
2238 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002239 return i;
2240 }
2241
2242 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002243 if (cgb_used < cgb_size) {
2244 cgb_used++;
2245 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002246 }
2247
2248 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002249 tl_assert(cgb_used == cgb_size);
2250 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002251
2252 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002253 for (i = 0; i < cgb_used; i++)
2254 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002255
njn695c16e2005-03-27 03:40:28 +00002256 if (cgbs != NULL)
2257 VG_(free)( cgbs );
2258 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002259
njn695c16e2005-03-27 03:40:28 +00002260 cgb_size = sz_new;
2261 cgb_used++;
2262 if (cgb_used > cgb_used_MAX)
2263 cgb_used_MAX = cgb_used;
2264 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002265}
2266
2267
2268static void show_client_block_stats ( void )
2269{
2270 VG_(message)(Vg_DebugMsg,
2271 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002272 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002273 );
2274}
2275
2276static Bool find_addr(VgHashNode* sh_ch, void* ap)
2277{
2278 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
2279 Addr a = *(Addr*)ap;
2280
njn717cde52005-05-10 02:47:21 +00002281 return VG_(addr_is_in_block)(a, m->data, m->size, MAC_MALLOC_REDZONE_SZB);
nethercote8b76fe52004-11-08 19:20:09 +00002282}
2283
2284static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2285{
2286 UInt i;
2287 /* VG_(printf)("try to identify %d\n", a); */
2288
2289 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002290 for (i = 0; i < cgb_used; i++) {
2291 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002292 continue;
njn717cde52005-05-10 02:47:21 +00002293 // Use zero as the redzone for client blocks.
2294 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00002295 MAC_Mempool **d, *mp;
2296
2297 /* OK - maybe it's a mempool, too? */
2298 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
njn695c16e2005-03-27 03:40:28 +00002299 (UWord)cgbs[i].start,
nethercote8b76fe52004-11-08 19:20:09 +00002300 (void*)&d);
2301 if(mp != NULL) {
2302 if(mp->chunks != NULL) {
2303 MAC_Chunk *mc;
2304
2305 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
2306 if(mc != NULL) {
2307 ai->akind = UserG;
2308 ai->blksize = mc->size;
2309 ai->rwoffset = (Int)(a) - (Int)mc->data;
2310 ai->lastchange = mc->where;
2311 return True;
2312 }
2313 }
2314 ai->akind = Mempool;
njn695c16e2005-03-27 03:40:28 +00002315 ai->blksize = cgbs[i].size;
2316 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2317 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002318 return True;
2319 }
2320 ai->akind = UserG;
njn695c16e2005-03-27 03:40:28 +00002321 ai->blksize = cgbs[i].size;
2322 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2323 ai->lastchange = cgbs[i].where;
2324 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002325 return True;
2326 }
2327 }
2328 return False;
2329}
2330
njn51d827b2005-05-09 01:02:08 +00002331static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002332{
2333 Int i;
2334 Bool ok;
2335 Addr bad_addr;
2336
njnfc26ff92004-11-22 19:12:49 +00002337 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002338 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2339 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2340 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2341 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2342 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2343 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2344 return False;
2345
2346 switch (arg[0]) {
2347 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2348 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2349 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002350 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2351 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002352 *ret = ok ? (UWord)NULL : bad_addr;
2353 break;
2354
2355 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2356 MC_ReadResult res;
2357 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2358 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002359 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2360 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002361 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002362 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2363 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002364 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
2365 break;
2366 }
2367
2368 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002369 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00002370 *ret = 0; /* return value is meaningless */
2371 break;
2372
2373 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002374 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002375 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002376 break;
2377
2378 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002379 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002380 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002381 break;
2382
2383 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002384 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002385 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002386 break;
2387
sewardjedc75ab2005-03-15 23:30:32 +00002388 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
2389 if (arg[1] != 0 && arg[2] != 0) {
njn695c16e2005-03-27 03:40:28 +00002390 i = alloc_client_block();
2391 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2392 cgbs[i].start = arg[1];
2393 cgbs[i].size = arg[2];
2394 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2395 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002396
2397 *ret = i;
2398 } else
2399 *ret = -1;
2400 break;
2401
nethercote8b76fe52004-11-08 19:20:09 +00002402 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002403 if (cgbs == NULL
2404 || arg[2] >= cgb_used ||
2405 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002406 *ret = 1;
2407 } else {
njn695c16e2005-03-27 03:40:28 +00002408 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2409 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2410 VG_(free)(cgbs[arg[2]].desc);
2411 cgb_discards++;
sewardjedc75ab2005-03-15 23:30:32 +00002412 *ret = 0;
2413 }
nethercote8b76fe52004-11-08 19:20:09 +00002414 break;
2415
sewardj45d94cc2005-04-20 14:44:11 +00002416//zz case VG_USERREQ__GET_VBITS:
2417//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2418//zz error. */
2419//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2420//zz *ret = mc_get_or_set_vbits_for_client
2421//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2422//zz break;
2423//zz
2424//zz case VG_USERREQ__SET_VBITS:
2425//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2426//zz error. */
2427//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2428//zz *ret = mc_get_or_set_vbits_for_client
2429//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2430//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002431
2432 default:
2433 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2434 return True;
2435 } else {
2436 VG_(message)(Vg_UserMsg,
2437 "Warning: unknown memcheck client request code %llx",
2438 (ULong)arg[0]);
2439 return False;
2440 }
2441 }
2442 return True;
2443}
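/* Client-side view of the requests handled above (sketch; the macro
   names are assumed to match the memcheck.h of this era -- check the
   header):

      VALGRIND_MAKE_NOACCESS(p, len);          -- unaddressable
      VALGRIND_MAKE_WRITABLE(p, len);          -- addressable, undefined
      VALGRIND_MAKE_READABLE(p, len);          -- addressable, defined
      bad = VALGRIND_CHECK_READABLE(p, len);   -- 0, or first bad address

   The CHECK_* requests return zero on success, otherwise the first
   address failing the check (see the handler above). */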
njn25e49d8e72002-09-23 09:36:25 +00002444
2445/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002446/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002447/*------------------------------------------------------------*/
2448
njn51d827b2005-05-09 01:02:08 +00002449static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002450{
sewardj71bc3cb2005-05-19 00:25:45 +00002451 /* If we've been asked to emit XML, mash around various other
2452 options so as to constrain the output somewhat. */
2453 if (VG_(clo_xml)) {
2454 /* Extract as much info as possible from the leak checker. */
sewardj09890d82005-05-20 02:45:15 +00002455 /* MAC_(clo_show_reachable) = True; */
sewardj71bc3cb2005-05-19 00:25:45 +00002456 MAC_(clo_leak_check) = LC_Full;
2457 }
njn5c004e42002-11-18 11:04:50 +00002458}
2459
njn51d827b2005-05-09 01:02:08 +00002460static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002461{
nethercote8b76fe52004-11-08 19:20:09 +00002462 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002463
sewardj23eb2fd2005-04-22 16:29:19 +00002464 Int i, n_accessible_dist;
2465 SecMap* sm;
2466
sewardj45d94cc2005-04-20 14:44:11 +00002467 if (VG_(clo_verbosity) > 1) {
2468 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002469 " memcheck: sanity checks: %d cheap, %d expensive",
2470 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002471 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002472 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2473 auxmap_used,
2474 auxmap_used * 64,
2475 auxmap_used / 16 );
2476 VG_(message)(Vg_DebugMsg,
2477 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002478 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002479 VG_(message)(Vg_DebugMsg,
2480 " memcheck: secondaries: %d issued (%dk, %dM)",
2481 n_secmaps_issued,
2482 n_secmaps_issued * 64,
2483 n_secmaps_issued / 16 );
2484
2485 n_accessible_dist = 0;
2486 for (i = 0; i < N_PRIMARY_MAP; i++) {
2487 sm = primary_map[i];
2488 if (is_distinguished_sm(sm)
2489 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2490 n_accessible_dist ++;
2491 }
2492 for (i = 0; i < auxmap_used; i++) {
2493 sm = auxmap[i].sm;
2494 if (is_distinguished_sm(sm)
2495 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2496 n_accessible_dist ++;
2497 }
2498
2499 VG_(message)(Vg_DebugMsg,
2500 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2501 n_accessible_dist,
2502 n_accessible_dist * 64,
2503 n_accessible_dist / 16 );
2504
sewardj45d94cc2005-04-20 14:44:11 +00002505 }
2506
njn5c004e42002-11-18 11:04:50 +00002507 if (0) {
2508 VG_(message)(Vg_DebugMsg,
2509 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002510 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002511 }
njn25e49d8e72002-09-23 09:36:25 +00002512}
2513
njn51d827b2005-05-09 01:02:08 +00002514static void mc_pre_clo_init(void)
2515{
2516 VG_(details_name) ("Memcheck");
2517 VG_(details_version) (NULL);
2518 VG_(details_description) ("a memory error detector");
2519 VG_(details_copyright_author)(
2520 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
2521 VG_(details_bug_reports_to) (VG_BUGS_TO);
2522 VG_(details_avg_translation_sizeB) ( 370 );
2523
2524 VG_(basic_tool_funcs) (mc_post_clo_init,
2525 MC_(instrument),
2526 mc_fini);
2527
2528 VG_(needs_core_errors) ();
2529 VG_(needs_tool_errors) (MAC_(eq_Error),
2530 mc_pp_Error,
2531 MAC_(update_extra),
2532 mc_recognised_suppression,
2533 MAC_(read_extra_suppression_info),
2534 MAC_(error_matches_suppression),
2535 MAC_(get_error_name),
2536 MAC_(print_extra_suppression_info));
2537 VG_(needs_libc_freeres) ();
2538 VG_(needs_command_line_options)(mc_process_cmd_line_option,
2539 mc_print_usage,
2540 mc_print_debug_usage);
2541 VG_(needs_client_requests) (mc_handle_client_request);
2542 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
2543 mc_expensive_sanity_check);
2544 VG_(needs_shadow_memory) ();
2545
2546 VG_(malloc_funcs) (MAC_(malloc),
2547 MAC_(__builtin_new),
2548 MAC_(__builtin_vec_new),
2549 MAC_(memalign),
2550 MAC_(calloc),
2551 MAC_(free),
2552 MAC_(__builtin_delete),
2553 MAC_(__builtin_vec_delete),
2554 MAC_(realloc),
2555 MAC_MALLOC_REDZONE_SZB );
2556
2557 MAC_( new_mem_heap) = & mc_new_mem_heap;
2558 MAC_( ban_mem_heap) = & mc_make_noaccess;
2559 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
2560 MAC_( die_mem_heap) = & mc_make_noaccess;
2561 MAC_(check_noaccess) = & mc_check_noaccess;
2562
2563 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
2564 VG_(track_new_mem_stack_signal)( & mc_make_writable );
2565 VG_(track_new_mem_brk) ( & mc_make_writable );
2566 VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );
2567
2568 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
2569
2570 VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
2571 VG_(track_die_mem_brk) ( & mc_make_noaccess );
2572 VG_(track_die_mem_munmap) ( & mc_make_noaccess );
2573
2574 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2575 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2576 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2577 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2578 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2579 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
2580
2581 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2582 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2583 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2584 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2585 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2586 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
2587
2588 VG_(track_ban_mem_stack) ( & mc_make_noaccess );
2589
2590 VG_(track_pre_mem_read) ( & mc_check_is_readable );
2591 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2592 VG_(track_pre_mem_write) ( & mc_check_is_writable );
2593 VG_(track_post_mem_write) ( & mc_post_mem_write );
2594
2595 VG_(track_pre_reg_read) ( & mc_pre_reg_read );
2596
2597 VG_(track_post_reg_write) ( & mc_post_reg_write );
2598 VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );
2599
2600 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2601 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2602 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
2603
2604 /* Additional block description for VG_(describe_addr)() */
2605 MAC_(describe_addr_supp) = client_perm_maybe_describe;
2606
2607 init_shadow_memory();
2608 MAC_(common_pre_clo_init)();
2609
2610 tl_assert( mc_expensive_sanity_check() );
2611}
2612
2613VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init, 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002614
njn25e49d8e72002-09-23 09:36:25 +00002615/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002616/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002617/*--------------------------------------------------------------------*/