blob: a2e2e407d5689560a5957e3ec027267acae33721 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjc859fbf2005-04-22 21:10:28 +000033/* TODO 22 Apr 05
sewardj45d94cc2005-04-20 14:44:11 +000034
sewardjc859fbf2005-04-22 21:10:28 +000035 test whether it would be faster, for LOADV4, to check
36 only for 8-byte validity on the fast path
sewardj45d94cc2005-04-20 14:44:11 +000037*/
38
njnc7561b92005-06-19 01:24:32 +000039#include "pub_tool_basics.h"
njn4802b382005-06-11 04:58:29 +000040#include "pub_tool_aspacemgr.h"
njnc7561b92005-06-19 01:24:32 +000041#include "pub_tool_errormgr.h" // For mac_shared.h
42#include "pub_tool_execontext.h" // For mac_shared.h
43#include "pub_tool_hashtable.h" // For mac_shared.h
njn97405b22005-06-02 03:39:33 +000044#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000045#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000046#include "pub_tool_libcprint.h"
njnf536bbb2005-06-13 04:21:38 +000047#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000048#include "pub_tool_mallocfree.h"
49#include "pub_tool_options.h"
50#include "pub_tool_profile.h" // For mac_shared.h
51#include "pub_tool_replacemalloc.h"
52#include "pub_tool_tooliface.h"
53#include "pub_tool_threadstate.h"
54
55#include "mc_include.h"
56#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000057
sewardj45d94cc2005-04-20 14:44:11 +000058
sewardjc1a2cda2005-04-21 17:34:00 +000059#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
60#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
61
62/* Define to debug the mem audit system. Set to:
63 0 no debugging, fast cases are used
64 1 some sanity checking, fast cases are used
65 2 max sanity checking, only slow cases are used
66*/
sewardj23eb2fd2005-04-22 16:29:19 +000067#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000068
njn25e49d8e72002-09-23 09:36:25 +000069#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
70
njn25e49d8e72002-09-23 09:36:25 +000071
njn25e49d8e72002-09-23 09:36:25 +000072/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000073/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000074/*------------------------------------------------------------*/
75
sewardjc859fbf2005-04-22 21:10:28 +000076/* TODO: fix this comment */
77//zz /* All reads and writes are checked against a memory map, which
78//zz records the state of all memory in the process. The memory map is
79//zz organised like this:
80//zz
81//zz The top 16 bits of an address are used to index into a top-level
82//zz map table, containing 65536 entries. Each entry is a pointer to a
83//zz second-level map, which records the accesibililty and validity
84//zz permissions for the 65536 bytes indexed by the lower 16 bits of the
85//zz address. Each byte is represented by nine bits, one indicating
86//zz accessibility, the other eight validity. So each second-level map
87//zz contains 73728 bytes. This two-level arrangement conveniently
88//zz divides the 4G address space into 64k lumps, each size 64k bytes.
89//zz
90//zz All entries in the primary (top-level) map must point to a valid
91//zz secondary (second-level) map. Since most of the 4G of address
92//zz space will not be in use -- ie, not mapped at all -- there is a
njn02bc4b82005-05-15 17:28:26 +000093//zz distinguished secondary map, which indicates 'not addressible and
sewardjc859fbf2005-04-22 21:10:28 +000094//zz not valid' writeable for all bytes. Entries in the primary map for
95//zz which the entire 64k is not in use at all point at this
96//zz distinguished map.
97//zz
98//zz There are actually 4 distinguished secondaries. These are used to
99//zz represent a memory range which is either not addressable (validity
100//zz doesn't matter), addressable+not valid, addressable+valid.
101//zz
102//zz [...] lots of stuff deleted due to out of date-ness
103//zz
104//zz As a final optimisation, the alignment and address checks for
105//zz 4-byte loads and stores are combined in a neat way. The primary
106//zz map is extended to have 262144 entries (2^18), rather than 2^16.
107//zz The top 3/4 of these entries are permanently set to the
108//zz distinguished secondary map. For a 4-byte load/store, the
109//zz top-level map is indexed not with (addr >> 16) but instead f(addr),
110//zz where
111//zz
112//zz f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
113//zz = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
114//zz = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
115//zz
116//zz ie the lowest two bits are placed above the 16 high address bits.
117//zz If either of these two bits are nonzero, the address is misaligned;
118//zz this will select a secondary map from the upper 3/4 of the primary
119//zz map. Because this is always the distinguished secondary map, a
120//zz (bogus) address check failure will result. The failure handling
121//zz code can then figure out whether this is a genuine addr check
122//zz failure or whether it is a possibly-legitimate access at a
123//zz misaligned address.
124//zz */
125
sewardj45d94cc2005-04-20 14:44:11 +0000126/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000127
sewardj23eb2fd2005-04-22 16:29:19 +0000128/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000129
sewardje4ccc012005-05-02 12:53:38 +0000130#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000131
132/* cover the entire address space */
133# define N_PRIMARY_BITS 16
134
135#else
136
137/* Just handle the first 16G fast and the rest via auxiliary
138 primaries. */
139# define N_PRIMARY_BITS 18
140
141#endif
142
sewardj45d94cc2005-04-20 14:44:11 +0000143
sewardjc1a2cda2005-04-21 17:34:00 +0000144/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000145#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000146
147/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000148#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
149
150
151/* --------------- Stats maps --------------- */
152
153static Int n_secmaps_issued = 0;
154static ULong n_auxmap_searches = 0;
155static ULong n_auxmap_cmps = 0;
156static Int n_sanity_cheap = 0;
157static Int n_sanity_expensive = 0;
sewardj45d94cc2005-04-20 14:44:11 +0000158
159
160/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000161
162typedef
163 struct {
sewardj45d94cc2005-04-20 14:44:11 +0000164 UChar abits[8192];
165 UChar vbyte[65536];
njn25e49d8e72002-09-23 09:36:25 +0000166 }
167 SecMap;
168
sewardj45d94cc2005-04-20 14:44:11 +0000169/* 3 distinguished secondary maps, one for no-access, one for
170 accessible but undefined, and one for accessible and defined.
171 Distinguished secondaries may never be modified.
172*/
173#define SM_DIST_NOACCESS 0
174#define SM_DIST_ACCESS_UNDEFINED 1
175#define SM_DIST_ACCESS_DEFINED 2
njnb8dca862005-03-14 02:42:44 +0000176
sewardj45d94cc2005-04-20 14:44:11 +0000177static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000178
sewardj45d94cc2005-04-20 14:44:11 +0000179static inline Bool is_distinguished_sm ( SecMap* sm ) {
180 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
181}
njnb8dca862005-03-14 02:42:44 +0000182
sewardj45d94cc2005-04-20 14:44:11 +0000183/* dist_sm points to one of our three distinguished secondaries. Make
184 a copy of it so that we can write to it.
185*/
186static SecMap* copy_for_writing ( SecMap* dist_sm )
187{
188 SecMap* new_sm;
189 tl_assert(dist_sm == &sm_distinguished[0]
190 || dist_sm == &sm_distinguished[1]
191 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000192
sewardj45d94cc2005-04-20 14:44:11 +0000193 new_sm = VG_(shadow_alloc)(sizeof(SecMap));
194 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
sewardj23eb2fd2005-04-22 16:29:19 +0000195 n_secmaps_issued++;
sewardj45d94cc2005-04-20 14:44:11 +0000196 return new_sm;
197}
njnb8dca862005-03-14 02:42:44 +0000198
sewardj45d94cc2005-04-20 14:44:11 +0000199
200/* --------------- Primary maps --------------- */
201
202/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000203 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000204 handled using the auxiliary primary map.
205*/
sewardj23eb2fd2005-04-22 16:29:19 +0000206static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000207
208
209/* An entry in the auxiliary primary map. base must be a 64k-aligned
210 value, and sm points at the relevant secondary map. As with the
211 main primary map, the secondary may be either a real secondary, or
212 one of the three distinguished secondaries.
213*/
214typedef
215 struct {
sewardj23eb2fd2005-04-22 16:29:19 +0000216 Addr base;
sewardj45d94cc2005-04-20 14:44:11 +0000217 SecMap* sm;
218 }
219 AuxMapEnt;
220
221/* An expanding array of AuxMapEnts. */
sewardjaba741d2005-06-09 13:56:07 +0000222#define N_AUXMAPS 20000 /* HACK */
sewardj45d94cc2005-04-20 14:44:11 +0000223static AuxMapEnt hacky_auxmaps[N_AUXMAPS];
224static Int auxmap_size = N_AUXMAPS;
225static Int auxmap_used = 0;
226static AuxMapEnt* auxmap = &hacky_auxmaps[0];
227
sewardj45d94cc2005-04-20 14:44:11 +0000228
229/* Find an entry in the auxiliary map. If an entry is found, move it
230 one step closer to the front of the array, then return its address.
sewardj05fe85e2005-04-27 22:46:36 +0000231 If an entry is not found, return NULL. Note carefully that
sewardj45d94cc2005-04-20 14:44:11 +0000232 because a each call potentially rearranges the entries, each call
233 to this function invalidates ALL AuxMapEnt*s previously obtained by
234 calling this fn.
235*/
sewardj05fe85e2005-04-27 22:46:36 +0000236static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000237{
238 UWord i;
239 tl_assert(a > MAX_PRIMARY_ADDRESS);
240
241 a &= ~(Addr)0xFFFF;
242
243 /* Search .. */
244 n_auxmap_searches++;
245 for (i = 0; i < auxmap_used; i++) {
246 if (auxmap[i].base == a)
247 break;
248 }
249 n_auxmap_cmps += (ULong)(i+1);
250
251 if (i < auxmap_used) {
252 /* Found it. Nudge it a bit closer to the front. */
253 if (i > 0) {
254 AuxMapEnt tmp = auxmap[i-1];
255 auxmap[i-1] = auxmap[i];
256 auxmap[i] = tmp;
257 i--;
258 }
259 return &auxmap[i];
260 }
261
sewardj05fe85e2005-04-27 22:46:36 +0000262 return NULL;
263}
264
265
266/* Find an entry in the auxiliary map. If an entry is found, move it
267 one step closer to the front of the array, then return its address.
268 If an entry is not found, allocate one. Note carefully that
269 because a each call potentially rearranges the entries, each call
270 to this function invalidates ALL AuxMapEnt*s previously obtained by
271 calling this fn.
272*/
273static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
274{
275 AuxMapEnt* am = maybe_find_in_auxmap(a);
276 if (am)
277 return am;
278
sewardj45d94cc2005-04-20 14:44:11 +0000279 /* We didn't find it. Hmm. This is a new piece of address space.
280 We'll need to allocate a new AuxMap entry for it. */
281 if (auxmap_used >= auxmap_size) {
282 tl_assert(auxmap_used == auxmap_size);
283 /* Out of auxmap entries. */
284 tl_assert2(0, "failed to expand the auxmap table");
285 }
286
287 tl_assert(auxmap_used < auxmap_size);
288
289 auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
290 auxmap[auxmap_used].sm = &sm_distinguished[SM_DIST_NOACCESS];
291
292 if (0)
293 VG_(printf)("new auxmap, base = 0x%llx\n",
294 (ULong)auxmap[auxmap_used].base );
295
296 auxmap_used++;
297 return &auxmap[auxmap_used-1];
298}
299
300
301/* --------------- SecMap fundamentals --------------- */
302
303/* Produce the secmap for 'a', either from the primary map or by
304 ensuring there is an entry for it in the aux primary map. The
305 secmap may be a distinguished one as the caller will only want to
306 be able to read it.
307*/
308static SecMap* get_secmap_readable ( Addr a )
309{
310 if (a <= MAX_PRIMARY_ADDRESS) {
311 UWord pm_off = a >> 16;
312 return primary_map[ pm_off ];
313 } else {
314 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
315 return am->sm;
316 }
317}
318
sewardj05fe85e2005-04-27 22:46:36 +0000319/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
320 allocate one if one doesn't already exist. This is used by the
321 leak checker.
322*/
323static SecMap* maybe_get_secmap_for ( Addr a )
324{
325 if (a <= MAX_PRIMARY_ADDRESS) {
326 UWord pm_off = a >> 16;
327 return primary_map[ pm_off ];
328 } else {
329 AuxMapEnt* am = maybe_find_in_auxmap(a);
330 return am ? am->sm : NULL;
331 }
332}
333
334
335
sewardj45d94cc2005-04-20 14:44:11 +0000336/* Produce the secmap for 'a', either from the primary map or by
337 ensuring there is an entry for it in the aux primary map. The
338 secmap may not be a distinguished one, since the caller will want
339 to be able to write it. If it is a distinguished secondary, make a
340 writable copy of it, install it, and return the copy instead. (COW
341 semantics).
342*/
343static SecMap* get_secmap_writable ( Addr a )
344{
345 if (a <= MAX_PRIMARY_ADDRESS) {
346 UWord pm_off = a >> 16;
347 if (is_distinguished_sm(primary_map[ pm_off ]))
348 primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
349 return primary_map[pm_off];
350 } else {
351 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
352 if (is_distinguished_sm(am->sm))
353 am->sm = copy_for_writing(am->sm);
354 return am->sm;
355 }
356}
357
358
359/* --------------- Endianness helpers --------------- */
360
361/* Returns the offset in memory of the byteno-th most significant byte
362 in a wordszB-sized word, given the specified endianness. */
363static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
364 UWord byteno ) {
365 return bigendian ? (wordszB-1-byteno) : byteno;
366}
367
368
369/* --------------- Fundamental functions --------------- */
370
371static
372void get_abit_and_vbyte ( /*OUT*/UWord* abit,
373 /*OUT*/UWord* vbyte,
374 Addr a )
375{
376 SecMap* sm = get_secmap_readable(a);
377 *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
378 *abit = read_bit_array(sm->abits, a & 0xFFFF);
379}
380
381static
382UWord get_abit ( Addr a )
383{
384 SecMap* sm = get_secmap_readable(a);
385 return read_bit_array(sm->abits, a & 0xFFFF);
386}
387
388static
389void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
390{
391 SecMap* sm = get_secmap_writable(a);
392 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
393 write_bit_array(sm->abits, a & 0xFFFF, abit);
394}
395
396static
397void set_vbyte ( Addr a, UWord vbyte )
398{
399 SecMap* sm = get_secmap_writable(a);
400 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
401}
402
403
404/* --------------- Load/store slow cases. --------------- */
405
406static
407ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
408{
409 /* Make up a result V word, which contains the loaded data for
sewardjf3d57dd2005-04-22 20:23:27 +0000410 valid addresses and Defined for invalid addresses. Iterate over
411 the bytes in the word, from the most significant down to the
412 least. */
sewardj45d94cc2005-04-20 14:44:11 +0000413 ULong vw = VGM_WORD64_INVALID;
414 SizeT i = szB-1;
415 SizeT n_addrs_bad = 0;
416 Addr ai;
417 Bool aok;
418 UWord abit, vbyte;
419
sewardjc1a2cda2005-04-21 17:34:00 +0000420 PROF_EVENT(30, "mc_LOADVn_slow");
sewardj45d94cc2005-04-20 14:44:11 +0000421 tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
422
423 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +0000424 PROF_EVENT(31, "mc_LOADVn_slow(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000425 ai = a+byte_offset_w(szB,bigendian,i);
426 get_abit_and_vbyte(&abit, &vbyte, ai);
427 aok = abit == VGM_BIT_VALID;
428 if (!aok)
429 n_addrs_bad++;
430 vw <<= 8;
sewardjf3d57dd2005-04-22 20:23:27 +0000431 vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
sewardj45d94cc2005-04-20 14:44:11 +0000432 if (i == 0) break;
433 i--;
434 }
435
436 if (n_addrs_bad > 0)
437 MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
438
sewardj45d94cc2005-04-20 14:44:11 +0000439 return vw;
440}
441
442
443static
444void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
445{
446 SizeT i;
447 SizeT n_addrs_bad = 0;
448 UWord abit;
449 Bool aok;
450 Addr ai;
451
sewardjc1a2cda2005-04-21 17:34:00 +0000452 PROF_EVENT(35, "mc_STOREVn_slow");
sewardj45d94cc2005-04-20 14:44:11 +0000453 tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
454
455 /* Dump vbytes in memory, iterating from least to most significant
456 byte. At the same time establish addressibility of the
457 location. */
458 for (i = 0; i < szB; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000459 PROF_EVENT(36, "mc_STOREVn_slow(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000460 ai = a+byte_offset_w(szB,bigendian,i);
461 abit = get_abit(ai);
462 aok = abit == VGM_BIT_VALID;
463 if (!aok)
464 n_addrs_bad++;
465 set_vbyte(ai, vbytes & 0xFF );
466 vbytes >>= 8;
467 }
468
469 /* If an address error has happened, report it. */
470 if (n_addrs_bad > 0)
471 MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
472}
473
474
sewardj45d94cc2005-04-20 14:44:11 +0000475//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
476//zz
477//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
478//zz {
479//zz SecMap* sm;
480//zz UInt sm_off;
481//zz UChar abits8;
482//zz PROF_EVENT(24);
483//zz # ifdef VG_DEBUG_MEMORY
484//zz tl_assert(VG_IS_4_ALIGNED(a));
485//zz # endif
486//zz sm = primary_map[PM_IDX(a)];
487//zz sm_off = SM_OFF(a);
488//zz abits8 = sm->abits[sm_off >> 3];
489//zz abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
490//zz abits8 &= 0x0F;
491//zz return abits8;
492//zz }
493//zz
494//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
495//zz {
496//zz SecMap* sm = primary_map[PM_IDX(a)];
497//zz UInt sm_off = SM_OFF(a);
498//zz PROF_EVENT(25);
499//zz # ifdef VG_DEBUG_MEMORY
500//zz tl_assert(VG_IS_4_ALIGNED(a));
501//zz # endif
502//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
503//zz }
504//zz
505//zz
506//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
507//zz {
508//zz SecMap* sm;
509//zz UInt sm_off;
510//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
511//zz sm = primary_map[PM_IDX(a)];
512//zz sm_off = SM_OFF(a);
513//zz PROF_EVENT(23);
514//zz # ifdef VG_DEBUG_MEMORY
515//zz tl_assert(VG_IS_4_ALIGNED(a));
516//zz # endif
517//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
518//zz }
sewardjee070842003-07-05 17:53:55 +0000519
520
njn25e49d8e72002-09-23 09:36:25 +0000521/*------------------------------------------------------------*/
522/*--- Setting permissions over address ranges. ---*/
523/*------------------------------------------------------------*/
524
sewardj23eb2fd2005-04-22 16:29:19 +0000525/* Given address 'a', find the place where the pointer to a's
526 secondary map lives. If a falls into the primary map, the returned
527 value points to one of the entries in primary_map[]. Otherwise,
528 the auxiliary primary map is searched for 'a', or an entry is
529 created for it; either way, the returned value points to the
530 relevant AuxMapEnt's .sm field.
531
532 The point of this is to enable set_address_range_perms to assign
533 secondary maps in a uniform way, without worrying about whether a
534 given secondary map is pointed to from the main or auxiliary
535 primary map.
536*/
537
538static SecMap** find_secmap_binder_for_addr ( Addr aA )
539{
540 if (aA > MAX_PRIMARY_ADDRESS) {
541 AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
542 return &am->sm;
543 } else {
544 UWord a = (UWord)aA;
545 UWord sec_no = (UWord)(a >> 16);
546# if VG_DEBUG_MEMORY >= 1
547 tl_assert(sec_no < N_PRIMARY_MAP);
548# endif
549 return &primary_map[sec_no];
550 }
551}
552
553
554static void set_address_range_perms ( Addr aA, SizeT len,
sewardj45d94cc2005-04-20 14:44:11 +0000555 UWord example_a_bit,
556 UWord example_v_bit )
njn25e49d8e72002-09-23 09:36:25 +0000557{
sewardj23eb2fd2005-04-22 16:29:19 +0000558 PROF_EVENT(150, "set_address_range_perms");
559
560 /* Check the permissions make sense. */
561 tl_assert(example_a_bit == VGM_BIT_VALID
562 || example_a_bit == VGM_BIT_INVALID);
563 tl_assert(example_v_bit == VGM_BIT_VALID
564 || example_v_bit == VGM_BIT_INVALID);
565 if (example_a_bit == VGM_BIT_INVALID)
566 tl_assert(example_v_bit == VGM_BIT_INVALID);
567
568 if (len == 0)
569 return;
570
sewardj1fa7d2c2005-06-13 18:22:17 +0000571 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000572 if (len > 100 * 1000 * 1000) {
573 VG_(message)(Vg_UserMsg,
574 "Warning: set address range perms: "
575 "large range %u, a %d, v %d",
576 len, example_a_bit, example_v_bit );
577 }
578 }
579
580 UWord a = (UWord)aA;
581
582# if VG_DEBUG_MEMORY >= 2
583
584 /*------------------ debug-only case ------------------ */
sewardj45d94cc2005-04-20 14:44:11 +0000585 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000586
sewardj23eb2fd2005-04-22 16:29:19 +0000587 UWord example_vbyte = BIT_TO_BYTE(example_v_bit);
sewardj45d94cc2005-04-20 14:44:11 +0000588
589 tl_assert(sizeof(SizeT) == sizeof(Addr));
590
591 if (0 && len >= 4096)
592 VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
593 (ULong)a, len, example_a_bit, example_v_bit);
njn25e49d8e72002-09-23 09:36:25 +0000594
595 if (len == 0)
596 return;
597
sewardj45d94cc2005-04-20 14:44:11 +0000598 for (i = 0; i < len; i++) {
599 set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
njn25e49d8e72002-09-23 09:36:25 +0000600 }
njn25e49d8e72002-09-23 09:36:25 +0000601
sewardj23eb2fd2005-04-22 16:29:19 +0000602# else
603
604 /*------------------ standard handling ------------------ */
605 UWord vbits8, abits8, vbits32, v_off, a_off;
606 SecMap* sm;
607 SecMap** binder;
608 SecMap* example_dsm;
609
610 /* Decide on the distinguished secondary that we might want
611 to use (part of the space-compression scheme). */
612 if (example_a_bit == VGM_BIT_INVALID) {
613 example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
614 } else {
615 if (example_v_bit == VGM_BIT_VALID) {
616 example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
617 } else {
618 example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
619 }
620 }
621
622 /* Make various wider versions of the A/V values to use. */
623 vbits8 = BIT_TO_BYTE(example_v_bit);
624 abits8 = BIT_TO_BYTE(example_a_bit);
625 vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;
626
627 /* Slowly do parts preceding 8-byte alignment. */
628 while (True) {
629 if (len == 0) break;
630 PROF_EVENT(151, "set_address_range_perms-loop1-pre");
631 if (VG_IS_8_ALIGNED(a)) break;
632 set_abit_and_vbyte( a, example_a_bit, vbits8 );
633 a++;
634 len--;
635 }
636
637 if (len == 0)
638 return;
639
640 tl_assert(VG_IS_8_ALIGNED(a) && len > 0);
641
642 /* Now go in steps of 8 bytes. */
643 binder = find_secmap_binder_for_addr(a);
644
645 while (True) {
646
647 if (len < 8) break;
648
649 PROF_EVENT(152, "set_address_range_perms-loop8");
650
651 if ((a & SECONDARY_MASK) == 0) {
652 /* we just traversed a primary map boundary, so update the
653 binder. */
654 binder = find_secmap_binder_for_addr(a);
655 PROF_EVENT(153, "set_address_range_perms-update-binder");
656
657 /* Space-optimisation. If we are setting the entire
658 secondary map, just point this entry at one of our
659 distinguished secondaries. However, only do that if it
660 already points at a distinguished secondary, since doing
661 otherwise would leak the existing secondary. We could do
662 better and free up any pre-existing non-distinguished
663 secondary at this point, since we are guaranteed that each
664 non-dist secondary only has one pointer to it, and we have
665 that pointer right here. */
666 if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
667 PROF_EVENT(154, "set_address_range_perms-entire-secmap");
668 *binder = example_dsm;
669 len -= SECONDARY_SIZE;
670 a += SECONDARY_SIZE;
671 continue;
672 }
673 }
674
675 /* If the primary is already pointing to a distinguished map
676 with the same properties as we're trying to set, then leave
677 it that way. */
678 if (*binder == example_dsm) {
679 a += 8;
680 len -= 8;
681 continue;
682 }
683
684 /* Make sure it's OK to write the secondary. */
685 if (is_distinguished_sm(*binder))
686 *binder = copy_for_writing(*binder);
687
688 sm = *binder;
689 v_off = a & 0xFFFF;
690 a_off = v_off >> 3;
691 sm->abits[a_off] = (UChar)abits8;
692 ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
693 ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;
694
695 a += 8;
696 len -= 8;
697 }
698
699 if (len == 0)
700 return;
701
702 tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);
703
704 /* Finish the upper fragment. */
705 while (True) {
706 if (len == 0) break;
707 PROF_EVENT(155, "set_address_range_perms-loop1-post");
708 set_abit_and_vbyte ( a, example_a_bit, vbits8 );
709 a++;
710 len--;
711 }
712
713# endif
714}
sewardj45d94cc2005-04-20 14:44:11 +0000715
sewardjc859fbf2005-04-22 21:10:28 +0000716
717/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +0000718
nethercote8b76fe52004-11-08 19:20:09 +0000719static void mc_make_noaccess ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000720{
sewardjc1a2cda2005-04-21 17:34:00 +0000721 PROF_EVENT(40, "mc_make_noaccess");
nethercote8b76fe52004-11-08 19:20:09 +0000722 DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000723 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
724}
725
nethercote8b76fe52004-11-08 19:20:09 +0000726static void mc_make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000727{
sewardjc1a2cda2005-04-21 17:34:00 +0000728 PROF_EVENT(41, "mc_make_writable");
nethercote8b76fe52004-11-08 19:20:09 +0000729 DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000730 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
731}
732
nethercote8b76fe52004-11-08 19:20:09 +0000733static void mc_make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000734{
sewardjc1a2cda2005-04-21 17:34:00 +0000735 PROF_EVENT(42, "mc_make_readable");
nethercote8b76fe52004-11-08 19:20:09 +0000736 DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000737 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
738}
739
njn9b007f62003-04-07 14:40:25 +0000740
sewardjc859fbf2005-04-22 21:10:28 +0000741/* --- Block-copy permissions (needed for implementing realloc()). --- */
742
743static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
744{
745 SizeT i;
746 UWord abit, vbyte;
747
748 DEBUG("mc_copy_address_range_state\n");
749
750 PROF_EVENT(50, "mc_copy_address_range_state");
751 for (i = 0; i < len; i++) {
752 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
753 get_abit_and_vbyte( &abit, &vbyte, src+i );
754 set_abit_and_vbyte( dst+i, abit, vbyte );
755 }
756}
757
758
759/* --- Fast case permission setters, for dealing with stacks. --- */
760
njn9b007f62003-04-07 14:40:25 +0000761static __inline__
sewardj5d28efc2005-04-21 22:16:29 +0000762void make_aligned_word32_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000763{
sewardj5d28efc2005-04-21 22:16:29 +0000764 PROF_EVENT(300, "make_aligned_word32_writable");
765
766# if VG_DEBUG_MEMORY >= 2
767 mc_make_writable(aA, 4);
768# else
769
770 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000771 PROF_EVENT(301, "make_aligned_word32_writable-slow1");
sewardj5d28efc2005-04-21 22:16:29 +0000772 mc_make_writable(aA, 4);
773 return;
774 }
775
776 UWord a = (UWord)aA;
777 UWord sec_no = (UWord)(a >> 16);
778# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000779 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000780# endif
781
782 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
783 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
784
785 SecMap* sm = primary_map[sec_no];
786 UWord v_off = a & 0xFFFF;
787 UWord a_off = v_off >> 3;
788
789 /* Paint the new area as uninitialised. */
790 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
791
792 UWord mask = 0x0F;
793 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
794 /* mask now contains 1s where we wish to make address bits valid
795 (0s). */
796 sm->abits[a_off] &= ~mask;
797# endif
njn9b007f62003-04-07 14:40:25 +0000798}
799
sewardj5d28efc2005-04-21 22:16:29 +0000800
801static __inline__
802void make_aligned_word32_noaccess ( Addr aA )
803{
804 PROF_EVENT(310, "make_aligned_word32_noaccess");
805
806# if VG_DEBUG_MEMORY >= 2
807 mc_make_noaccess(aA, 4);
808# else
809
810 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
811 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
812 mc_make_noaccess(aA, 4);
813 return;
814 }
815
816 UWord a = (UWord)aA;
817 UWord sec_no = (UWord)(a >> 16);
818# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000819 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000820# endif
821
822 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
823 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
824
825 SecMap* sm = primary_map[sec_no];
826 UWord v_off = a & 0xFFFF;
827 UWord a_off = v_off >> 3;
828
829 /* Paint the abandoned data as uninitialised. Probably not
830 necessary, but still .. */
831 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
832
833 UWord mask = 0x0F;
834 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
835 /* mask now contains 1s where we wish to make address bits invalid
836 (1s). */
837 sm->abits[a_off] |= mask;
838# endif
839}
840
841
njn9b007f62003-04-07 14:40:25 +0000842/* Nb: by "aligned" here we mean 8-byte aligned */
843static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000844void make_aligned_word64_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000845{
sewardj23eb2fd2005-04-22 16:29:19 +0000846 PROF_EVENT(320, "make_aligned_word64_writable");
847
848# if VG_DEBUG_MEMORY >= 2
849 mc_make_writable(aA, 8);
850# else
851
852 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
853 PROF_EVENT(321, "make_aligned_word64_writable-slow1");
854 mc_make_writable(aA, 8);
855 return;
856 }
857
858 UWord a = (UWord)aA;
859 UWord sec_no = (UWord)(a >> 16);
860# if VG_DEBUG_MEMORY >= 1
861 tl_assert(sec_no < N_PRIMARY_MAP);
862# endif
863
864 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
865 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
866
867 SecMap* sm = primary_map[sec_no];
868 UWord v_off = a & 0xFFFF;
869 UWord a_off = v_off >> 3;
870
871 /* Paint the new area as uninitialised. */
872 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
873
874 /* Make the relevant area accessible. */
875 sm->abits[a_off] = VGM_BYTE_VALID;
876# endif
njn9b007f62003-04-07 14:40:25 +0000877}
878
sewardj23eb2fd2005-04-22 16:29:19 +0000879
njn9b007f62003-04-07 14:40:25 +0000880static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000881void make_aligned_word64_noaccess ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000882{
sewardj23eb2fd2005-04-22 16:29:19 +0000883 PROF_EVENT(330, "make_aligned_word64_noaccess");
884
885# if VG_DEBUG_MEMORY >= 2
886 mc_make_noaccess(aA, 8);
887# else
888
889 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
890 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
891 mc_make_noaccess(aA, 8);
892 return;
893 }
894
895 UWord a = (UWord)aA;
896 UWord sec_no = (UWord)(a >> 16);
897# if VG_DEBUG_MEMORY >= 1
898 tl_assert(sec_no < N_PRIMARY_MAP);
899# endif
900
901 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
902 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
903
904 SecMap* sm = primary_map[sec_no];
905 UWord v_off = a & 0xFFFF;
906 UWord a_off = v_off >> 3;
907
908 /* Paint the abandoned data as uninitialised. Probably not
909 necessary, but still .. */
910 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
911
912 /* Make the abandoned area inaccessible. */
913 sm->abits[a_off] = VGM_BYTE_INVALID;
914# endif
njn9b007f62003-04-07 14:40:25 +0000915}
916
sewardj23eb2fd2005-04-22 16:29:19 +0000917
sewardj45d94cc2005-04-20 14:44:11 +0000918/* The stack-pointer update handling functions */
919SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
920 make_aligned_word32_noaccess,
921 make_aligned_word64_writable,
922 make_aligned_word64_noaccess,
923 mc_make_writable,
924 mc_make_noaccess
925 );
njn9b007f62003-04-07 14:40:25 +0000926
sewardj45d94cc2005-04-20 14:44:11 +0000927
sewardj826ec492005-05-12 18:05:00 +0000928void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
929{
930 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +0000931 if (0)
932 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
933
934# if 0
935 /* Really slow version */
936 mc_make_writable(base, len);
937# endif
938
939# if 0
940 /* Slow(ish) version, which is fairly easily seen to be correct.
941 */
942 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
943 make_aligned_word64_writable(base + 0);
944 make_aligned_word64_writable(base + 8);
945 make_aligned_word64_writable(base + 16);
946 make_aligned_word64_writable(base + 24);
947
948 make_aligned_word64_writable(base + 32);
949 make_aligned_word64_writable(base + 40);
950 make_aligned_word64_writable(base + 48);
951 make_aligned_word64_writable(base + 56);
952
953 make_aligned_word64_writable(base + 64);
954 make_aligned_word64_writable(base + 72);
955 make_aligned_word64_writable(base + 80);
956 make_aligned_word64_writable(base + 88);
957
958 make_aligned_word64_writable(base + 96);
959 make_aligned_word64_writable(base + 104);
960 make_aligned_word64_writable(base + 112);
961 make_aligned_word64_writable(base + 120);
962 } else {
963 mc_make_writable(base, len);
964 }
965# endif
966
967 /* Idea is: go fast when
968 * 8-aligned and length is 128
969 * the sm is available in the main primary map
970 * the address range falls entirely with a single
971 secondary map
972 * the SM is modifiable
973 If all those conditions hold, just update the V bits
974 by writing directly on the v-bit array. We don't care
975 about A bits; if the address range is marked invalid,
976 any attempt to access it will elicit an addressing error,
977 and that's good enough.
978 */
979 if (EXPECTED_TAKEN( len == 128
980 && VG_IS_8_ALIGNED(base)
981 )) {
982 /* Now we know the address range is suitably sized and
983 aligned. */
984 UWord a_lo = (UWord)base;
985 UWord a_hi = (UWord)(base + 127);
986 UWord sec_lo = a_lo >> 16;
987 UWord sec_hi = a_hi >> 16;
988
989 if (EXPECTED_TAKEN( sec_lo == sec_hi
990 && sec_lo <= N_PRIMARY_MAP
991 )) {
992 /* Now we know that the entire address range falls within a
993 single secondary map, and that that secondary 'lives' in
994 the main primary map. */
995 SecMap* sm = primary_map[sec_lo];
996
997 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
998 /* And finally, now we know that the secondary in question
999 is modifiable. */
1000 UWord v_off = a_lo & 0xFFFF;
1001 ULong* p = (ULong*)(&sm->vbyte[v_off]);
1002 p[ 0] = VGM_WORD64_INVALID;
1003 p[ 1] = VGM_WORD64_INVALID;
1004 p[ 2] = VGM_WORD64_INVALID;
1005 p[ 3] = VGM_WORD64_INVALID;
1006 p[ 4] = VGM_WORD64_INVALID;
1007 p[ 5] = VGM_WORD64_INVALID;
1008 p[ 6] = VGM_WORD64_INVALID;
1009 p[ 7] = VGM_WORD64_INVALID;
1010 p[ 8] = VGM_WORD64_INVALID;
1011 p[ 9] = VGM_WORD64_INVALID;
1012 p[10] = VGM_WORD64_INVALID;
1013 p[11] = VGM_WORD64_INVALID;
1014 p[12] = VGM_WORD64_INVALID;
1015 p[13] = VGM_WORD64_INVALID;
1016 p[14] = VGM_WORD64_INVALID;
1017 p[15] = VGM_WORD64_INVALID;
1018 return;
1019 }
1020 }
1021 }
1022
1023 /* else fall into slow case */
sewardj826ec492005-05-12 18:05:00 +00001024 mc_make_writable(base, len);
1025}
1026
1027
nethercote8b76fe52004-11-08 19:20:09 +00001028/*------------------------------------------------------------*/
1029/*--- Checking memory ---*/
1030/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001031
sewardje4ccc012005-05-02 12:53:38 +00001032typedef
1033 enum {
1034 MC_Ok = 5,
1035 MC_AddrErr = 6,
1036 MC_ValueErr = 7
1037 }
1038 MC_ReadResult;
1039
1040
njn25e49d8e72002-09-23 09:36:25 +00001041/* Check permissions for address range. If inadequate permissions
1042 exist, *bad_addr is set to the offending address, so the caller can
1043 know what it is. */
1044
sewardjecf8e102003-07-12 12:11:39 +00001045/* Returns True if [a .. a+len) is not addressible. Otherwise,
1046 returns False, and if bad_addr is non-NULL, sets *bad_addr to
1047 indicate the lowest failing address. Functions below are
1048 similar. */
nethercote8b76fe52004-11-08 19:20:09 +00001049static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00001050{
nethercote451eae92004-11-02 13:06:32 +00001051 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001052 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001053 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00001054 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001055 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +00001056 abit = get_abit(a);
1057 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001058 if (bad_addr != NULL)
1059 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00001060 return False;
1061 }
1062 a++;
1063 }
1064 return True;
1065}
1066
nethercote8b76fe52004-11-08 19:20:09 +00001067static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001068{
nethercote451eae92004-11-02 13:06:32 +00001069 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001070 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001071 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +00001072 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001073 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +00001074 abit = get_abit(a);
1075 if (abit == VGM_BIT_INVALID) {
1076 if (bad_addr != NULL) *bad_addr = a;
1077 return False;
1078 }
1079 a++;
1080 }
1081 return True;
1082}
1083
nethercote8b76fe52004-11-08 19:20:09 +00001084static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001085{
nethercote451eae92004-11-02 13:06:32 +00001086 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001087 UWord abit;
1088 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +00001089
sewardjc1a2cda2005-04-21 17:34:00 +00001090 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +00001091 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +00001092 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001093 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001094 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001095 // Report addressability errors in preference to definedness errors
1096 // by checking the A bits first.
1097 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001098 if (bad_addr != NULL)
1099 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001100 return MC_AddrErr;
1101 }
1102 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001103 if (bad_addr != NULL)
1104 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001105 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001106 }
1107 a++;
1108 }
nethercote8b76fe52004-11-08 19:20:09 +00001109 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001110}
1111
1112
1113/* Check a zero-terminated ascii string. Tricky -- don't want to
1114 examine the actual bytes, to find the end, until we're sure it is
1115 safe to do so. */
1116
njn9b007f62003-04-07 14:40:25 +00001117static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001118{
sewardj45d94cc2005-04-20 14:44:11 +00001119 UWord abit;
1120 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +00001121 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +00001122 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00001123 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +00001124 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001125 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001126 // As in mc_check_readable(), check A bits first
1127 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001128 if (bad_addr != NULL)
1129 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001130 return MC_AddrErr;
1131 }
1132 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001133 if (bad_addr != NULL)
1134 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001135 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001136 }
1137 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +00001138 if (* ((UChar*)a) == 0)
1139 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001140 a++;
1141 }
1142}
1143
1144
1145/*------------------------------------------------------------*/
1146/*--- Memory event handlers ---*/
1147/*------------------------------------------------------------*/
1148
njn25e49d8e72002-09-23 09:36:25 +00001149static
njn72718642003-07-24 08:45:32 +00001150void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001151 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001152{
1153 Bool ok;
1154 Addr bad_addr;
1155
1156 VGP_PUSHCC(VgpCheckMem);
1157
1158 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1159 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001160 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001161 if (!ok) {
1162 switch (part) {
1163 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001164 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1165 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001166 break;
1167
1168 case Vg_CorePThread:
1169 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001170 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001171 break;
1172
1173 default:
njn67993252004-11-22 18:02:32 +00001174 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001175 }
1176 }
1177
1178 VGP_POPCC(VgpCheckMem);
1179}
1180
1181static
njn72718642003-07-24 08:45:32 +00001182void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001183 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001184{
njn25e49d8e72002-09-23 09:36:25 +00001185 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001186 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001187
1188 VGP_PUSHCC(VgpCheckMem);
1189
1190 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
1191 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001192 res = mc_check_readable ( base, size, &bad_addr );
1193 if (MC_Ok != res) {
1194 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1195
njn25e49d8e72002-09-23 09:36:25 +00001196 switch (part) {
1197 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001198 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1199 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001200 break;
1201
1202 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001203 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001204 break;
1205
1206 /* If we're being asked to jump to a silly address, record an error
1207 message before potentially crashing the entire system. */
1208 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001209 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001210 break;
1211
1212 default:
njn67993252004-11-22 18:02:32 +00001213 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001214 }
1215 }
1216 VGP_POPCC(VgpCheckMem);
1217}
1218
1219static
njn72718642003-07-24 08:45:32 +00001220void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001221 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001222{
nethercote8b76fe52004-11-08 19:20:09 +00001223 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001224 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001225 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1226
1227 VGP_PUSHCC(VgpCheckMem);
1228
njnca82cc02004-11-22 17:18:48 +00001229 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001230 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1231 if (MC_Ok != res) {
1232 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1233 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001234 }
1235
1236 VGP_POPCC(VgpCheckMem);
1237}
1238
njn25e49d8e72002-09-23 09:36:25 +00001239static
nethercote451eae92004-11-02 13:06:32 +00001240void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001241{
njn1f3a9092002-10-04 09:22:30 +00001242 /* Ignore the permissions, just make it readable. Seems to work... */
nethercote451eae92004-11-02 13:06:32 +00001243 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
1244 a,(ULong)len,rr,ww,xx);
nethercote8b76fe52004-11-08 19:20:09 +00001245 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001246}
1247
1248static
nethercote451eae92004-11-02 13:06:32 +00001249void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001250{
1251 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001252 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001253 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001254 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001255 }
1256}
1257
1258static
njnb8dca862005-03-14 02:42:44 +00001259void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001260{
njnb8dca862005-03-14 02:42:44 +00001261 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001262}
1263
njncf45fd42004-11-24 16:30:22 +00001264static
1265void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
1266{
1267 mc_make_readable(a, len);
1268}
njn25e49d8e72002-09-23 09:36:25 +00001269
sewardj45d94cc2005-04-20 14:44:11 +00001270
njn25e49d8e72002-09-23 09:36:25 +00001271/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001272/*--- Register event handlers ---*/
1273/*------------------------------------------------------------*/
1274
sewardj45d94cc2005-04-20 14:44:11 +00001275/* When some chunk of guest state is written, mark the corresponding
1276 shadow area as valid. This is used to initialise arbitrarily large
sewardj2c27f702005-05-03 18:19:05 +00001277 chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
sewardj45d94cc2005-04-20 14:44:11 +00001278*/
1279static void mc_post_reg_write ( CorePart part, ThreadId tid,
1280 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001281{
sewardj6cf40ff2005-04-20 22:31:26 +00001282 UChar area[1024];
1283 tl_assert(size <= 1024);
njncf45fd42004-11-24 16:30:22 +00001284 VG_(memset)(area, VGM_BYTE_VALID, size);
1285 VG_(set_shadow_regs_area)( tid, offset, size, area );
njnd3040452003-05-19 15:04:06 +00001286}
1287
sewardj45d94cc2005-04-20 14:44:11 +00001288static
1289void mc_post_reg_write_clientcall ( ThreadId tid,
1290 OffT offset, SizeT size,
1291 Addr f)
njnd3040452003-05-19 15:04:06 +00001292{
njncf45fd42004-11-24 16:30:22 +00001293 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00001294}
1295
sewardj45d94cc2005-04-20 14:44:11 +00001296/* Look at the definedness of the guest's shadow state for
1297 [offset, offset+len). If any part of that is undefined, record
1298 a parameter error.
1299*/
1300static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1301 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001302{
sewardj45d94cc2005-04-20 14:44:11 +00001303 Int i;
1304 Bool bad;
1305
1306 UChar area[16];
1307 tl_assert(size <= 16);
1308
1309 VG_(get_shadow_regs_area)( tid, offset, size, area );
1310
1311 bad = False;
1312 for (i = 0; i < size; i++) {
1313 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001314 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001315 break;
1316 }
nethercote8b76fe52004-11-08 19:20:09 +00001317 }
1318
sewardj45d94cc2005-04-20 14:44:11 +00001319 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001320 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1321}
njnd3040452003-05-19 15:04:06 +00001322
njn25e49d8e72002-09-23 09:36:25 +00001323
sewardj6cf40ff2005-04-20 22:31:26 +00001324/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001325/*--- Printing errors ---*/
1326/*------------------------------------------------------------*/
1327
njn51d827b2005-05-09 01:02:08 +00001328static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00001329{
1330 MAC_Error* err_extra = VG_(get_error_extra)(err);
1331
sewardj71bc3cb2005-05-19 00:25:45 +00001332 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
1333 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
1334
njn9e63cb62005-05-08 18:34:59 +00001335 switch (VG_(get_error_kind)(err)) {
1336 case CoreMemErr: {
1337 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00001338 if (VG_(clo_xml))
1339 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
1340 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
1341 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
1342 xpre, VG_(get_error_string)(err), s, xpost);
1343
njn9e63cb62005-05-08 18:34:59 +00001344 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1345 break;
1346
1347 }
1348
1349 case ValueErr:
1350 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00001351 if (VG_(clo_xml))
1352 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
1353 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
1354 " on uninitialised value(s)%s",
1355 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00001356 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00001357 if (VG_(clo_xml))
1358 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
1359 VG_(message)(Vg_UserMsg,
1360 "%sUse of uninitialised value of size %d%s",
1361 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00001362 }
1363 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1364 break;
1365
1366 case ParamErr: {
1367 Bool isReg = ( Register == err_extra->addrinfo.akind );
1368 Char* s1 = ( isReg ? "contains" : "points to" );
1369 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
1370 if (isReg) tl_assert(!err_extra->isUnaddr);
1371
sewardj71bc3cb2005-05-19 00:25:45 +00001372 if (VG_(clo_xml))
1373 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
1374 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
1375 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00001376
1377 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1378 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1379 break;
1380 }
1381 case UserErr: {
1382 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
1383
sewardj71bc3cb2005-05-19 00:25:45 +00001384 if (VG_(clo_xml))
1385 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00001386 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00001387 "%s%s byte(s) found during client check request%s",
1388 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00001389
1390 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1391 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1392 break;
1393 }
1394 default:
1395 MAC_(pp_shared_Error)(err);
1396 break;
1397 }
1398}
1399
1400/*------------------------------------------------------------*/
1401/*--- Recording errors ---*/
1402/*------------------------------------------------------------*/
1403
njn02bc4b82005-05-15 17:28:26 +00001404/* Creates a copy of the 'extra' part, updates the copy with address info if
njn9e63cb62005-05-08 18:34:59 +00001405 necessary, and returns the copy. */
1406/* This one called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00001407static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001408{
1409 MAC_Error err_extra;
1410
1411 MAC_(clear_MAC_Error)( &err_extra );
1412 err_extra.size = size;
1413 err_extra.isUnaddr = False;
1414 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1415}
1416
1417/* This called from non-generated code */
1418
njn96364822005-05-08 19:04:53 +00001419static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1420 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001421{
1422 MAC_Error err_extra;
1423
1424 tl_assert(VG_INVALID_THREADID != tid);
1425 MAC_(clear_MAC_Error)( &err_extra );
1426 err_extra.addrinfo.akind = Undescribed;
1427 err_extra.isUnaddr = isUnaddr;
1428 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1429}
1430
1431/*------------------------------------------------------------*/
1432/*--- Suppressions ---*/
1433/*------------------------------------------------------------*/
1434
njn51d827b2005-05-09 01:02:08 +00001435static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001436{
1437 SuppKind skind;
1438
1439 if (MAC_(shared_recognised_suppression)(name, su))
1440 return True;
1441
1442 /* Extra suppressions not used by Addrcheck */
1443 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1444 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1445 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1446 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1447 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1448 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1449 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1450 else
1451 return False;
1452
1453 VG_(set_supp_kind)(su, skind);
1454 return True;
1455}
1456
1457/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001458/*--- Functions called directly from generated code: ---*/
1459/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001460/*------------------------------------------------------------*/
1461
1462/* Types: LOADV4, LOADV2, LOADV1 are:
1463 UWord fn ( Addr a )
1464 so they return 32-bits on 32-bit machines and 64-bits on
1465 64-bit machines. Addr has the same size as a host word.
1466
1467 LOADV8 is always ULong fn ( Addr a )
1468
1469 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1470 are a UWord, and for STOREV8 they are a ULong.
1471*/
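
/* Fast-path address decomposition, common to all the handlers below.
   Purely as an illustration (the address is made up, and assumed to lie
   within the primary map's coverage), for a = 0x2345678:

      sec_no = a >> 16    = 0x234    index into primary_map[]
      v_off  = a & 0xFFFF = 0x5678   byte offset into sm->vbyte[]
      a_off  = v_off >> 3 = 0xACF    byte offset into sm->abits[]
                                     (one A-bit byte per 8 data bytes)

   The initial 'a & mask' test in each handler rejects, with a single
   branch, both misaligned addresses and addresses beyond the primary
   map's range. */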
1472
sewardj95448072004-11-22 20:19:51 +00001473/* ------------------------ Size = 8 ------------------------ */
1474
njnaf839f52005-06-23 03:27:57 +00001475VG_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001476ULong MC_(helperc_LOADV8) ( Addr aA )
sewardj95448072004-11-22 20:19:51 +00001477{
sewardjf9d81612005-04-23 23:25:49 +00001478 PROF_EVENT(200, "helperc_LOADV8");
1479
1480# if VG_DEBUG_MEMORY >= 2
1481 return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1482# else
1483
1484 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1485 UWord a = (UWord)aA;
1486
1487 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1488 naturally aligned, or 'a' exceeds the range covered by the
1489 primary map. Either way we defer to the slow-path case. */
1490 if (EXPECTED_NOT_TAKEN(a & mask)) {
1491 PROF_EVENT(201, "helperc_LOADV8-slow1");
1492 return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1493 }
1494
1495 UWord sec_no = (UWord)(a >> 16);
1496
1497# if VG_DEBUG_MEMORY >= 1
1498 tl_assert(sec_no < N_PRIMARY_MAP);
1499# endif
1500
1501 SecMap* sm = primary_map[sec_no];
1502 UWord v_off = a & 0xFFFF;
1503 UWord a_off = v_off >> 3;
1504 UWord abits = (UWord)(sm->abits[a_off]);
1505
1506 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1507 /* Handle common case quickly: a is suitably aligned, is mapped,
1508 and is addressable. */
1509 return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
1510 } else {
1511 /* Slow but general case. */
1512 PROF_EVENT(202, "helperc_LOADV8-slow2");
1513 return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
1514 }
1515
1516# endif
sewardj95448072004-11-22 20:19:51 +00001517}
1518
njnaf839f52005-06-23 03:27:57 +00001519VG_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001520void MC_(helperc_STOREV8) ( Addr aA, ULong vbytes )
sewardj95448072004-11-22 20:19:51 +00001521{
sewardjf9d81612005-04-23 23:25:49 +00001522 PROF_EVENT(210, "helperc_STOREV8");
1523
1524# if VG_DEBUG_MEMORY >= 2
1525 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1526# else
1527
1528 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1529 UWord a = (UWord)aA;
1530
1531 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1532 naturally aligned, or 'a' exceeds the range covered by the
1533 primary map. Either way we defer to the slow-path case. */
1534 if (EXPECTED_NOT_TAKEN(a & mask)) {
1535 PROF_EVENT(211, "helperc_STOREV8-slow1");
1536 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1537 return;
1538 }
1539
1540 UWord sec_no = (UWord)(a >> 16);
1541
1542# if VG_DEBUG_MEMORY >= 1
1543 tl_assert(sec_no < N_PRIMARY_MAP);
1544# endif
1545
1546 SecMap* sm = primary_map[sec_no];
1547 UWord v_off = a & 0xFFFF;
1548 UWord a_off = v_off >> 3;
1549 UWord abits = (UWord)(sm->abits[a_off]);
1550
1551 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1552 && abits == VGM_BYTE_VALID)) {
1553 /* Handle common case quickly: a is suitably aligned, is mapped,
1554 and is addressable. */
1555 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes;
1556 } else {
1557 /* Slow but general case. */
1558 PROF_EVENT(212, "helperc_STOREV8-slow2");
1559 mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
1560 }
1561# endif
sewardj95448072004-11-22 20:19:51 +00001562}
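
/* Note the asymmetry between the load and store fast paths, here and in
   the smaller sizes below: the stores additionally require
   !is_distinguished_sm(sm).  The distinguished secondaries are shared by
   many primary_map entries and are expected never to change (the
   expensive sanity check below verifies this), so a store must take the
   slow path for them, whereas a load can safely read from them. */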
1563
1564/* ------------------------ Size = 4 ------------------------ */
1565
njnaf839f52005-06-23 03:27:57 +00001566VG_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001567UWord MC_(helperc_LOADV4) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001568{
sewardjc1a2cda2005-04-21 17:34:00 +00001569 PROF_EVENT(220, "helperc_LOADV4");
1570
1571# if VG_DEBUG_MEMORY >= 2
1572 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1573# else
1574
sewardj23eb2fd2005-04-22 16:29:19 +00001575 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001576 UWord a = (UWord)aA;
1577
1578 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1579 naturally aligned, or 'a' exceeds the range covered by the
1580 primary map. Either way we defer to the slow-path case. */
1581 if (EXPECTED_NOT_TAKEN(a & mask)) {
1582 PROF_EVENT(221, "helperc_LOADV4-slow1");
1583 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1584 }
1585
1586 UWord sec_no = (UWord)(a >> 16);
1587
1588# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001589 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001590# endif
1591
1592 SecMap* sm = primary_map[sec_no];
1593 UWord v_off = a & 0xFFFF;
1594 UWord a_off = v_off >> 3;
1595 UWord abits = (UWord)(sm->abits[a_off]);
1596 abits >>= (a & 4);
1597 abits &= 15;
1598 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
1599 /* Handle common case quickly: a is suitably aligned, is mapped,
1600 and is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001601 /* On a 32-bit platform, simply hoick the required 32 bits out of
1602 the vbyte array. On a 64-bit platform, also set the upper 32
1603 bits to 1 ("undefined"), just in case. This almost certainly
1604 isn't necessary, but be paranoid. */
1605 UWord ret = (UWord)0xFFFFFFFF00000000ULL;
1606 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
1607 return ret;
sewardjc1a2cda2005-04-21 17:34:00 +00001608 } else {
1609 /* Slow but general case. */
1610 PROF_EVENT(222, "helperc_LOADV4-slow2");
1611 return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
1612 }
1613
1614# endif
njn25e49d8e72002-09-23 09:36:25 +00001615}
1616
njnaf839f52005-06-23 03:27:57 +00001617VG_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001618void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001619{
sewardjc1a2cda2005-04-21 17:34:00 +00001620 PROF_EVENT(230, "helperc_STOREV4");
1621
1622# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001623 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001624# else
1625
sewardj23eb2fd2005-04-22 16:29:19 +00001626 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001627 UWord a = (UWord)aA;
1628
1629 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1630 naturally aligned, or 'a' exceeds the range covered by the
1631 primary map. Either way we defer to the slow-path case. */
1632 if (EXPECTED_NOT_TAKEN(a & mask)) {
1633 PROF_EVENT(231, "helperc_STOREV4-slow1");
1634 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1635 return;
1636 }
1637
1638 UWord sec_no = (UWord)(a >> 16);
1639
1640# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001641 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001642# endif
1643
1644 SecMap* sm = primary_map[sec_no];
1645 UWord v_off = a & 0xFFFF;
1646 UWord a_off = v_off >> 3;
1647 UWord abits = (UWord)(sm->abits[a_off]);
1648 abits >>= (a & 4);
1649 abits &= 15;
1650 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1651 && abits == VGM_NIBBLE_VALID)) {
1652 /* Handle common case quickly: a is suitably aligned, is mapped,
1653 and is addressable. */
1654 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
1655 } else {
1656 /* Slow but general case. */
1657 PROF_EVENT(232, "helperc_STOREV4-slow2");
1658 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1659 }
1660# endif
njn25e49d8e72002-09-23 09:36:25 +00001661}
1662
sewardj95448072004-11-22 20:19:51 +00001663/* ------------------------ Size = 2 ------------------------ */
1664
njnaf839f52005-06-23 03:27:57 +00001665VG_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001666UWord MC_(helperc_LOADV2) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001667{
sewardjc1a2cda2005-04-21 17:34:00 +00001668 PROF_EVENT(240, "helperc_LOADV2");
1669
1670# if VG_DEBUG_MEMORY >= 2
1671 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1672# else
1673
sewardj23eb2fd2005-04-22 16:29:19 +00001674 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001675 UWord a = (UWord)aA;
1676
1677 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1678 naturally aligned, or 'a' exceeds the range covered by the
1679 primary map. Either way we defer to the slow-path case. */
1680 if (EXPECTED_NOT_TAKEN(a & mask)) {
1681 PROF_EVENT(241, "helperc_LOADV2-slow1");
1682 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1683 }
1684
1685 UWord sec_no = (UWord)(a >> 16);
1686
1687# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001688 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001689# endif
1690
1691 SecMap* sm = primary_map[sec_no];
1692 UWord v_off = a & 0xFFFF;
1693 UWord a_off = v_off >> 3;
1694 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001695 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1696 /* Handle common case quickly: a is mapped, and the entire
1697 aligned 8-byte chunk it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001698 /* Set the upper 16/48 bits of the result to 1 ("undefined"),
1699 just in case. This almost certainly isn't necessary, but be
1700 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001701 return (~(UWord)0xFFFF)
1702 |
1703 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
1704 } else {
1705 /* Slow but general case. */
1706 PROF_EVENT(242, "helperc_LOADV2-slow2");
1707 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1708 }
1709
1710# endif
njn25e49d8e72002-09-23 09:36:25 +00001711}
1712
njnaf839f52005-06-23 03:27:57 +00001713VG_REGPARM(2)
sewardj5d28efc2005-04-21 22:16:29 +00001714void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001715{
sewardjc1a2cda2005-04-21 17:34:00 +00001716 PROF_EVENT(250, "helperc_STOREV2");
sewardj5d28efc2005-04-21 22:16:29 +00001717
1718# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001719 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001720# else
1721
sewardj23eb2fd2005-04-22 16:29:19 +00001722 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardj5d28efc2005-04-21 22:16:29 +00001723 UWord a = (UWord)aA;
1724
1725 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1726 naturally aligned, or 'a' exceeds the range covered by the
1727 primary map. Either way we defer to the slow-path case. */
1728 if (EXPECTED_NOT_TAKEN(a & mask)) {
1729 PROF_EVENT(251, "helperc_STOREV2-slow1");
1730 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1731 return;
1732 }
1733
1734 UWord sec_no = (UWord)(a >> 16);
1735
1736# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001737 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +00001738# endif
1739
1740 SecMap* sm = primary_map[sec_no];
1741 UWord v_off = a & 0xFFFF;
1742 UWord a_off = v_off >> 3;
1743 UWord abits = (UWord)(sm->abits[a_off]);
1744 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1745 && abits == VGM_BYTE_VALID)) {
1746 /* Handle common case quickly. */
1747 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
1748 } else {
1749 /* Slow but general case. */
1750 PROF_EVENT(252, "helperc_STOREV2-slow2");
1751 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1752 }
1753# endif
njn25e49d8e72002-09-23 09:36:25 +00001754}
1755
sewardj95448072004-11-22 20:19:51 +00001756/* ------------------------ Size = 1 ------------------------ */
1757
njnaf839f52005-06-23 03:27:57 +00001758VG_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001759UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001760{
sewardjc1a2cda2005-04-21 17:34:00 +00001761 PROF_EVENT(260, "helperc_LOADV1");
1762
1763# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001764 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001765# else
1766
sewardj23eb2fd2005-04-22 16:29:19 +00001767 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001768 UWord a = (UWord)aA;
1769
1770 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1771 exceeds the range covered by the primary map, in which case we
1772 defer to the slow-path case. */
1773 if (EXPECTED_NOT_TAKEN(a & mask)) {
1774 PROF_EVENT(261, "helperc_LOADV1-slow1");
1775 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1776 }
1777
1778 UWord sec_no = (UWord)(a >> 16);
1779
1780# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001781 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001782# endif
1783
1784 SecMap* sm = primary_map[sec_no];
1785 UWord v_off = a & 0xFFFF;
1786 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001787 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001788 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1789 /* Handle common case quickly: a is mapped, and the entire
1790 aligned 8-byte chunk it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001791 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1792 just in case. This almost certainly isn't necessary, but be
1793 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001794 return (~(UWord)0xFF)
1795 |
1796 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1797 } else {
1798 /* Slow but general case. */
1799 PROF_EVENT(262, "helperc_LOADV1-slow2");
1800 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1801 }
1802# endif
njn25e49d8e72002-09-23 09:36:25 +00001803}
1804
sewardjc1a2cda2005-04-21 17:34:00 +00001805
njnaf839f52005-06-23 03:27:57 +00001806VG_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001807void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001808{
sewardjc1a2cda2005-04-21 17:34:00 +00001809 PROF_EVENT(270, "helperc_STOREV1");
1810
1811# if VG_DEBUG_MEMORY >= 2
1812 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1813# else
1814
sewardj23eb2fd2005-04-22 16:29:19 +00001815 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001816 UWord a = (UWord)aA;
1817 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1818 exceeds the range covered by the primary map, in which case we
1819 defer to the slow-path case. */
1820 if (EXPECTED_NOT_TAKEN(a & mask)) {
1821 PROF_EVENT(271, "helperc_STOREV1-slow1");
1822 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1823 return;
1824 }
1825
1826 UWord sec_no = (UWord)(a >> 16);
1827
1828# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001829 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001830# endif
1831
1832 SecMap* sm = primary_map[sec_no];
1833 UWord v_off = a & 0xFFFF;
1834 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001835 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001836 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1837 && abits == VGM_BYTE_VALID)) {
1838 /* Handle common case quickly: a is mapped, and the entire
1839 aligned 8-byte chunk it lives in is addressable. */
1840 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1841 } else {
1842 PROF_EVENT(272, "helperc_STOREV1-slow2");
1843 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1844 }
1845
1846# endif
njn25e49d8e72002-09-23 09:36:25 +00001847}
1848
1849
sewardjc859fbf2005-04-22 21:10:28 +00001850/*------------------------------------------------------------*/
1851/*--- Functions called directly from generated code: ---*/
1852/*--- Value-check failure handlers. ---*/
1853/*------------------------------------------------------------*/
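
/* These are invoked when instrumented code finds that a value it is
   about to use is (partially) undefined.  The numeric suffix is the
   width, in bytes, of the offending value; the 0-byte variant covers a
   conditional test on undefined data (compare the "Cond" suppression
   kind above, which maps to Value0Supp). */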
njn25e49d8e72002-09-23 09:36:25 +00001854
njn5c004e42002-11-18 11:04:50 +00001855void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001856{
njn9e63cb62005-05-08 18:34:59 +00001857 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001858}
1859
njn5c004e42002-11-18 11:04:50 +00001860void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001861{
njn9e63cb62005-05-08 18:34:59 +00001862 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001863}
1864
njn5c004e42002-11-18 11:04:50 +00001865void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001866{
njn9e63cb62005-05-08 18:34:59 +00001867 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001868}
1869
sewardj11bcc4e2005-04-23 22:38:38 +00001870void MC_(helperc_value_check8_fail) ( void )
1871{
njn9e63cb62005-05-08 18:34:59 +00001872 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001873}
1874
njnaf839f52005-06-23 03:27:57 +00001875VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001876{
njn9e63cb62005-05-08 18:34:59 +00001877 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001878}
1879
njn25e49d8e72002-09-23 09:36:25 +00001880
sewardj45d94cc2005-04-20 14:44:11 +00001881//zz /*------------------------------------------------------------*/
1882//zz /*--- Metadata get/set functions, for client requests. ---*/
1883//zz /*------------------------------------------------------------*/
1884//zz
1885//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1886//zz error, 3 == addressing error. */
1887//zz static Int mc_get_or_set_vbits_for_client (
1888//zz ThreadId tid,
1889//zz Addr dataV,
1890//zz Addr vbitsV,
1891//zz SizeT size,
1892//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1893//zz )
1894//zz {
1895//zz Bool addressibleD = True;
1896//zz Bool addressibleV = True;
1897//zz UInt* data = (UInt*)dataV;
1898//zz UInt* vbits = (UInt*)vbitsV;
1899//zz SizeT szW = size / 4; /* sigh */
1900//zz SizeT i;
1901//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1902//zz UInt* vbitsP = NULL; /* ditto */
1903//zz
1904//zz /* Check alignment of args. */
1905//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1906//zz return 2;
1907//zz if ((size & 3) != 0)
1908//zz return 2;
1909//zz
1910//zz /* Check that arrays are addressible. */
1911//zz for (i = 0; i < szW; i++) {
1912//zz dataP = &data[i];
1913//zz vbitsP = &vbits[i];
1914//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1915//zz addressibleD = False;
1916//zz break;
1917//zz }
1918//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1919//zz addressibleV = False;
1920//zz break;
1921//zz }
1922//zz }
1923//zz if (!addressibleD) {
1924//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1925//zz setting ? True : False );
1926//zz return 3;
1927//zz }
1928//zz if (!addressibleV) {
1929//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1930//zz setting ? False : True );
1931//zz return 3;
1932//zz }
1933//zz
1934//zz /* Do the copy */
1935//zz if (setting) {
1936//zz /* setting */
1937//zz for (i = 0; i < szW; i++) {
1938//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00001939//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00001940//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1941//zz }
1942//zz } else {
1943//zz /* getting */
1944//zz for (i = 0; i < szW; i++) {
1945//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1946//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1947//zz }
1948//zz }
1949//zz
1950//zz return 1;
1951//zz }
sewardj05fe85e2005-04-27 22:46:36 +00001952
1953
1954/*------------------------------------------------------------*/
1955/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1956/*------------------------------------------------------------*/
1957
1958/* For the memory leak detector, say whether an entire 64k chunk of
1959 address space is possibly in use, or not. If in doubt return
1960 True.
1961*/
1962static
1963Bool mc_is_within_valid_secondary ( Addr a )
1964{
1965 SecMap* sm = maybe_get_secmap_for ( a );
1966 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
1967 /* Definitely not in use. */
1968 return False;
1969 } else {
1970 return True;
1971 }
1972}
1973
1974
1975/* For the memory leak detector, say whether or not a given word
1976 address is to be regarded as valid. */
1977static
1978Bool mc_is_valid_aligned_word ( Addr a )
1979{
1980 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
1981 if (sizeof(UWord) == 4) {
1982 tl_assert(VG_IS_4_ALIGNED(a));
1983 } else {
1984 tl_assert(VG_IS_8_ALIGNED(a));
1985 }
1986 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
1987 return True;
1988 } else {
1989 return False;
1990 }
1991}
sewardja4495682002-10-21 07:29:59 +00001992
1993
nethercote996901a2004-08-03 13:29:09 +00001994/* Leak detector for this tool. We do nothing tool-specific here; we
sewardja4495682002-10-21 07:29:59 +00001995 merely run the generic leak detector with parameters suitable for
nethercote996901a2004-08-03 13:29:09 +00001996 this tool. */
njnb8dca862005-03-14 02:42:44 +00001997static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001998{
sewardj05fe85e2005-04-27 22:46:36 +00001999 MAC_(do_detect_memory_leaks) (
2000 tid,
2001 mode,
2002 mc_is_within_valid_secondary,
2003 mc_is_valid_aligned_word
2004 );
njn25e49d8e72002-09-23 09:36:25 +00002005}
2006
2007
sewardjc859fbf2005-04-22 21:10:28 +00002008/*------------------------------------------------------------*/
2009/*--- Initialisation ---*/
2010/*------------------------------------------------------------*/
2011
2012static void init_shadow_memory ( void )
2013{
2014 Int i;
2015 SecMap* sm;
2016
2017 /* Build the 3 distinguished secondaries */
2018 tl_assert(VGM_BIT_INVALID == 1);
2019 tl_assert(VGM_BIT_VALID == 0);
2020 tl_assert(VGM_BYTE_INVALID == 0xFF);
2021 tl_assert(VGM_BYTE_VALID == 0);
2022
2023 /* Set A invalid, V invalid. */
2024 sm = &sm_distinguished[SM_DIST_NOACCESS];
2025 for (i = 0; i < 65536; i++)
2026 sm->vbyte[i] = VGM_BYTE_INVALID;
2027 for (i = 0; i < 8192; i++)
2028 sm->abits[i] = VGM_BYTE_INVALID;
2029
2030 /* Set A valid, V invalid. */
2031 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2032 for (i = 0; i < 65536; i++)
2033 sm->vbyte[i] = VGM_BYTE_INVALID;
2034 for (i = 0; i < 8192; i++)
2035 sm->abits[i] = VGM_BYTE_VALID;
2036
2037 /* Set A valid, V valid. */
2038 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2039 for (i = 0; i < 65536; i++)
2040 sm->vbyte[i] = VGM_BYTE_VALID;
2041 for (i = 0; i < 8192; i++)
2042 sm->abits[i] = VGM_BYTE_VALID;
2043
2044 /* Set up the primary map. */
2045 /* These entries gradually get overwritten as the used address
2046 space expands. */
2047 for (i = 0; i < N_PRIMARY_MAP; i++)
2048 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2049
2050 /* auxmap_size = auxmap_used = 0;
2051 no ... these are statically initialised */
2052}
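
/* As set up above, each SecMap carries 65536 V bytes plus 8192 A-bit
   bytes, i.e. 72KB of shadow for every 64KB chunk of address space it
   describes.  Regions not yet used by the client all share the single
   no-access distinguished secondary, so they cost no dedicated shadow
   storage. */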
2053
2054
2055/*------------------------------------------------------------*/
2056/*--- Sanity check machinery (permanently engaged) ---*/
2057/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002058
njn51d827b2005-05-09 01:02:08 +00002059static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002060{
jseward9800fd32004-01-04 23:08:04 +00002061 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002062 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002063 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002064 return True;
njn25e49d8e72002-09-23 09:36:25 +00002065}
2066
njn51d827b2005-05-09 01:02:08 +00002067static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002068{
sewardj23eb2fd2005-04-22 16:29:19 +00002069 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002070 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002071 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002072
sewardj23eb2fd2005-04-22 16:29:19 +00002073 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002074 PROF_EVENT(491, "expensive_sanity_check");
2075
sewardj23eb2fd2005-04-22 16:29:19 +00002076 /* Check that the 3 distinguished SMs are still as they should
2077 be. */
njn25e49d8e72002-09-23 09:36:25 +00002078
sewardj45d94cc2005-04-20 14:44:11 +00002079 /* Check A invalid, V invalid. */
2080 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002081 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002082 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002083 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002084 for (i = 0; i < 8192; i++)
2085 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002086 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002087
sewardj45d94cc2005-04-20 14:44:11 +00002088 /* Check A valid, V invalid. */
2089 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2090 for (i = 0; i < 65536; i++)
2091 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002092 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002093 for (i = 0; i < 8192; i++)
2094 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002095 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002096
2097 /* Check A valid, V valid. */
2098 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2099 for (i = 0; i < 65536; i++)
2100 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002101 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002102 for (i = 0; i < 8192; i++)
2103 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002104 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002105
sewardj23eb2fd2005-04-22 16:29:19 +00002106 if (bad) {
2107 VG_(printf)("memcheck expensive sanity: "
2108 "distinguished_secondaries have changed\n");
2109 return False;
2110 }
2111
2112 /* check for nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002113 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002114 bad = True;
2115
2116 if (bad) {
2117 VG_(printf)("memcheck expensive sanity: "
2118 "nonsensical auxmap sizing\n");
2119 return False;
2120 }
2121
2122 /* check that the number of secmaps issued matches the number that
2123 are reachable (iow, no secmap leaks) */
2124 n_secmaps_found = 0;
2125 for (i = 0; i < N_PRIMARY_MAP; i++) {
2126 if (primary_map[i] == NULL) {
2127 bad = True;
2128 } else {
2129 if (!is_distinguished_sm(primary_map[i]))
2130 n_secmaps_found++;
2131 }
2132 }
2133
2134 for (i = 0; i < auxmap_used; i++) {
2135 if (auxmap[i].sm == NULL) {
2136 bad = True;
2137 } else {
2138 if (!is_distinguished_sm(auxmap[i].sm))
2139 n_secmaps_found++;
2140 }
2141 }
2142
2143 if (n_secmaps_found != n_secmaps_issued)
2144 bad = True;
2145
2146 if (bad) {
2147 VG_(printf)("memcheck expensive sanity: "
2148 "apparent secmap leakage\n");
2149 return False;
2150 }
2151
2152 /* check that auxmap only covers address space that the primary
2153 doesn't */
2154
2155 for (i = 0; i < auxmap_used; i++)
2156 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2157 bad = True;
2158
2159 if (bad) {
2160 VG_(printf)("memcheck expensive sanity: "
2161 "auxmap covers wrong address space\n");
2162 return False;
2163 }
2164
2165 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002166
2167 return True;
2168}
sewardj45d94cc2005-04-20 14:44:11 +00002169
njn25e49d8e72002-09-23 09:36:25 +00002170
njn25e49d8e72002-09-23 09:36:25 +00002171/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002172/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002173/*------------------------------------------------------------*/
2174
njn51d827b2005-05-09 01:02:08 +00002175Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00002176
njn51d827b2005-05-09 01:02:08 +00002177static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002178{
njn45270a22005-03-27 01:00:11 +00002179 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00002180 else
njn43c799e2003-04-08 00:08:52 +00002181 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002182
2183 return True;
njn25e49d8e72002-09-23 09:36:25 +00002184}
2185
njn51d827b2005-05-09 01:02:08 +00002186static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002187{
njn3e884182003-04-15 13:03:23 +00002188 MAC_(print_common_usage)();
2189 VG_(printf)(
2190" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
2191 );
2192}
2193
njn51d827b2005-05-09 01:02:08 +00002194static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002195{
2196 MAC_(print_common_debug_usage)();
2197 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00002198" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00002199 );
njn25e49d8e72002-09-23 09:36:25 +00002200}
2201
nethercote8b76fe52004-11-08 19:20:09 +00002202/*------------------------------------------------------------*/
2203/*--- Client requests ---*/
2204/*------------------------------------------------------------*/
2205
2206/* Client block management:
2207
2208 This is managed as an expanding array of client block descriptors.
2209 Indices of live descriptors are issued to the client, so it can ask
2210 to free them later. Therefore we cannot slide live entries down
2211 over dead ones. Instead we must use free/inuse flags and scan for
2212 an empty slot at allocation time. This in turn means allocation is
2213 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002214
sewardjedc75ab2005-03-15 23:30:32 +00002215 An unused block has start == size == 0
2216*/
nethercote8b76fe52004-11-08 19:20:09 +00002217
2218typedef
2219 struct {
2220 Addr start;
2221 SizeT size;
2222 ExeContext* where;
sewardjedc75ab2005-03-15 23:30:32 +00002223 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002224 }
2225 CGenBlock;
2226
2227/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002228static UInt cgb_size = 0;
2229static UInt cgb_used = 0;
2230static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002231
2232/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002233static UInt cgb_used_MAX = 0; /* Max in use. */
2234static UInt cgb_allocs = 0; /* Number of allocs. */
2235static UInt cgb_discards = 0; /* Number of discards. */
2236static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002237
2238
2239static
njn695c16e2005-03-27 03:40:28 +00002240Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002241{
2242 UInt i, sz_new;
2243 CGenBlock* cgbs_new;
2244
njn695c16e2005-03-27 03:40:28 +00002245 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002246
njn695c16e2005-03-27 03:40:28 +00002247 for (i = 0; i < cgb_used; i++) {
2248 cgb_search++;
2249 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002250 return i;
2251 }
2252
2253 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002254 if (cgb_used < cgb_size) {
2255 cgb_used++;
2256 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002257 }
2258
2259 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002260 tl_assert(cgb_used == cgb_size);
2261 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002262
2263 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002264 for (i = 0; i < cgb_used; i++)
2265 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002266
njn695c16e2005-03-27 03:40:28 +00002267 if (cgbs != NULL)
2268 VG_(free)( cgbs );
2269 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002270
njn695c16e2005-03-27 03:40:28 +00002271 cgb_size = sz_new;
2272 cgb_used++;
2273 if (cgb_used > cgb_used_MAX)
2274 cgb_used_MAX = cgb_used;
2275 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002276}
2277
2278
2279static void show_client_block_stats ( void )
2280{
2281 VG_(message)(Vg_DebugMsg,
2282 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002283 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002284 );
2285}
2286
2287static Bool find_addr(VgHashNode* sh_ch, void* ap)
2288{
2289 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
2290 Addr a = *(Addr*)ap;
2291
njn717cde52005-05-10 02:47:21 +00002292 return VG_(addr_is_in_block)(a, m->data, m->size, MAC_MALLOC_REDZONE_SZB);
nethercote8b76fe52004-11-08 19:20:09 +00002293}
2294
2295static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2296{
2297 UInt i;
2298 /* VG_(printf)("try to identify %d\n", a); */
2299
2300 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002301 for (i = 0; i < cgb_used; i++) {
2302 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002303 continue;
njn717cde52005-05-10 02:47:21 +00002304 // Use zero as the redzone for client blocks.
2305 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00002306 MAC_Mempool **d, *mp;
2307
2308 /* OK - maybe it's a mempool, too? */
2309 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
njn695c16e2005-03-27 03:40:28 +00002310 (UWord)cgbs[i].start,
nethercote8b76fe52004-11-08 19:20:09 +00002311 (void*)&d);
2312 if (mp != NULL) {
2313 if (mp->chunks != NULL) {
2314 MAC_Chunk *mc;
2315
2316 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
2317 if (mc != NULL) {
2318 ai->akind = UserG;
2319 ai->blksize = mc->size;
2320 ai->rwoffset = (Int)(a) - (Int)mc->data;
2321 ai->lastchange = mc->where;
2322 return True;
2323 }
2324 }
2325 ai->akind = Mempool;
njn695c16e2005-03-27 03:40:28 +00002326 ai->blksize = cgbs[i].size;
2327 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2328 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002329 return True;
2330 }
2331 ai->akind = UserG;
njn695c16e2005-03-27 03:40:28 +00002332 ai->blksize = cgbs[i].size;
2333 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2334 ai->lastchange = cgbs[i].where;
2335 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002336 return True;
2337 }
2338 }
2339 return False;
2340}
2341
njn51d827b2005-05-09 01:02:08 +00002342static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002343{
2344 Int i;
2345 Bool ok;
2346 Addr bad_addr;
2347
njnfc26ff92004-11-22 19:12:49 +00002348 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002349 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2350 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2351 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2352 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2353 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2354 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2355 return False;
2356
2357 switch (arg[0]) {
2358 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2359 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2360 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002361 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2362 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002363 *ret = ok ? (UWord)NULL : bad_addr;
2364 break;
2365
2366 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2367 MC_ReadResult res;
2368 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2369 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002370 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2371 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002372 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002373 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2374 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002375 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
2376 break;
2377 }
2378
2379 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002380 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00002381 *ret = 0; /* return value is meaningless */
2382 break;
2383
2384 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002385 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002386 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002387 break;
2388
2389 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002390 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002391 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002392 break;
2393
2394 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002395 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002396 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002397 break;
2398
sewardjedc75ab2005-03-15 23:30:32 +00002399 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
2400 if (arg[1] != 0 && arg[2] != 0) {
njn695c16e2005-03-27 03:40:28 +00002401 i = alloc_client_block();
2402 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2403 cgbs[i].start = arg[1];
2404 cgbs[i].size = arg[2];
2405 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2406 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002407
2408 *ret = i;
2409 } else
2410 *ret = -1;
2411 break;
2412
nethercote8b76fe52004-11-08 19:20:09 +00002413 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002414 if (cgbs == NULL
2415 || arg[2] >= cgb_used ||
2416 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002417 *ret = 1;
2418 } else {
njn695c16e2005-03-27 03:40:28 +00002419 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2420 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2421 VG_(free)(cgbs[arg[2]].desc);
2422 cgb_discards++;
sewardjedc75ab2005-03-15 23:30:32 +00002423 *ret = 0;
2424 }
nethercote8b76fe52004-11-08 19:20:09 +00002425 break;
2426
sewardj45d94cc2005-04-20 14:44:11 +00002427//zz case VG_USERREQ__GET_VBITS:
2428//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2429//zz error. */
2430//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2431//zz *ret = mc_get_or_set_vbits_for_client
2432//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2433//zz break;
2434//zz
2435//zz case VG_USERREQ__SET_VBITS:
2436//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2437//zz error. */
2438//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2439//zz *ret = mc_get_or_set_vbits_for_client
2440//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2441//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002442
2443 default:
2444 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2445 return True;
2446 } else {
2447 VG_(message)(Vg_UserMsg,
2448 "Warning: unknown memcheck client request code %llx",
2449 (ULong)arg[0]);
2450 return False;
2451 }
2452 }
2453 return True;
2454}
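
/* Client code normally reaches the requests above through the macros in
   memcheck.h rather than by issuing request codes directly.  A sketch of
   typical usage follows; the macro names are those believed to be
   exported by memcheck.h of this vintage (an assumption -- check the
   installed header), and the buffer is purely illustrative:

      #include <valgrind/memcheck.h>

      char* buf = malloc(100);
      VALGRIND_CHECK_WRITABLE(buf, 100);     // CHECK_WRITABLE above
      VALGRIND_MAKE_NOACCESS(buf + 50, 50);  // MAKE_NOACCESS above
      VALGRIND_DO_LEAK_CHECK;                // DO_LEAK_CHECK above
*/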
njn25e49d8e72002-09-23 09:36:25 +00002455
2456/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002457/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002458/*------------------------------------------------------------*/
2459
njn51d827b2005-05-09 01:02:08 +00002460static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002461{
sewardj71bc3cb2005-05-19 00:25:45 +00002462 /* If we've been asked to emit XML, mash around various other
2463 options so as to constrain the output somewhat. */
2464 if (VG_(clo_xml)) {
2465 /* Extract as much info as possible from the leak checker. */
sewardj09890d82005-05-20 02:45:15 +00002466 /* MAC_(clo_show_reachable) = True; */
sewardj71bc3cb2005-05-19 00:25:45 +00002467 MAC_(clo_leak_check) = LC_Full;
2468 }
njn5c004e42002-11-18 11:04:50 +00002469}
2470
njn51d827b2005-05-09 01:02:08 +00002471static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002472{
nethercote8b76fe52004-11-08 19:20:09 +00002473 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002474
sewardj23eb2fd2005-04-22 16:29:19 +00002475 Int i, n_accessible_dist;
2476 SecMap* sm;
2477
sewardj45d94cc2005-04-20 14:44:11 +00002478 if (VG_(clo_verbosity) > 1) {
2479 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002480 " memcheck: sanity checks: %d cheap, %d expensive",
2481 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002482 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002483 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2484 auxmap_used,
2485 auxmap_used * 64,
2486 auxmap_used / 16 );
2487 VG_(message)(Vg_DebugMsg,
2488 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002489 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002490 VG_(message)(Vg_DebugMsg,
2491 " memcheck: secondaries: %d issued (%dk, %dM)",
2492 n_secmaps_issued,
2493 n_secmaps_issued * 64,
2494 n_secmaps_issued / 16 );
2495
2496 n_accessible_dist = 0;
2497 for (i = 0; i < N_PRIMARY_MAP; i++) {
2498 sm = primary_map[i];
2499 if (is_distinguished_sm(sm)
2500 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2501 n_accessible_dist ++;
2502 }
2503 for (i = 0; i < auxmap_used; i++) {
2504 sm = auxmap[i].sm;
2505 if (is_distinguished_sm(sm)
2506 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2507 n_accessible_dist ++;
2508 }
2509
2510 VG_(message)(Vg_DebugMsg,
2511 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2512 n_accessible_dist,
2513 n_accessible_dist * 64,
2514 n_accessible_dist / 16 );
2515
sewardj45d94cc2005-04-20 14:44:11 +00002516 }
2517
njn5c004e42002-11-18 11:04:50 +00002518 if (0) {
2519 VG_(message)(Vg_DebugMsg,
2520 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002521 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002522 }
njn25e49d8e72002-09-23 09:36:25 +00002523}
2524
njn51d827b2005-05-09 01:02:08 +00002525static void mc_pre_clo_init(void)
2526{
2527 VG_(details_name) ("Memcheck");
2528 VG_(details_version) (NULL);
2529 VG_(details_description) ("a memory error detector");
2530 VG_(details_copyright_author)(
2531 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
2532 VG_(details_bug_reports_to) (VG_BUGS_TO);
2533 VG_(details_avg_translation_sizeB) ( 370 );
2534
2535 VG_(basic_tool_funcs) (mc_post_clo_init,
2536 MC_(instrument),
2537 mc_fini);
2538
2539 VG_(needs_core_errors) ();
2540 VG_(needs_tool_errors) (MAC_(eq_Error),
2541 mc_pp_Error,
2542 MAC_(update_extra),
2543 mc_recognised_suppression,
2544 MAC_(read_extra_suppression_info),
2545 MAC_(error_matches_suppression),
2546 MAC_(get_error_name),
2547 MAC_(print_extra_suppression_info));
2548 VG_(needs_libc_freeres) ();
2549 VG_(needs_command_line_options)(mc_process_cmd_line_option,
2550 mc_print_usage,
2551 mc_print_debug_usage);
2552 VG_(needs_client_requests) (mc_handle_client_request);
2553 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
2554 mc_expensive_sanity_check);
2555 VG_(needs_shadow_memory) ();
2556
njnfc51f8d2005-06-21 03:20:17 +00002557 VG_(needs_malloc_replacement) (MAC_(malloc),
njn51d827b2005-05-09 01:02:08 +00002558 MAC_(__builtin_new),
2559 MAC_(__builtin_vec_new),
2560 MAC_(memalign),
2561 MAC_(calloc),
2562 MAC_(free),
2563 MAC_(__builtin_delete),
2564 MAC_(__builtin_vec_delete),
2565 MAC_(realloc),
2566 MAC_MALLOC_REDZONE_SZB );
2567
2568 MAC_( new_mem_heap) = & mc_new_mem_heap;
2569 MAC_( ban_mem_heap) = & mc_make_noaccess;
2570 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
2571 MAC_( die_mem_heap) = & mc_make_noaccess;
2572 MAC_(check_noaccess) = & mc_check_noaccess;
2573
2574 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
2575 VG_(track_new_mem_stack_signal)( & mc_make_writable );
2576 VG_(track_new_mem_brk) ( & mc_make_writable );
2577 VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );
2578
2579 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
2580
2581 VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
2582 VG_(track_die_mem_brk) ( & mc_make_noaccess );
2583 VG_(track_die_mem_munmap) ( & mc_make_noaccess );
2584
2585 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2586 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2587 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2588 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2589 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2590 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
2591
2592 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2593 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2594 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2595 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2596 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2597 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
2598
2599 VG_(track_ban_mem_stack) ( & mc_make_noaccess );
2600
2601 VG_(track_pre_mem_read) ( & mc_check_is_readable );
2602 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2603 VG_(track_pre_mem_write) ( & mc_check_is_writable );
2604 VG_(track_post_mem_write) ( & mc_post_mem_write );
2605
2606 VG_(track_pre_reg_read) ( & mc_pre_reg_read );
2607
2608 VG_(track_post_reg_write) ( & mc_post_reg_write );
2609 VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );
2610
2611 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2612 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2613 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
2614
2615 /* Additional block description for VG_(describe_addr)() */
2616 MAC_(describe_addr_supp) = client_perm_maybe_describe;
2617
2618 init_shadow_memory();
2619 MAC_(common_pre_clo_init)();
2620
2621 tl_assert( mc_expensive_sanity_check() );
2622}
2623
2624VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init, 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002625
njn25e49d8e72002-09-23 09:36:25 +00002626/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002627/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002628/*--------------------------------------------------------------------*/