blob: 3a667e202f342940801f197dc3ff7ad0227be49f [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjc859fbf2005-04-22 21:10:28 +000033/* TODO 22 Apr 05
sewardj45d94cc2005-04-20 14:44:11 +000034
sewardjc859fbf2005-04-22 21:10:28 +000035 test whether it would be faster, for LOADV4, to check
36 only for 8-byte validity on the fast path
sewardj45d94cc2005-04-20 14:44:11 +000037*/
38
njn25cac76cb2002-09-23 11:21:57 +000039#include "mc_include.h"
40#include "memcheck.h" /* for client requests */
njn4802b382005-06-11 04:58:29 +000041#include "pub_tool_aspacemgr.h"
njn97405b22005-06-02 03:39:33 +000042#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000043#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000044#include "pub_tool_libcprint.h"
njn25e49d8e72002-09-23 09:36:25 +000045
sewardj45d94cc2005-04-20 14:44:11 +000046
sewardjc1a2cda2005-04-21 17:34:00 +000047#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
48#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
49
50/* Define to debug the mem audit system. Set to:
51 0 no debugging, fast cases are used
52 1 some sanity checking, fast cases are used
53 2 max sanity checking, only slow cases are used
54*/
sewardj23eb2fd2005-04-22 16:29:19 +000055#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000056
njn25e49d8e72002-09-23 09:36:25 +000057#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
58
njn25e49d8e72002-09-23 09:36:25 +000059
njn25e49d8e72002-09-23 09:36:25 +000060/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000061/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000062/*------------------------------------------------------------*/
63
sewardjc859fbf2005-04-22 21:10:28 +000064/* TODO: fix this comment */
65//zz /* All reads and writes are checked against a memory map, which
66//zz records the state of all memory in the process. The memory map is
67//zz organised like this:
68//zz
69//zz The top 16 bits of an address are used to index into a top-level
70//zz map table, containing 65536 entries. Each entry is a pointer to a
71//zz second-level map, which records the accesibililty and validity
72//zz permissions for the 65536 bytes indexed by the lower 16 bits of the
73//zz address. Each byte is represented by nine bits, one indicating
74//zz accessibility, the other eight validity. So each second-level map
75//zz contains 73728 bytes. This two-level arrangement conveniently
76//zz divides the 4G address space into 64k lumps, each size 64k bytes.
77//zz
78//zz All entries in the primary (top-level) map must point to a valid
79//zz secondary (second-level) map. Since most of the 4G of address
80//zz space will not be in use -- ie, not mapped at all -- there is a
//zz   distinguished secondary map, which indicates 'not addressable and
sewardjc859fbf2005-04-22 21:10:28 +000082//zz not valid' writeable for all bytes. Entries in the primary map for
83//zz which the entire 64k is not in use at all point at this
84//zz distinguished map.
85//zz
//zz   There are actually 3 distinguished secondaries.  These are used to
//zz   represent a memory range which is either not addressable (validity
//zz   doesn't matter), addressable+not valid, or addressable+valid.
89//zz
90//zz [...] lots of stuff deleted due to out of date-ness
91//zz
92//zz As a final optimisation, the alignment and address checks for
93//zz 4-byte loads and stores are combined in a neat way. The primary
94//zz map is extended to have 262144 entries (2^18), rather than 2^16.
95//zz The top 3/4 of these entries are permanently set to the
96//zz distinguished secondary map. For a 4-byte load/store, the
97//zz top-level map is indexed not with (addr >> 16) but instead f(addr),
98//zz where
99//zz
100//zz f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
101//zz = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
102//zz = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
103//zz
104//zz ie the lowest two bits are placed above the 16 high address bits.
105//zz If either of these two bits are nonzero, the address is misaligned;
106//zz this will select a secondary map from the upper 3/4 of the primary
107//zz map. Because this is always the distinguished secondary map, a
108//zz (bogus) address check failure will result. The failure handling
109//zz code can then figure out whether this is a genuine addr check
110//zz failure or whether it is a possibly-legitimate access at a
111//zz misaligned address.
112//zz */
113
sewardj45d94cc2005-04-20 14:44:11 +0000114/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000115
sewardj23eb2fd2005-04-22 16:29:19 +0000116/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000117
sewardje4ccc012005-05-02 12:53:38 +0000118#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000119
120/* cover the entire address space */
121# define N_PRIMARY_BITS 16
122
123#else
124
125/* Just handle the first 16G fast and the rest via auxiliary
126 primaries. */
127# define N_PRIMARY_BITS 18
128
129#endif
130
sewardj45d94cc2005-04-20 14:44:11 +0000131
sewardjc1a2cda2005-04-21 17:34:00 +0000132/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000133#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000134
135/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000136#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
137
138
139/* --------------- Stats maps --------------- */
140
141static Int n_secmaps_issued = 0;
142static ULong n_auxmap_searches = 0;
143static ULong n_auxmap_cmps = 0;
144static Int n_sanity_cheap = 0;
145static Int n_sanity_expensive = 0;
sewardj45d94cc2005-04-20 14:44:11 +0000146
147
148/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000149
typedef
   struct {
      /* Addressability (A) bits: one bit per byte of the 64k chunk,
         packed 8 per UChar, hence 65536/8 == 8192 bytes. */
      UChar abits[8192];
      /* Validity (V) bits: one full byte per byte of the 64k chunk. */
      UChar vbyte[65536];
   }
   SecMap;
156
sewardj45d94cc2005-04-20 14:44:11 +0000157/* 3 distinguished secondary maps, one for no-access, one for
158 accessible but undefined, and one for accessible and defined.
159 Distinguished secondaries may never be modified.
160*/
/* Indices into sm_distinguished[] for the three shared, read-only
   secondaries. */
#define SM_DIST_NOACCESS 0
#define SM_DIST_ACCESS_UNDEFINED 1
#define SM_DIST_ACCESS_DEFINED 2

static SecMap sm_distinguished[3];

/* True iff sm points at one of the three distinguished secondaries.
   Relies on the three living contiguously in sm_distinguished[]. */
static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}
njnb8dca862005-03-14 02:42:44 +0000170
sewardj45d94cc2005-04-20 14:44:11 +0000171/* dist_sm points to one of our three distinguished secondaries. Make
172 a copy of it so that we can write to it.
173*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   /* Only the three distinguished secondaries may be cloned. */
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   /* Allocate a private secondary and copy the distinguished one's
      contents into it, so the caller may modify it freely. */
   new_sm = VG_(shadow_alloc)(sizeof(SecMap));
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;   /* stats only */
   return new_sm;
}
njnb8dca862005-03-14 02:42:44 +0000186
sewardj45d94cc2005-04-20 14:44:11 +0000187
188/* --------------- Primary maps --------------- */
189
/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;   /* 64k-aligned start of the range covered */
      SecMap* sm;     /* secondary map for that 64k range */
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 20000 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;   /* capacity (fixed, for now) */
static Int        auxmap_used = 0;           /* entries currently in use */
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];
215
sewardj45d94cc2005-04-20 14:44:11 +0000216
217/* Find an entry in the auxiliary map. If an entry is found, move it
218 one step closer to the front of the array, then return its address.
sewardj05fe85e2005-04-27 22:46:36 +0000219 If an entry is not found, return NULL. Note carefully that
sewardj45d94cc2005-04-20 14:44:11 +0000220 because a each call potentially rearranges the entries, each call
221 to this function invalidates ALL AuxMapEnt*s previously obtained by
222 calling this fn.
223*/
sewardj05fe85e2005-04-27 22:46:36 +0000224static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000225{
226 UWord i;
227 tl_assert(a > MAX_PRIMARY_ADDRESS);
228
229 a &= ~(Addr)0xFFFF;
230
231 /* Search .. */
232 n_auxmap_searches++;
233 for (i = 0; i < auxmap_used; i++) {
234 if (auxmap[i].base == a)
235 break;
236 }
237 n_auxmap_cmps += (ULong)(i+1);
238
239 if (i < auxmap_used) {
240 /* Found it. Nudge it a bit closer to the front. */
241 if (i > 0) {
242 AuxMapEnt tmp = auxmap[i-1];
243 auxmap[i-1] = auxmap[i];
244 auxmap[i] = tmp;
245 i--;
246 }
247 return &auxmap[i];
248 }
249
sewardj05fe85e2005-04-27 22:46:36 +0000250 return NULL;
251}
252
253
/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);
   if (am)
      return am;

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   /* New ranges start out pointing at the no-access distinguished
      secondary; it is copied-on-write if ever modified. */
   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}
287
288
289/* --------------- SecMap fundamentals --------------- */
290
291/* Produce the secmap for 'a', either from the primary map or by
292 ensuring there is an entry for it in the aux primary map. The
293 secmap may be a distinguished one as the caller will only want to
294 be able to read it.
295*/
296static SecMap* get_secmap_readable ( Addr a )
297{
298 if (a <= MAX_PRIMARY_ADDRESS) {
299 UWord pm_off = a >> 16;
300 return primary_map[ pm_off ];
301 } else {
302 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
303 return am->sm;
304 }
305}
306
sewardj05fe85e2005-04-27 22:46:36 +0000307/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
308 allocate one if one doesn't already exist. This is used by the
309 leak checker.
310*/
311static SecMap* maybe_get_secmap_for ( Addr a )
312{
313 if (a <= MAX_PRIMARY_ADDRESS) {
314 UWord pm_off = a >> 16;
315 return primary_map[ pm_off ];
316 } else {
317 AuxMapEnt* am = maybe_find_in_auxmap(a);
318 return am ? am->sm : NULL;
319 }
320}
321
322
323
sewardj45d94cc2005-04-20 14:44:11 +0000324/* Produce the secmap for 'a', either from the primary map or by
325 ensuring there is an entry for it in the aux primary map. The
326 secmap may not be a distinguished one, since the caller will want
327 to be able to write it. If it is a distinguished secondary, make a
328 writable copy of it, install it, and return the copy instead. (COW
329 semantics).
330*/
331static SecMap* get_secmap_writable ( Addr a )
332{
333 if (a <= MAX_PRIMARY_ADDRESS) {
334 UWord pm_off = a >> 16;
335 if (is_distinguished_sm(primary_map[ pm_off ]))
336 primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
337 return primary_map[pm_off];
338 } else {
339 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
340 if (is_distinguished_sm(am->sm))
341 am->sm = copy_for_writing(am->sm);
342 return am->sm;
343 }
344}
345
346
347/* --------------- Endianness helpers --------------- */
348
349/* Returns the offset in memory of the byteno-th most significant byte
350 in a wordszB-sized word, given the specified endianness. */
351static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
352 UWord byteno ) {
353 return bigendian ? (wordszB-1-byteno) : byteno;
354}
355
356
357/* --------------- Fundamental functions --------------- */
358
/* Read both the A bit and the V byte for address 'a' out of its
   secondary map. */
static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}
368
/* Read only the A (addressability) bit for address 'a'. */
static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}
375
/* Set both the A bit and the V byte for address 'a'.  May trigger a
   copy-on-write of the relevant secondary map. */
static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}
383
/* Set only the V byte for address 'a', leaving the A bit alone.  May
   trigger a copy-on-write of the relevant secondary map. */
static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}
390
391
392/* --------------- Load/store slow cases. --------------- */
393
/* Generic byte-at-a-time load, used when the fast-path load handlers
   don't apply.  Returns the V bits for the szB-byte load at 'a'.
   Unaddressable bytes contribute Defined V bits (so only the address
   error is reported, not a spurious value error). */
static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Walk i from szB-1 down to 0; can't use i >= 0 as the loop test
      since i is unsigned (SizeT). */
   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   /* Report at most one address error for the whole access. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}
429
430
/* Generic byte-at-a-time store, used when the fast-path store
   handlers don't apply.  Writes the V bits 'vbytes' for the szB-byte
   store at 'a'; reports an address error if any byte is
   unaddressable. */
static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}
461
462
sewardj45d94cc2005-04-20 14:44:11 +0000463//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
464//zz
465//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
466//zz {
467//zz SecMap* sm;
468//zz UInt sm_off;
469//zz UChar abits8;
470//zz PROF_EVENT(24);
471//zz # ifdef VG_DEBUG_MEMORY
472//zz tl_assert(VG_IS_4_ALIGNED(a));
473//zz # endif
474//zz sm = primary_map[PM_IDX(a)];
475//zz sm_off = SM_OFF(a);
476//zz abits8 = sm->abits[sm_off >> 3];
477//zz abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
478//zz abits8 &= 0x0F;
479//zz return abits8;
480//zz }
481//zz
482//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
483//zz {
484//zz SecMap* sm = primary_map[PM_IDX(a)];
485//zz UInt sm_off = SM_OFF(a);
486//zz PROF_EVENT(25);
487//zz # ifdef VG_DEBUG_MEMORY
488//zz tl_assert(VG_IS_4_ALIGNED(a));
489//zz # endif
490//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
491//zz }
492//zz
493//zz
494//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
495//zz {
496//zz SecMap* sm;
497//zz UInt sm_off;
498//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
499//zz sm = primary_map[PM_IDX(a)];
500//zz sm_off = SM_OFF(a);
501//zz PROF_EVENT(23);
502//zz # ifdef VG_DEBUG_MEMORY
503//zz tl_assert(VG_IS_4_ALIGNED(a));
504//zz # endif
505//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
506//zz }
sewardjee070842003-07-05 17:53:55 +0000507
508
njn25e49d8e72002-09-23 09:36:25 +0000509/*------------------------------------------------------------*/
510/*--- Setting permissions over address ranges. ---*/
511/*------------------------------------------------------------*/
512
/* Given address 'a', find the place where the pointer to a's
   secondary map lives.  If a falls into the primary map, the returned
   value points to one of the entries in primary_map[].  Otherwise,
   the auxiliary primary map is searched for 'a', or an entry is
   created for it; either way, the returned value points to the
   relevant AuxMapEnt's .sm field.

   The point of this is to enable set_address_range_perms to assign
   secondary maps in a uniform way, without worrying about whether a
   given secondary map is pointed to from the main or auxiliary
   primary map.
*/

static SecMap** find_secmap_binder_for_addr ( Addr aA )
{
   if (aA > MAX_PRIMARY_ADDRESS) {
      /* Aux range: the binder is the entry's .sm field. */
      AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
      return &am->sm;
   } else {
      /* Main range: the binder is a slot in primary_map[]. */
      UWord a      = (UWord)aA;
      UWord sec_no = (UWord)(a >> 16);
#     if VG_DEBUG_MEMORY >= 1
      tl_assert(sec_no < N_PRIMARY_MAP);
#     endif
      return &primary_map[sec_no];
   }
}
540
541
542static void set_address_range_perms ( Addr aA, SizeT len,
sewardj45d94cc2005-04-20 14:44:11 +0000543 UWord example_a_bit,
544 UWord example_v_bit )
njn25e49d8e72002-09-23 09:36:25 +0000545{
sewardj23eb2fd2005-04-22 16:29:19 +0000546 PROF_EVENT(150, "set_address_range_perms");
547
548 /* Check the permissions make sense. */
549 tl_assert(example_a_bit == VGM_BIT_VALID
550 || example_a_bit == VGM_BIT_INVALID);
551 tl_assert(example_v_bit == VGM_BIT_VALID
552 || example_v_bit == VGM_BIT_INVALID);
553 if (example_a_bit == VGM_BIT_INVALID)
554 tl_assert(example_v_bit == VGM_BIT_INVALID);
555
556 if (len == 0)
557 return;
558
559 if (VG_(clo_verbosity) > 0) {
560 if (len > 100 * 1000 * 1000) {
561 VG_(message)(Vg_UserMsg,
562 "Warning: set address range perms: "
563 "large range %u, a %d, v %d",
564 len, example_a_bit, example_v_bit );
565 }
566 }
567
568 UWord a = (UWord)aA;
569
570# if VG_DEBUG_MEMORY >= 2
571
572 /*------------------ debug-only case ------------------ */
sewardj45d94cc2005-04-20 14:44:11 +0000573 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000574
sewardj23eb2fd2005-04-22 16:29:19 +0000575 UWord example_vbyte = BIT_TO_BYTE(example_v_bit);
sewardj45d94cc2005-04-20 14:44:11 +0000576
577 tl_assert(sizeof(SizeT) == sizeof(Addr));
578
579 if (0 && len >= 4096)
580 VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
581 (ULong)a, len, example_a_bit, example_v_bit);
njn25e49d8e72002-09-23 09:36:25 +0000582
583 if (len == 0)
584 return;
585
sewardj45d94cc2005-04-20 14:44:11 +0000586 for (i = 0; i < len; i++) {
587 set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
njn25e49d8e72002-09-23 09:36:25 +0000588 }
njn25e49d8e72002-09-23 09:36:25 +0000589
sewardj23eb2fd2005-04-22 16:29:19 +0000590# else
591
592 /*------------------ standard handling ------------------ */
593 UWord vbits8, abits8, vbits32, v_off, a_off;
594 SecMap* sm;
595 SecMap** binder;
596 SecMap* example_dsm;
597
598 /* Decide on the distinguished secondary that we might want
599 to use (part of the space-compression scheme). */
600 if (example_a_bit == VGM_BIT_INVALID) {
601 example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
602 } else {
603 if (example_v_bit == VGM_BIT_VALID) {
604 example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
605 } else {
606 example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
607 }
608 }
609
610 /* Make various wider versions of the A/V values to use. */
611 vbits8 = BIT_TO_BYTE(example_v_bit);
612 abits8 = BIT_TO_BYTE(example_a_bit);
613 vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;
614
615 /* Slowly do parts preceding 8-byte alignment. */
616 while (True) {
617 if (len == 0) break;
618 PROF_EVENT(151, "set_address_range_perms-loop1-pre");
619 if (VG_IS_8_ALIGNED(a)) break;
620 set_abit_and_vbyte( a, example_a_bit, vbits8 );
621 a++;
622 len--;
623 }
624
625 if (len == 0)
626 return;
627
628 tl_assert(VG_IS_8_ALIGNED(a) && len > 0);
629
630 /* Now go in steps of 8 bytes. */
631 binder = find_secmap_binder_for_addr(a);
632
633 while (True) {
634
635 if (len < 8) break;
636
637 PROF_EVENT(152, "set_address_range_perms-loop8");
638
639 if ((a & SECONDARY_MASK) == 0) {
640 /* we just traversed a primary map boundary, so update the
641 binder. */
642 binder = find_secmap_binder_for_addr(a);
643 PROF_EVENT(153, "set_address_range_perms-update-binder");
644
645 /* Space-optimisation. If we are setting the entire
646 secondary map, just point this entry at one of our
647 distinguished secondaries. However, only do that if it
648 already points at a distinguished secondary, since doing
649 otherwise would leak the existing secondary. We could do
650 better and free up any pre-existing non-distinguished
651 secondary at this point, since we are guaranteed that each
652 non-dist secondary only has one pointer to it, and we have
653 that pointer right here. */
654 if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
655 PROF_EVENT(154, "set_address_range_perms-entire-secmap");
656 *binder = example_dsm;
657 len -= SECONDARY_SIZE;
658 a += SECONDARY_SIZE;
659 continue;
660 }
661 }
662
663 /* If the primary is already pointing to a distinguished map
664 with the same properties as we're trying to set, then leave
665 it that way. */
666 if (*binder == example_dsm) {
667 a += 8;
668 len -= 8;
669 continue;
670 }
671
672 /* Make sure it's OK to write the secondary. */
673 if (is_distinguished_sm(*binder))
674 *binder = copy_for_writing(*binder);
675
676 sm = *binder;
677 v_off = a & 0xFFFF;
678 a_off = v_off >> 3;
679 sm->abits[a_off] = (UChar)abits8;
680 ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
681 ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;
682
683 a += 8;
684 len -= 8;
685 }
686
687 if (len == 0)
688 return;
689
690 tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);
691
692 /* Finish the upper fragment. */
693 while (True) {
694 if (len == 0) break;
695 PROF_EVENT(155, "set_address_range_perms-loop1-post");
696 set_abit_and_vbyte ( a, example_a_bit, vbits8 );
697 a++;
698 len--;
699 }
700
701# endif
702}
sewardj45d94cc2005-04-20 14:44:11 +0000703
sewardjc859fbf2005-04-22 21:10:28 +0000704
705/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +0000706
nethercote8b76fe52004-11-08 19:20:09 +0000707static void mc_make_noaccess ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000708{
sewardjc1a2cda2005-04-21 17:34:00 +0000709 PROF_EVENT(40, "mc_make_noaccess");
nethercote8b76fe52004-11-08 19:20:09 +0000710 DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000711 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
712}
713
nethercote8b76fe52004-11-08 19:20:09 +0000714static void mc_make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000715{
sewardjc1a2cda2005-04-21 17:34:00 +0000716 PROF_EVENT(41, "mc_make_writable");
nethercote8b76fe52004-11-08 19:20:09 +0000717 DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000718 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
719}
720
nethercote8b76fe52004-11-08 19:20:09 +0000721static void mc_make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000722{
sewardjc1a2cda2005-04-21 17:34:00 +0000723 PROF_EVENT(42, "mc_make_readable");
nethercote8b76fe52004-11-08 19:20:09 +0000724 DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000725 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
726}
727
njn9b007f62003-04-07 14:40:25 +0000728
sewardjc859fbf2005-04-22 21:10:28 +0000729/* --- Block-copy permissions (needed for implementing realloc()). --- */
730
731static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
732{
733 SizeT i;
734 UWord abit, vbyte;
735
736 DEBUG("mc_copy_address_range_state\n");
737
738 PROF_EVENT(50, "mc_copy_address_range_state");
739 for (i = 0; i < len; i++) {
740 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
741 get_abit_and_vbyte( &abit, &vbyte, src+i );
742 set_abit_and_vbyte( dst+i, abit, vbyte );
743 }
744}
745
746
747/* --- Fast case permission setters, for dealing with stacks. --- */
748
/* Fast-path: mark a 4-aligned 4-byte word at aA as addressable but
   undefined, updating the shadow maps directly when aA falls in the
   main primary map. */
static __inline__
void make_aligned_word32_writable ( Addr aA )
{
   PROF_EVENT(300, "make_aligned_word32_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 4);
#  else

   /* Addresses beyond the main primary map take the generic path. */
   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(301, "make_aligned_word32_writable-slow1");
      mc_make_writable(aA, 4);
      return;
   }

   UWord a = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   /* COW the secondary if it is still a shared distinguished one. */
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   /* Clear the 4 A bits covering this word within the shared abits
      byte (each abits byte covers 8 addresses). */
   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s). */
   sm->abits[a_off] &= ~mask;
#  endif
}
787
sewardj5d28efc2005-04-21 22:16:29 +0000788
/* Fast-path: mark a 4-aligned 4-byte word at aA as not addressable,
   updating the shadow maps directly when aA falls in the main primary
   map. */
static __inline__
void make_aligned_word32_noaccess ( Addr aA )
{
   PROF_EVENT(310, "make_aligned_word32_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 4);
#  else

   /* Addresses beyond the main primary map take the generic path. */
   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      mc_make_noaccess(aA, 4);
      return;
   }

   UWord a = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   /* COW the secondary if it is still a shared distinguished one. */
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   /* Set the 4 A bits covering this word within the shared abits
      byte (each abits byte covers 8 addresses). */
   UWord mask = 0x0F;
   mask <<= (a & 4 /* 100b */);  /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid
      (1s). */
   sm->abits[a_off] |= mask;
#  endif
}
828
829
/* Nb: by "aligned" here we mean 8-byte aligned */
/* Fast-path: mark an 8-aligned 8-byte word at aA as addressable but
   undefined.  An 8-byte word corresponds to exactly one abits byte,
   so no masking is needed. */
static __inline__
void make_aligned_word64_writable ( Addr aA )
{
   PROF_EVENT(320, "make_aligned_word64_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 8);
#  else

   /* Addresses beyond the main primary map take the generic path. */
   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(321, "make_aligned_word64_writable-slow1");
      mc_make_writable(aA, 8);
      return;
   }

   UWord a = (UWord)aA;
   UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   /* COW the secondary if it is still a shared distinguished one. */
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the relevant area accessible. */
   sm->abits[a_off] = VGM_BYTE_VALID;
#  endif
}
866
sewardj23eb2fd2005-04-22 16:29:19 +0000867
njn9b007f62003-04-07 14:40:25 +0000868static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000869void make_aligned_word64_noaccess ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000870{
sewardj23eb2fd2005-04-22 16:29:19 +0000871 PROF_EVENT(330, "make_aligned_word64_noaccess");
872
873# if VG_DEBUG_MEMORY >= 2
874 mc_make_noaccess(aA, 8);
875# else
876
877 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
878 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
879 mc_make_noaccess(aA, 8);
880 return;
881 }
882
883 UWord a = (UWord)aA;
884 UWord sec_no = (UWord)(a >> 16);
885# if VG_DEBUG_MEMORY >= 1
886 tl_assert(sec_no < N_PRIMARY_MAP);
887# endif
888
889 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
890 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
891
892 SecMap* sm = primary_map[sec_no];
893 UWord v_off = a & 0xFFFF;
894 UWord a_off = v_off >> 3;
895
896 /* Paint the abandoned data as uninitialised. Probably not
897 necessary, but still .. */
898 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
899
900 /* Make the abandoned area inaccessible. */
901 sm->abits[a_off] = VGM_BYTE_INVALID;
902# endif
njn9b007f62003-04-07 14:40:25 +0000903}
904
sewardj23eb2fd2005-04-22 16:29:19 +0000905
/* The stack-pointer update handling functions.  SP_UPDATE_HANDLERS
   presumably instantiates the SP-change callbacks, wiring the fast
   aligned word32/word64 writable/noaccess routines above to common
   stack growth/shrink sizes, with the generic byte-range functions
   as the fallback -- see the macro's definition to confirm. */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );
njn9b007f62003-04-07 14:40:25 +0000914
sewardj45d94cc2005-04-20 14:44:11 +0000915
sewardj826ec492005-05-12 18:05:00 +0000916void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
917{
918 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +0000919 if (0)
920 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
921
922# if 0
923 /* Really slow version */
924 mc_make_writable(base, len);
925# endif
926
927# if 0
928 /* Slow(ish) version, which is fairly easily seen to be correct.
929 */
930 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
931 make_aligned_word64_writable(base + 0);
932 make_aligned_word64_writable(base + 8);
933 make_aligned_word64_writable(base + 16);
934 make_aligned_word64_writable(base + 24);
935
936 make_aligned_word64_writable(base + 32);
937 make_aligned_word64_writable(base + 40);
938 make_aligned_word64_writable(base + 48);
939 make_aligned_word64_writable(base + 56);
940
941 make_aligned_word64_writable(base + 64);
942 make_aligned_word64_writable(base + 72);
943 make_aligned_word64_writable(base + 80);
944 make_aligned_word64_writable(base + 88);
945
946 make_aligned_word64_writable(base + 96);
947 make_aligned_word64_writable(base + 104);
948 make_aligned_word64_writable(base + 112);
949 make_aligned_word64_writable(base + 120);
950 } else {
951 mc_make_writable(base, len);
952 }
953# endif
954
955 /* Idea is: go fast when
956 * 8-aligned and length is 128
957 * the sm is available in the main primary map
958 * the address range falls entirely with a single
959 secondary map
960 * the SM is modifiable
961 If all those conditions hold, just update the V bits
962 by writing directly on the v-bit array. We don't care
963 about A bits; if the address range is marked invalid,
964 any attempt to access it will elicit an addressing error,
965 and that's good enough.
966 */
967 if (EXPECTED_TAKEN( len == 128
968 && VG_IS_8_ALIGNED(base)
969 )) {
970 /* Now we know the address range is suitably sized and
971 aligned. */
972 UWord a_lo = (UWord)base;
973 UWord a_hi = (UWord)(base + 127);
974 UWord sec_lo = a_lo >> 16;
975 UWord sec_hi = a_hi >> 16;
976
977 if (EXPECTED_TAKEN( sec_lo == sec_hi
978 && sec_lo <= N_PRIMARY_MAP
979 )) {
980 /* Now we know that the entire address range falls within a
981 single secondary map, and that that secondary 'lives' in
982 the main primary map. */
983 SecMap* sm = primary_map[sec_lo];
984
985 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
986 /* And finally, now we know that the secondary in question
987 is modifiable. */
988 UWord v_off = a_lo & 0xFFFF;
989 ULong* p = (ULong*)(&sm->vbyte[v_off]);
990 p[ 0] = VGM_WORD64_INVALID;
991 p[ 1] = VGM_WORD64_INVALID;
992 p[ 2] = VGM_WORD64_INVALID;
993 p[ 3] = VGM_WORD64_INVALID;
994 p[ 4] = VGM_WORD64_INVALID;
995 p[ 5] = VGM_WORD64_INVALID;
996 p[ 6] = VGM_WORD64_INVALID;
997 p[ 7] = VGM_WORD64_INVALID;
998 p[ 8] = VGM_WORD64_INVALID;
999 p[ 9] = VGM_WORD64_INVALID;
1000 p[10] = VGM_WORD64_INVALID;
1001 p[11] = VGM_WORD64_INVALID;
1002 p[12] = VGM_WORD64_INVALID;
1003 p[13] = VGM_WORD64_INVALID;
1004 p[14] = VGM_WORD64_INVALID;
1005 p[15] = VGM_WORD64_INVALID;
1006 return;
1007 }
1008 }
1009 }
1010
1011 /* else fall into slow case */
sewardj826ec492005-05-12 18:05:00 +00001012 mc_make_writable(base, len);
1013}
1014
1015
nethercote8b76fe52004-11-08 19:20:09 +00001016/*------------------------------------------------------------*/
1017/*--- Checking memory ---*/
1018/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001019
/* Outcome of a readability check over an address range.  The values
   are presumably chosen not to look like plain Bools (0/1), to make
   accidental misuse easier to spot -- TODO confirm intent. */
typedef
   enum {
      MC_Ok = 5,        /* range fully addressable and defined */
      MC_AddrErr = 6,   /* some byte in the range is unaddressable */
      MC_ValueErr = 7   /* addressable, but some byte is undefined */
   }
   MC_ReadResult;
1027
1028
njn25e49d8e72002-09-23 09:36:25 +00001029/* Check permissions for address range. If inadequate permissions
1030 exist, *bad_addr is set to the offending address, so the caller can
1031 know what it is. */
1032
sewardjecf8e102003-07-12 12:11:39 +00001033/* Returns True if [a .. a+len) is not addressible. Otherwise,
1034 returns False, and if bad_addr is non-NULL, sets *bad_addr to
1035 indicate the lowest failing address. Functions below are
1036 similar. */
nethercote8b76fe52004-11-08 19:20:09 +00001037static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00001038{
nethercote451eae92004-11-02 13:06:32 +00001039 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001040 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001041 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00001042 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001043 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +00001044 abit = get_abit(a);
1045 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001046 if (bad_addr != NULL)
1047 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00001048 return False;
1049 }
1050 a++;
1051 }
1052 return True;
1053}
1054
nethercote8b76fe52004-11-08 19:20:09 +00001055static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001056{
nethercote451eae92004-11-02 13:06:32 +00001057 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001058 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001059 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +00001060 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001061 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +00001062 abit = get_abit(a);
1063 if (abit == VGM_BIT_INVALID) {
1064 if (bad_addr != NULL) *bad_addr = a;
1065 return False;
1066 }
1067 a++;
1068 }
1069 return True;
1070}
1071
nethercote8b76fe52004-11-08 19:20:09 +00001072static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001073{
nethercote451eae92004-11-02 13:06:32 +00001074 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001075 UWord abit;
1076 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +00001077
sewardjc1a2cda2005-04-21 17:34:00 +00001078 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +00001079 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +00001080 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001081 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001082 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001083 // Report addressability errors in preference to definedness errors
1084 // by checking the A bits first.
1085 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001086 if (bad_addr != NULL)
1087 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001088 return MC_AddrErr;
1089 }
1090 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001091 if (bad_addr != NULL)
1092 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001093 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001094 }
1095 a++;
1096 }
nethercote8b76fe52004-11-08 19:20:09 +00001097 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001098}
1099
1100
1101/* Check a zero-terminated ascii string. Tricky -- don't want to
1102 examine the actual bytes, to find the end, until we're sure it is
1103 safe to do so. */
1104
njn9b007f62003-04-07 14:40:25 +00001105static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001106{
sewardj45d94cc2005-04-20 14:44:11 +00001107 UWord abit;
1108 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +00001109 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +00001110 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00001111 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +00001112 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001113 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001114 // As in mc_check_readable(), check A bits first
1115 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001116 if (bad_addr != NULL)
1117 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001118 return MC_AddrErr;
1119 }
1120 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001121 if (bad_addr != NULL)
1122 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001123 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001124 }
1125 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +00001126 if (* ((UChar*)a) == 0)
1127 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001128 a++;
1129 }
1130}
1131
1132
1133/*------------------------------------------------------------*/
1134/*--- Memory event handlers ---*/
1135/*------------------------------------------------------------*/
1136
njn25e49d8e72002-09-23 09:36:25 +00001137static
njn72718642003-07-24 08:45:32 +00001138void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001139 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001140{
1141 Bool ok;
1142 Addr bad_addr;
1143
1144 VGP_PUSHCC(VgpCheckMem);
1145
1146 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1147 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001148 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001149 if (!ok) {
1150 switch (part) {
1151 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001152 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1153 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001154 break;
1155
1156 case Vg_CorePThread:
1157 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001158 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001159 break;
1160
1161 default:
njn67993252004-11-22 18:02:32 +00001162 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001163 }
1164 }
1165
1166 VGP_POPCC(VgpCheckMem);
1167}
1168
1169static
njn72718642003-07-24 08:45:32 +00001170void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001171 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001172{
njn25e49d8e72002-09-23 09:36:25 +00001173 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001174 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001175
1176 VGP_PUSHCC(VgpCheckMem);
1177
1178 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
1179 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001180 res = mc_check_readable ( base, size, &bad_addr );
1181 if (MC_Ok != res) {
1182 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1183
njn25e49d8e72002-09-23 09:36:25 +00001184 switch (part) {
1185 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001186 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1187 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001188 break;
1189
1190 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001191 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001192 break;
1193
1194 /* If we're being asked to jump to a silly address, record an error
1195 message before potentially crashing the entire system. */
1196 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001197 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001198 break;
1199
1200 default:
njn67993252004-11-22 18:02:32 +00001201 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001202 }
1203 }
1204 VGP_POPCC(VgpCheckMem);
1205}
1206
1207static
njn72718642003-07-24 08:45:32 +00001208void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001209 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001210{
nethercote8b76fe52004-11-08 19:20:09 +00001211 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001212 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001213 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1214
1215 VGP_PUSHCC(VgpCheckMem);
1216
njnca82cc02004-11-22 17:18:48 +00001217 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001218 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1219 if (MC_Ok != res) {
1220 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1221 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001222 }
1223
1224 VGP_POPCC(VgpCheckMem);
1225}
1226
/* Handler for memory present at startup: mark it addressable and
   defined, regardless of the reported rr/ww/xx permissions. */
static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}
1235
1236static
nethercote451eae92004-11-02 13:06:32 +00001237void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001238{
1239 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001240 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001241 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001242 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001243 }
1244}
1245
/* Handler for newly mmap'd memory: mark it addressable and defined,
   ignoring the rr/ww/xx permission flags. */
static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   mc_make_readable(a, len);
}
1251
/* After the core writes to client memory (e.g. a syscall filling a
   buffer), mark the written range as addressable and defined. */
static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}
njn25e49d8e72002-09-23 09:36:25 +00001257
sewardj45d94cc2005-04-20 14:44:11 +00001258
njn25e49d8e72002-09-23 09:36:25 +00001259/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001260/*--- Register event handlers ---*/
1261/*------------------------------------------------------------*/
1262
sewardj45d94cc2005-04-20 14:44:11 +00001263/* When some chunk of guest state is written, mark the corresponding
1264 shadow area as valid. This is used to initialise arbitrarily large
sewardj2c27f702005-05-03 18:19:05 +00001265 chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
sewardj45d94cc2005-04-20 14:44:11 +00001266*/
1267static void mc_post_reg_write ( CorePart part, ThreadId tid,
1268 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001269{
sewardj6cf40ff2005-04-20 22:31:26 +00001270 UChar area[1024];
1271 tl_assert(size <= 1024);
njncf45fd42004-11-24 16:30:22 +00001272 VG_(memset)(area, VGM_BYTE_VALID, size);
1273 VG_(set_shadow_regs_area)( tid, offset, size, area );
njnd3040452003-05-19 15:04:06 +00001274}
1275
/* Client-call variant of mc_post_reg_write.  The called-function
   address 'f' is accepted but unused here; delegates with a dummy
   CorePart. */
static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}
1283
sewardj45d94cc2005-04-20 14:44:11 +00001284/* Look at the definedness of the guest's shadow state for
1285 [offset, offset+len). If any part of that is undefined, record
1286 a parameter error.
1287*/
1288static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1289 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001290{
sewardj45d94cc2005-04-20 14:44:11 +00001291 Int i;
1292 Bool bad;
1293
1294 UChar area[16];
1295 tl_assert(size <= 16);
1296
1297 VG_(get_shadow_regs_area)( tid, offset, size, area );
1298
1299 bad = False;
1300 for (i = 0; i < size; i++) {
1301 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001302 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001303 break;
1304 }
nethercote8b76fe52004-11-08 19:20:09 +00001305 }
1306
sewardj45d94cc2005-04-20 14:44:11 +00001307 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001308 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1309}
njnd3040452003-05-19 15:04:06 +00001310
njn25e49d8e72002-09-23 09:36:25 +00001311
sewardj6cf40ff2005-04-20 22:31:26 +00001312/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001313/*--- Printing errors ---*/
1314/*------------------------------------------------------------*/
1315
/* Print a MemCheck-specific error, in plain-text or XML form as
   selected by --xml.  Error kinds not specific to MemCheck are passed
   through to the shared MemCheck/AddrCheck printer. */
static void mc_pp_Error ( Error* err )
{
   MAC_Error* err_extra = VG_(get_error_extra)(err);

   /* In XML mode the main message is wrapped in <what>...</what>. */
   HChar* xpre = VG_(clo_xml) ? " <what>" : "";
   HChar* xpost = VG_(clo_xml) ? "</what>" : "";

   switch (VG_(get_error_kind)(err)) {
      case CoreMemErr: {
         Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
                      xpre, VG_(get_error_string)(err), s, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      }

      case ValueErr:
         /* size == 0 means the undefinedness fed a conditional
            jump/move rather than a sized data use. */
         if (err_extra->size == 0) {
             if (VG_(clo_xml))
                VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
             VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
                                      " on uninitialised value(s)%s",
                                      xpre, xpost);
         } else {
             if (VG_(clo_xml))
                VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
             VG_(message)(Vg_UserMsg,
                          "%sUse of uninitialised value of size %d%s",
                          xpre, err_extra->size, xpost);
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      case ParamErr: {
         Bool isReg = ( Register == err_extra->addrinfo.akind );
         Char* s1 = ( isReg ? "contains" : "points to" );
         Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         /* A register param can only be uninitialised, never
            unaddressable. */
         if (isReg) tl_assert(!err_extra->isUnaddr);

         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
         VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
                      xpre, VG_(get_error_string)(err), s1, s2, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      case UserErr: {
         Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );

         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
         VG_(message)(Vg_UserMsg,
            "%s%s byte(s) found during client check request%s",
            xpre, s, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      default:
         MAC_(pp_shared_Error)(err);
         break;
   }
}
1387
1388/*------------------------------------------------------------*/
1389/*--- Recording errors ---*/
1390/*------------------------------------------------------------*/
1391
/* Helpers for recording MemCheck-specific errors.  Each fills in a
   MAC_Error 'extra' record and hands it to the core error machinery. */
1394/* This one called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00001395static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001396{
1397 MAC_Error err_extra;
1398
1399 MAC_(clear_MAC_Error)( &err_extra );
1400 err_extra.size = size;
1401 err_extra.isUnaddr = False;
1402 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1403}
1404
/* This one is called from non-generated code only. */
1406
njn96364822005-05-08 19:04:53 +00001407static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1408 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001409{
1410 MAC_Error err_extra;
1411
1412 tl_assert(VG_INVALID_THREADID != tid);
1413 MAC_(clear_MAC_Error)( &err_extra );
1414 err_extra.addrinfo.akind = Undescribed;
1415 err_extra.isUnaddr = isUnaddr;
1416 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1417}
1418
1419/*------------------------------------------------------------*/
1420/*--- Suppressions ---*/
1421/*------------------------------------------------------------*/
1422
njn51d827b2005-05-09 01:02:08 +00001423static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001424{
1425 SuppKind skind;
1426
1427 if (MAC_(shared_recognised_suppression)(name, su))
1428 return True;
1429
1430 /* Extra suppressions not used by Addrcheck */
1431 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1432 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1433 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1434 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1435 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1436 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1437 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1438 else
1439 return False;
1440
1441 VG_(set_supp_kind)(su, skind);
1442 return True;
1443}
1444
1445/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001446/*--- Functions called directly from generated code: ---*/
1447/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001448/*------------------------------------------------------------*/
1449
1450/* Types: LOADV4, LOADV2, LOADV1 are:
1451 UWord fn ( Addr a )
1452 so they return 32-bits on 32-bit machines and 64-bits on
1453 64-bit machines. Addr has the same size as a host word.
1454
1455 LOADV8 is always ULong fn ( Addr a )
1456
1457 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1458 are a UWord, and for STOREV8 they are a ULong.
1459*/
1460
sewardj95448072004-11-22 20:19:51 +00001461/* ------------------------ Size = 8 ------------------------ */
1462
njn9fb73db2005-03-27 01:55:21 +00001463VGA_REGPARM(1)
sewardjf9d81612005-04-23 23:25:49 +00001464ULong MC_(helperc_LOADV8) ( Addr aA )
sewardj95448072004-11-22 20:19:51 +00001465{
sewardjf9d81612005-04-23 23:25:49 +00001466 PROF_EVENT(200, "helperc_LOADV8");
1467
1468# if VG_DEBUG_MEMORY >= 2
1469 return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1470# else
1471
1472 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
1473 UWord a = (UWord)aA;
1474
1475 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1476 naturally aligned, or 'a' exceeds the range covered by the
1477 primary map. Either way we defer to the slow-path case. */
1478 if (EXPECTED_NOT_TAKEN(a & mask)) {
1479 PROF_EVENT(201, "helperc_LOADV8-slow1");
1480 return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
1481 }
1482
1483 UWord sec_no = (UWord)(a >> 16);
1484
1485# if VG_DEBUG_MEMORY >= 1
1486 tl_assert(sec_no < N_PRIMARY_MAP);
1487# endif
1488
1489 SecMap* sm = primary_map[sec_no];
1490 UWord v_off = a & 0xFFFF;
1491 UWord a_off = v_off >> 3;
1492 UWord abits = (UWord)(sm->abits[a_off]);
1493
1494 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1495 /* Handle common case quickly: a is suitably aligned, is mapped,
1496 and is addressible. */
1497 return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
1498 } else {
1499 /* Slow but general case. */
1500 PROF_EVENT(202, "helperc_LOADV8-slow2");
1501 return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
1502 }
1503
1504# endif
sewardj95448072004-11-22 20:19:51 +00001505}
1506
/* Fast-path store of the 64 V bits 'vbytes' for the 8 bytes at aA.
   The fast case requires aA to be 8-aligned, within the primary map's
   range, addressible, and backed by a modifiable (non-distinguished)
   secondary; anything else defers to mc_STOREVn_slow. */
VGA_REGPARM(1)
void MC_(helperc_STOREV8) ( Addr aA, ULong vbytes )
{
   PROF_EVENT(210, "helperc_STOREV8");

# if VG_DEBUG_MEMORY >= 2
   mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
# else

   /* 1-bits in 'mask' mark address bits that must be zero: the low
      alignment bits and everything above the primary map's range. */
   const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(211, "helperc_STOREV8-slow1");
      mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
      return;
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;
   UWord abits = (UWord)(sm->abits[a_off]);

   /* Writes additionally require the secondary to be modifiable. */
   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
                      && abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes;
   } else {
      /* Slow but general case. */
      PROF_EVENT(212, "helperc_STOREV8-slow2");
      mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
   }
# endif
}
1551
1552/* ------------------------ Size = 4 ------------------------ */
1553
/* Fast-path load of the 32 V bits for the 4 bytes at aA, returned in
   the low 32 bits of a UWord (upper bits, if any, set to 1).  The
   fast case requires aA to be 4-aligned, within the primary map's
   range, and the 4-byte block addressible. */
VGA_REGPARM(1)
UWord MC_(helperc_LOADV4) ( Addr aA )
{
   PROF_EVENT(220, "helperc_LOADV4");

# if VG_DEBUG_MEMORY >= 2
   return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(221, "helperc_LOADV4-slow1");
      return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;
   /* Extract the nibble of A bits covering these 4 bytes. */
   UWord abits = (UWord)(sm->abits[a_off]);
   abits >>= (a & 4);
   abits &= 15;
   if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      /* On a 32-bit platform, simply hoick the required 32 bits out of
         the vbyte array.  On a 64-bit platform, also set the upper 32
         bits to 1 ("undefined"), just in case.  This almost certainly
         isn't necessary, but be paranoid. */
      UWord ret = (UWord)0xFFFFFFFF00000000ULL;
      ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
      return ret;
   } else {
      /* Slow but general case.  (Nb: 'a' == 'aA' here.) */
      PROF_EVENT(222, "helperc_LOADV4-slow2");
      return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
   }

# endif
}
1604
/* Fast-path store of the 32 V bits (low bits of 'vbytes') for the 4
   bytes at aA.  The fast case requires aA to be 4-aligned, within the
   primary map's range, addressible, and backed by a modifiable
   secondary. */
VGA_REGPARM(2)
void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
{
   PROF_EVENT(230, "helperc_STOREV4");

# if VG_DEBUG_MEMORY >= 2
   mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(231, "helperc_STOREV4-slow1");
      mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
      return;
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;
   /* Extract the nibble of A bits covering these 4 bytes. */
   UWord abits = (UWord)(sm->abits[a_off]);
   abits >>= (a & 4);
   abits &= 15;
   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
                      && abits == VGM_NIBBLE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
   } else {
      /* Slow but general case. */
      PROF_EVENT(232, "helperc_STOREV4-slow2");
      mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
   }
# endif
}
1650
sewardj95448072004-11-22 20:19:51 +00001651/* ------------------------ Size = 2 ------------------------ */
1652
/* Fast-path load of the 16 V bits for the 2 bytes at aA, returned in
   the low 16 bits of a UWord (upper bits set to 1).  The fast case
   requires aA to be 2-aligned, within the primary map's range, and
   the containing byte's worth of A bits fully valid. */
VGA_REGPARM(1)
UWord MC_(helperc_LOADV2) ( Addr aA )
{
   PROF_EVENT(240, "helperc_LOADV2");

# if VG_DEBUG_MEMORY >= 2
   return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(241, "helperc_LOADV2-slow1");
      return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
   }

   UWord sec_no = (UWord)(a >> 16);

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;
   UWord a_off = v_off >> 3;
   UWord abits = (UWord)(sm->abits[a_off]);
   if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is mapped, and the entire
         word32 it lives in is addressible. */
      /* Set the upper 16/48 bits of the result to 1 ("undefined"),
         just in case.  This almost certainly isn't necessary, but be
         paranoid. */
      return (~(UWord)0xFFFF)
             |
             (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
   } else {
      /* Slow but general case. */
      PROF_EVENT(242, "helperc_LOADV2-slow2");
      return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
   }

# endif
}
1700
/* Store the V (definedness) bytes 'vbytes' for the aligned 2-byte
   halfword at address 'aA'.  Called directly from generated code.
   Fast path requires alignment, coverage by the primary map, a
   writable (non-distinguished) secondary, and that the whole 8-byte
   granule is addressible. */
VGA_REGPARM(2)
void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
{
   PROF_EVENT(250, "helperc_STOREV2");

# if VG_DEBUG_MEMORY >= 2
   /* Debug build: unconditionally take the general (slow) path. */
   mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
# else

   /* Bits which force the slow path: low bits (2-byte misalignment)
      and high bits (beyond the primary map's range). */
   const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
      naturally aligned, or 'a' exceeds the range covered by the
      primary map.  Either way we defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(251, "helperc_STOREV2-slow1");
      mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
      return;
   }

   UWord sec_no = (UWord)(a >> 16);   /* index of 64k chunk in primary map */

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;   /* byte offset within the secondary map */
   UWord a_off = v_off >> 3;   /* one abits[] byte covers 8 data bytes */
   UWord abits = (UWord)(sm->abits[a_off]);
   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
                      && abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly. */
      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
   } else {
      /* Slow but general case. */
      PROF_EVENT(252, "helperc_STOREV2-slow2");
      mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
   }
# endif
}
1743
sewardj95448072004-11-22 20:19:51 +00001744/* ------------------------ Size = 1 ------------------------ */
1745
/* Load and return the V (definedness) byte for the byte at address
   'aA'.  Called directly from generated code.  No alignment
   constraint exists for a 1-byte access, so the mask only rejects
   addresses beyond the primary map's range.  The fast path still
   demands the whole 8-byte granule be addressible. */
VGA_REGPARM(1)
UWord MC_(helperc_LOADV1) ( Addr aA )
{
   PROF_EVENT(260, "helperc_LOADV1");

# if VG_DEBUG_MEMORY >= 2
   /* Debug build: unconditionally take the general (slow) path. */
   return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;

   /* If any part of 'a' indicated by the mask is 1, it means 'a'
      exceeds the range covered by the primary map.  In which case we
      defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(261, "helperc_LOADV1-slow1");
      return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
   }

   UWord sec_no = (UWord)(a >> 16);   /* index of 64k chunk in primary map */

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;   /* byte offset within the secondary map */
   UWord a_off = v_off >> 3;   /* one abits[] byte covers 8 data bytes */
   UWord abits = (UWord)(sm->abits[a_off]);
   if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is mapped, and the entire
         word32 it lives in is addressible. */
      /* Set the upper 24/56 bits of the result to 1 ("undefined"),
         just in case.  This almost certainly isn't necessary, but be
         paranoid. */
      return (~(UWord)0xFF)
             |
             (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
   } else {
      /* Slow but general case. */
      PROF_EVENT(262, "helperc_LOADV1-slow2");
      return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
   }
# endif
}
1792
sewardjc1a2cda2005-04-21 17:34:00 +00001793
/* Store the V (definedness) byte 'vbyte' for the byte at address
   'aA'.  Called directly from generated code.  No alignment
   constraint for 1-byte accesses; the fast path requires coverage by
   the primary map, a writable (non-distinguished) secondary, and a
   fully-addressible 8-byte granule. */
VGA_REGPARM(2)
void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
{
   PROF_EVENT(270, "helperc_STOREV1");

# if VG_DEBUG_MEMORY >= 2
   /* Debug build: unconditionally take the general (slow) path. */
   mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
# else

   const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
   UWord a = (UWord)aA;
   /* If any part of 'a' indicated by the mask is 1, it means 'a'
      exceeds the range covered by the primary map.  In which case we
      defer to the slow-path case. */
   if (EXPECTED_NOT_TAKEN(a & mask)) {
      PROF_EVENT(271, "helperc_STOREV1-slow1");
      mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
      return;
   }

   UWord sec_no = (UWord)(a >> 16);   /* index of 64k chunk in primary map */

# if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
# endif

   SecMap* sm = primary_map[sec_no];
   UWord v_off = a & 0xFFFF;   /* byte offset within the secondary map */
   UWord a_off = v_off >> 3;   /* one abits[] byte covers 8 data bytes */
   UWord abits = (UWord)(sm->abits[a_off]);
   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
                      && abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is mapped, the entire word32 it
         lives in is addressible. */
      ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
   } else {
      PROF_EVENT(272, "helperc_STOREV1-slow2");
      mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
   }

# endif
}
1836
1837
sewardjc859fbf2005-04-22 21:10:28 +00001838/*------------------------------------------------------------*/
1839/*--- Functions called directly from generated code: ---*/
1840/*--- Value-check failure handlers. ---*/
1841/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001842
/* Called from generated code when a use of an undefined value is
   detected; records a value error of size 0 on the running thread
   (size 0 presumably meaning "size not applicable" -- confirm against
   mc_record_value_error's handling). */
void MC_(helperc_value_check0_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 0 );
}
1847
/* Called from generated code: record a value error for a 1-byte
   undefined value on the running thread. */
void MC_(helperc_value_check1_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 1 );
}
1852
/* Called from generated code: record a value error for a 4-byte
   undefined value on the running thread. */
void MC_(helperc_value_check4_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 4 );
}
1857
/* Called from generated code: record a value error for an 8-byte
   undefined value on the running thread. */
void MC_(helperc_value_check8_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 8 );
}
1862
/* Generic undefined-value complaint from generated code; 'sz' is the
   size in bytes of the offending value. */
VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
{
   mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
}
1867
njn25e49d8e72002-09-23 09:36:25 +00001868
sewardj45d94cc2005-04-20 14:44:11 +00001869//zz /*------------------------------------------------------------*/
1870//zz /*--- Metadata get/set functions, for client requests. ---*/
1871//zz /*------------------------------------------------------------*/
1872//zz
1873//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1874//zz error, 3 == addressing error. */
1875//zz static Int mc_get_or_set_vbits_for_client (
1876//zz ThreadId tid,
1877//zz Addr dataV,
1878//zz Addr vbitsV,
1879//zz SizeT size,
1880//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1881//zz )
1882//zz {
1883//zz Bool addressibleD = True;
1884//zz Bool addressibleV = True;
1885//zz UInt* data = (UInt*)dataV;
1886//zz UInt* vbits = (UInt*)vbitsV;
1887//zz SizeT szW = size / 4; /* sigh */
1888//zz SizeT i;
1889//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1890//zz UInt* vbitsP = NULL; /* ditto */
1891//zz
1892//zz /* Check alignment of args. */
1893//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1894//zz return 2;
1895//zz if ((size & 3) != 0)
1896//zz return 2;
1897//zz
1898//zz /* Check that arrays are addressible. */
1899//zz for (i = 0; i < szW; i++) {
1900//zz dataP = &data[i];
1901//zz vbitsP = &vbits[i];
1902//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1903//zz addressibleD = False;
1904//zz break;
1905//zz }
1906//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1907//zz addressibleV = False;
1908//zz break;
1909//zz }
1910//zz }
1911//zz if (!addressibleD) {
1912//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1913//zz setting ? True : False );
1914//zz return 3;
1915//zz }
1916//zz if (!addressibleV) {
1917//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1918//zz setting ? False : True );
1919//zz return 3;
1920//zz }
1921//zz
1922//zz /* Do the copy */
1923//zz if (setting) {
1924//zz /* setting */
1925//zz for (i = 0; i < szW; i++) {
1926//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00001927//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00001928//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1929//zz }
1930//zz } else {
1931//zz /* getting */
1932//zz for (i = 0; i < szW; i++) {
1933//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1934//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1935//zz }
1936//zz }
1937//zz
1938//zz return 1;
1939//zz }
sewardj05fe85e2005-04-27 22:46:36 +00001940
1941
1942/*------------------------------------------------------------*/
1943/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1944/*------------------------------------------------------------*/
1945
1946/* For the memory leak detector, say whether an entire 64k chunk of
1947 address space is possibly in use, or not. If in doubt return
1948 True.
1949*/
1950static
1951Bool mc_is_within_valid_secondary ( Addr a )
1952{
1953 SecMap* sm = maybe_get_secmap_for ( a );
1954 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
1955 /* Definitely not in use. */
1956 return False;
1957 } else {
1958 return True;
1959 }
1960}
1961
1962
1963/* For the memory leak detector, say whether or not a given word
1964 address is to be regarded as valid. */
1965static
1966Bool mc_is_valid_aligned_word ( Addr a )
1967{
1968 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
1969 if (sizeof(UWord) == 4) {
1970 tl_assert(VG_IS_4_ALIGNED(a));
1971 } else {
1972 tl_assert(VG_IS_8_ALIGNED(a));
1973 }
1974 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
1975 return True;
1976 } else {
1977 return False;
1978 }
1979}
sewardja4495682002-10-21 07:29:59 +00001980
1981
nethercote996901a2004-08-03 13:29:09 +00001982/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00001983 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00001984 tool. */
static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
{
   /* Delegate to the generic (MAC) leak checker, supplying the two
      memcheck-specific predicates it needs for scanning memory. */
   MAC_(do_detect_memory_leaks) (
      tid,
      mode,
      mc_is_within_valid_secondary,
      mc_is_valid_aligned_word
   );
}
1994
1995
sewardjc859fbf2005-04-22 21:10:28 +00001996/*------------------------------------------------------------*/
1997/*--- Initialisation ---*/
1998/*------------------------------------------------------------*/
1999
2000static void init_shadow_memory ( void )
2001{
2002 Int i;
2003 SecMap* sm;
2004
2005 /* Build the 3 distinguished secondaries */
2006 tl_assert(VGM_BIT_INVALID == 1);
2007 tl_assert(VGM_BIT_VALID == 0);
2008 tl_assert(VGM_BYTE_INVALID == 0xFF);
2009 tl_assert(VGM_BYTE_VALID == 0);
2010
2011 /* Set A invalid, V invalid. */
2012 sm = &sm_distinguished[SM_DIST_NOACCESS];
2013 for (i = 0; i < 65536; i++)
2014 sm->vbyte[i] = VGM_BYTE_INVALID;
2015 for (i = 0; i < 8192; i++)
2016 sm->abits[i] = VGM_BYTE_INVALID;
2017
2018 /* Set A valid, V invalid. */
2019 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2020 for (i = 0; i < 65536; i++)
2021 sm->vbyte[i] = VGM_BYTE_INVALID;
2022 for (i = 0; i < 8192; i++)
2023 sm->abits[i] = VGM_BYTE_VALID;
2024
2025 /* Set A valid, V valid. */
2026 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2027 for (i = 0; i < 65536; i++)
2028 sm->vbyte[i] = VGM_BYTE_VALID;
2029 for (i = 0; i < 8192; i++)
2030 sm->abits[i] = VGM_BYTE_VALID;
2031
2032 /* Set up the primary map. */
2033 /* These entries gradually get overwritten as the used address
2034 space expands. */
2035 for (i = 0; i < N_PRIMARY_MAP; i++)
2036 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2037
2038 /* auxmap_size = auxmap_used = 0;
2039 no ... these are statically initialised */
2040}
2041
2042
2043/*------------------------------------------------------------*/
2044/*--- Sanity check machinery (permanently engaged) ---*/
2045/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002046
njn51d827b2005-05-09 01:02:08 +00002047static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002048{
jseward9800fd32004-01-04 23:08:04 +00002049 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002050 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002051 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002052 return True;
njn25e49d8e72002-09-23 09:36:25 +00002053}
2054
njn51d827b2005-05-09 01:02:08 +00002055static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002056{
sewardj23eb2fd2005-04-22 16:29:19 +00002057 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002058 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002059 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002060
sewardj23eb2fd2005-04-22 16:29:19 +00002061 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002062 PROF_EVENT(491, "expensive_sanity_check");
2063
sewardj23eb2fd2005-04-22 16:29:19 +00002064 /* Check that the 3 distinguished SMs are still as they should
2065 be. */
njn25e49d8e72002-09-23 09:36:25 +00002066
sewardj45d94cc2005-04-20 14:44:11 +00002067 /* Check A invalid, V invalid. */
2068 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002069 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002070 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002071 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002072 for (i = 0; i < 8192; i++)
2073 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002074 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002075
sewardj45d94cc2005-04-20 14:44:11 +00002076 /* Check A valid, V invalid. */
2077 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2078 for (i = 0; i < 65536; i++)
2079 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002080 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002081 for (i = 0; i < 8192; i++)
2082 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002083 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002084
2085 /* Check A valid, V valid. */
2086 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2087 for (i = 0; i < 65536; i++)
2088 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002089 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002090 for (i = 0; i < 8192; i++)
2091 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002092 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002093
sewardj23eb2fd2005-04-22 16:29:19 +00002094 if (bad) {
2095 VG_(printf)("memcheck expensive sanity: "
2096 "distinguished_secondaries have changed\n");
2097 return False;
2098 }
2099
2100 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002101 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002102 bad = True;
2103
2104 if (bad) {
2105 VG_(printf)("memcheck expensive sanity: "
2106 "nonsensical auxmap sizing\n");
2107 return False;
2108 }
2109
2110 /* check that the number of secmaps issued matches the number that
2111 are reachable (iow, no secmap leaks) */
2112 n_secmaps_found = 0;
2113 for (i = 0; i < N_PRIMARY_MAP; i++) {
2114 if (primary_map[i] == NULL) {
2115 bad = True;
2116 } else {
2117 if (!is_distinguished_sm(primary_map[i]))
2118 n_secmaps_found++;
2119 }
2120 }
2121
2122 for (i = 0; i < auxmap_used; i++) {
2123 if (auxmap[i].sm == NULL) {
2124 bad = True;
2125 } else {
2126 if (!is_distinguished_sm(auxmap[i].sm))
2127 n_secmaps_found++;
2128 }
2129 }
2130
2131 if (n_secmaps_found != n_secmaps_issued)
2132 bad = True;
2133
2134 if (bad) {
2135 VG_(printf)("memcheck expensive sanity: "
2136 "apparent secmap leakage\n");
2137 return False;
2138 }
2139
2140 /* check that auxmap only covers address space that the primary
2141 doesn't */
2142
2143 for (i = 0; i < auxmap_used; i++)
2144 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2145 bad = True;
2146
2147 if (bad) {
2148 VG_(printf)("memcheck expensive sanity: "
2149 "auxmap covers wrong address space\n");
2150 return False;
2151 }
2152
2153 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002154
2155 return True;
2156}
sewardj45d94cc2005-04-20 14:44:11 +00002157
njn25e49d8e72002-09-23 09:36:25 +00002158
njn25e49d8e72002-09-23 09:36:25 +00002159/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002160/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002161/*------------------------------------------------------------*/
2162
/* When True (the default), suppress errors arising from inlined
   strlen code sequences; controlled by --avoid-strlen-errors. */
Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00002164
/* Process one memcheck-specific command-line option.  Returns True if
   the option was recognised, either here or by the common MAC
   handler. */
static Bool mc_process_cmd_line_option(Char* arg)
{
   /* VG_BOOL_CLO expands to an 'if' that matches and parses
      --avoid-strlen-errors; unmatched options fall through to the
      common MAC option handler via the 'else'. */
   VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
   else
      return MAC_(process_common_cmd_line_option)(arg);

   return True;
}
2173
/* Print usage text for user-visible options: the common MAC options
   first, then memcheck's own. */
static void mc_print_usage(void)
{
   MAC_(print_common_usage)();
   VG_(printf)(
" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
   );
}
2181
/* Print usage text for debugging options: the common MAC debug
   options first, then memcheck's own. */
static void mc_print_debug_usage(void)
{
   MAC_(print_common_debug_usage)();
   VG_(printf)(
" --cleanup=no|yes improve after instrumentation? [yes]\n"
   );
}
2189
nethercote8b76fe52004-11-08 19:20:09 +00002190/*------------------------------------------------------------*/
2191/*--- Client requests ---*/
2192/*------------------------------------------------------------*/
2193
2194/* Client block management:
2195
2196 This is managed as an expanding array of client block descriptors.
2197 Indices of live descriptors are issued to the client, so it can ask
2198 to free them later. Therefore we cannot slide live entries down
2199 over dead ones. Instead we must use free/inuse flags and scan for
2200 an empty slot at allocation time. This in turn means allocation is
2201 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002202
sewardjedc75ab2005-03-15 23:30:32 +00002203 An unused block has start == size == 0
2204*/
nethercote8b76fe52004-11-08 19:20:09 +00002205
/* Descriptor for one client-defined ("general") block.  An unused
   slot is marked by start == size == 0. */
typedef
   struct {
      Addr start;          /* base address of the block */
      SizeT size;          /* size in bytes */
      ExeContext* where;   /* context at creation time */
      Char* desc;          /* client-supplied description (strdup'd) */
   }
   CGenBlock;
2214
/* This subsystem is self-initialising. */
static UInt cgb_size = 0;        /* capacity of the cgbs array */
static UInt cgb_used = 0;        /* number of slots handed out so far */
static CGenBlock* cgbs = NULL;   /* expanding array of descriptors */

/* Stats for this subsystem. */
static UInt cgb_used_MAX = 0; /* Max in use. */
static UInt cgb_allocs = 0; /* Number of allocs. */
static UInt cgb_discards = 0; /* Number of discards. */
static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002225
2226
2227static
njn695c16e2005-03-27 03:40:28 +00002228Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002229{
2230 UInt i, sz_new;
2231 CGenBlock* cgbs_new;
2232
njn695c16e2005-03-27 03:40:28 +00002233 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002234
njn695c16e2005-03-27 03:40:28 +00002235 for (i = 0; i < cgb_used; i++) {
2236 cgb_search++;
2237 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002238 return i;
2239 }
2240
2241 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002242 if (cgb_used < cgb_size) {
2243 cgb_used++;
2244 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002245 }
2246
2247 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002248 tl_assert(cgb_used == cgb_size);
2249 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002250
2251 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002252 for (i = 0; i < cgb_used; i++)
2253 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002254
njn695c16e2005-03-27 03:40:28 +00002255 if (cgbs != NULL)
2256 VG_(free)( cgbs );
2257 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002258
njn695c16e2005-03-27 03:40:28 +00002259 cgb_size = sz_new;
2260 cgb_used++;
2261 if (cgb_used > cgb_used_MAX)
2262 cgb_used_MAX = cgb_used;
2263 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002264}
2265
2266
/* Emit a one-line debug-message summary of client-block activity. */
static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
   );
}
2274
/* Hash-table match callback (used via VG_(HT_first_match)): does the
   address *ap fall within chunk 'sh_ch', counting its redzones? */
static Bool find_addr(VgHashNode* sh_ch, void* ap)
{
   MAC_Chunk *m = (MAC_Chunk*)sh_ch;
   Addr a = *(Addr*)ap;

   return VG_(addr_is_in_block)(a, m->data, m->size, MAC_MALLOC_REDZONE_SZB);
}
2282
/* Try to describe address 'a' as lying inside a client-defined block
   or a mempool rooted at one.  On success, fills in *ai and returns
   True; returns False if 'a' matches no client block. */
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
   UInt i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;   /* unused (discarded) slot */
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         MAC_Mempool **d, *mp;

         /* OK - maybe it's a mempool, too? */
         mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
                                             (UWord)cgbs[i].start,
                                             (void*)&d);
         if(mp != NULL) {
            if(mp->chunks != NULL) {
               MAC_Chunk *mc;

               /* Is 'a' inside one of the pool's allocated chunks? */
               mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
               if(mc != NULL) {
                  ai->akind = UserG;
                  ai->blksize = mc->size;
                  ai->rwoffset = (Int)(a) - (Int)mc->data;
                  ai->lastchange = mc->where;
                  return True;
               }
            }
            /* Inside the pool's anchor block but no chunk: describe
               it as the mempool itself. */
            ai->akind = Mempool;
            ai->blksize = cgbs[i].size;
            ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
            ai->lastchange = cgbs[i].where;
            return True;
         }
         /* A plain client block: report its description too. */
         ai->akind = UserG;
         ai->blksize = cgbs[i].size;
         ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
         ai->lastchange = cgbs[i].where;
         ai->desc = cgbs[i].desc;
         return True;
      }
   }
   return False;
}
2329
/* Handle a client request directed at memcheck.  arg[0] is the
   request code and arg[1..] its operands; *ret receives the reply
   value seen by the client.  Returns True if the request was handled
   (even if it produced an error reply), False if it is not a
   memcheck request at all. */
static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
{
   Int i;
   Bool ok;
   Addr bad_addr;

   /* Reject requests not aimed at us.  The MALLOCLIKE/MEMPOOL codes
      are not 'M','C' tool requests but are still ours to handle (via
      the common MAC handler in the default case below). */
   if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
       && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
       && VG_USERREQ__FREELIKE_BLOCK != arg[0]
       && VG_USERREQ__CREATE_MEMPOOL != arg[0]
       && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
       && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
       && VG_USERREQ__MEMPOOL_FREE != arg[0])
      return False;

   switch (arg[0]) {
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
         if (!ok)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
                                   /*isUnaddr*/True );
         /* Reply: 0 on success, else the first offending address. */
         *ret = ok ? (UWord)NULL : bad_addr;
         break;

      case VG_USERREQ__CHECK_READABLE: { /* check readable */
         MC_ReadResult res;
         res = mc_check_readable ( arg[1], arg[2], &bad_addr );
         /* Distinguish unaddressible from merely-undefined memory. */
         if (MC_AddrErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/True );
         else if (MC_ValueErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/False );
         *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
         break;
      }

      case VG_USERREQ__DO_LEAK_CHECK:
         /* arg[1] nonzero requests a summary-only check. */
         mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
         *ret = 0; /* return value is meaningless */
         break;

      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         mc_make_noaccess ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         mc_make_writable ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_READABLE: /* make readable */
         mc_make_readable ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__CREATE_BLOCK: /* describe a block */
         /* arg[1] = start, arg[2] = size, arg[3] = description.
            Reply: slot index (usable with DISCARD), or -1. */
         if (arg[1] != 0 && arg[2] != 0) {
            i = alloc_client_block();
            /* VG_(printf)("allocated %d %p\n", i, cgbs); */
            cgbs[i].start = arg[1];
            cgbs[i].size = arg[2];
            cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
            cgbs[i].where = VG_(record_ExeContext) ( tid );

            *ret = i;
         } else
            *ret = -1;
         break;

      case VG_USERREQ__DISCARD: /* discard */
         /* arg[2] = slot index from CREATE_BLOCK.  Reply: 0 on
            success, 1 if the handle is unknown or already free. */
         if (cgbs == NULL
             || arg[2] >= cgb_used ||
             (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
            *ret = 1;
         } else {
            tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
            cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
            VG_(free)(cgbs[arg[2]].desc);
            cgb_discards++;
            *ret = 0;
         }
         break;

//zz    case VG_USERREQ__GET_VBITS:
//zz       /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
//zz          error. */
//zz       /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
//zz       *ret = mc_get_or_set_vbits_for_client
//zz              ( tid, arg[1], arg[2], arg[3], False /* get them */ );
//zz       break;
//zz
//zz    case VG_USERREQ__SET_VBITS:
//zz       /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
//zz          error. */
//zz       /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
//zz       *ret = mc_get_or_set_vbits_for_client
//zz              ( tid, arg[1], arg[2], arg[3], True /* set them */ );
//zz       break;

      default:
         /* Anything else (including the MALLOCLIKE/MEMPOOL codes
            admitted above) goes to the common MAC handler. */
         if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
            return True;
         } else {
            VG_(message)(Vg_UserMsg,
                         "Warning: unknown memcheck client request code %llx",
                         (ULong)arg[0]);
            return False;
         }
   }
   return True;
}
njn25e49d8e72002-09-23 09:36:25 +00002443
2444/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002445/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002446/*------------------------------------------------------------*/
2447
njn51d827b2005-05-09 01:02:08 +00002448static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002449{
sewardj71bc3cb2005-05-19 00:25:45 +00002450 /* If we've been asked to emit XML, mash around various other
2451 options so as to constrain the output somewhat. */
2452 if (VG_(clo_xml)) {
2453 /* Extract as much info as possible from the leak checker. */
sewardj09890d82005-05-20 02:45:15 +00002454 /* MAC_(clo_show_reachable) = True; */
sewardj71bc3cb2005-05-19 00:25:45 +00002455 MAC_(clo_leak_check) = LC_Full;
2456 }
njn5c004e42002-11-18 11:04:50 +00002457}
2458
njn51d827b2005-05-09 01:02:08 +00002459static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002460{
nethercote8b76fe52004-11-08 19:20:09 +00002461 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002462
sewardj23eb2fd2005-04-22 16:29:19 +00002463 Int i, n_accessible_dist;
2464 SecMap* sm;
2465
sewardj45d94cc2005-04-20 14:44:11 +00002466 if (VG_(clo_verbosity) > 1) {
2467 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002468 " memcheck: sanity checks: %d cheap, %d expensive",
2469 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002470 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002471 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2472 auxmap_used,
2473 auxmap_used * 64,
2474 auxmap_used / 16 );
2475 VG_(message)(Vg_DebugMsg,
2476 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002477 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002478 VG_(message)(Vg_DebugMsg,
2479 " memcheck: secondaries: %d issued (%dk, %dM)",
2480 n_secmaps_issued,
2481 n_secmaps_issued * 64,
2482 n_secmaps_issued / 16 );
2483
2484 n_accessible_dist = 0;
2485 for (i = 0; i < N_PRIMARY_MAP; i++) {
2486 sm = primary_map[i];
2487 if (is_distinguished_sm(sm)
2488 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2489 n_accessible_dist ++;
2490 }
2491 for (i = 0; i < auxmap_used; i++) {
2492 sm = auxmap[i].sm;
2493 if (is_distinguished_sm(sm)
2494 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2495 n_accessible_dist ++;
2496 }
2497
2498 VG_(message)(Vg_DebugMsg,
2499 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2500 n_accessible_dist,
2501 n_accessible_dist * 64,
2502 n_accessible_dist / 16 );
2503
sewardj45d94cc2005-04-20 14:44:11 +00002504 }
2505
njn5c004e42002-11-18 11:04:50 +00002506 if (0) {
2507 VG_(message)(Vg_DebugMsg,
2508 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002509 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002510 }
njn25e49d8e72002-09-23 09:36:25 +00002511}
2512
/* Pre-command-line-option initialisation: register Memcheck's
   identity, capabilities, allocator replacements and memory-event
   handlers with the Valgrind core.  Runs before option processing;
   option-dependent setup happens later in mc_post_clo_init. */
static void mc_pre_clo_init(void)
{
   /* --- Tool identity, shown in the banner and --version output. --- */
   VG_(details_name)            ("Memcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 370 );

   /* --- Core entry points: post-option init, instrumenter, finaliser. --- */
   VG_(basic_tool_funcs)          (mc_post_clo_init,
                                   MC_(instrument),
                                   mc_fini);

   /* --- Declare the core services this tool needs.  Error-reporting
      callbacks are mostly shared with Addrcheck via the MAC_ layer. --- */
   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (MAC_(eq_Error),
                                   mc_pp_Error,
                                   MAC_(update_extra),
                                   mc_recognised_suppression,
                                   MAC_(read_extra_suppression_info),
                                   MAC_(error_matches_suppression),
                                   MAC_(get_error_name),
                                   MAC_(print_extra_suppression_info));
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)(mc_process_cmd_line_option,
                                   mc_print_usage,
                                   mc_print_debug_usage);
   VG_(needs_client_requests)     (mc_handle_client_request);
   VG_(needs_sanity_checks)       (mc_cheap_sanity_check,
                                   mc_expensive_sanity_check);
   VG_(needs_shadow_memory)       ();

   /* --- Replacement allocator functions (shared MAC_ implementations). --- */
   VG_(malloc_funcs)              (MAC_(malloc),
                                   MAC_(__builtin_new),
                                   MAC_(__builtin_vec_new),
                                   MAC_(memalign),
                                   MAC_(calloc),
                                   MAC_(free),
                                   MAC_(__builtin_delete),
                                   MAC_(__builtin_vec_delete),
                                   MAC_(realloc),
                                   MAC_MALLOC_REDZONE_SZB );

   /* --- Hooks the shared MAC_ heap machinery calls back into Memcheck. --- */
   MAC_( new_mem_heap)             = & mc_new_mem_heap;
   MAC_( ban_mem_heap)             = & mc_make_noaccess;
   MAC_(copy_mem_heap)             = & mc_copy_address_range_state;
   MAC_( die_mem_heap)             = & mc_make_noaccess;
   MAC_(check_noaccess)            = & mc_check_noaccess;

   /* --- Track memory state transitions (A/V bit maintenance). --- */
   VG_(track_new_mem_startup)     ( & mc_new_mem_startup );
   VG_(track_new_mem_stack_signal)( & mc_make_writable );
   VG_(track_new_mem_brk)         ( & mc_make_writable );
   VG_(track_new_mem_mmap)        ( & mc_new_mem_mmap );

   VG_(track_copy_mem_remap)      ( & mc_copy_address_range_state );

   VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
   VG_(track_die_mem_brk)         ( & mc_make_noaccess );
   VG_(track_die_mem_munmap)      ( & mc_make_noaccess );

   /* Specialised fast-path handlers for common small stack
      adjustments, plus the generic fallback. */
   VG_(track_new_mem_stack_4)     ( & MAC_(new_mem_stack_4)  );
   VG_(track_new_mem_stack_8)     ( & MAC_(new_mem_stack_8)  );
   VG_(track_new_mem_stack_12)    ( & MAC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16)    ( & MAC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32)    ( & MAC_(new_mem_stack_32) );
   VG_(track_new_mem_stack)       ( & MAC_(new_mem_stack)    );

   VG_(track_die_mem_stack_4)     ( & MAC_(die_mem_stack_4)  );
   VG_(track_die_mem_stack_8)     ( & MAC_(die_mem_stack_8)  );
   VG_(track_die_mem_stack_12)    ( & MAC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16)    ( & MAC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32)    ( & MAC_(die_mem_stack_32) );
   VG_(track_die_mem_stack)       ( & MAC_(die_mem_stack)    );

   VG_(track_ban_mem_stack)       ( & mc_make_noaccess );

   /* --- Pre/post access checks driving the error reporting. --- */
   VG_(track_pre_mem_read)        ( & mc_check_is_readable );
   VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
   VG_(track_pre_mem_write)       ( & mc_check_is_writable );
   VG_(track_post_mem_write)      ( & mc_post_mem_write );

   VG_(track_pre_reg_read)        ( & mc_pre_reg_read );

   VG_(track_post_reg_write)                  ( & mc_post_reg_write );
   VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );

   /* --- Profiling event labels for --profile output. --- */
   VG_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VG_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   /* Additional block description for VG_(describe_addr)() */
   MAC_(describe_addr_supp) = client_perm_maybe_describe;

   /* Shadow memory must be initialised before the sanity check below
      can legitimately inspect it. */
   init_shadow_memory();
   MAC_(common_pre_clo_init)();

   tl_assert( mc_expensive_sanity_check() );
}
2611
2612VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init, 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002613
njn25e49d8e72002-09-23 09:36:25 +00002614/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002615/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002616/*--------------------------------------------------------------------*/