blob: afe2493edc4bd242de971af97319342ab507b9b8 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn53612422005-03-12 16:22:54 +000012 Copyright (C) 2000-2005 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
sewardjc859fbf2005-04-22 21:10:28 +000033/* TODO 22 Apr 05
sewardj45d94cc2005-04-20 14:44:11 +000034
sewardjc859fbf2005-04-22 21:10:28 +000035 test whether it would be faster, for LOADV4, to check
36 only for 8-byte validity on the fast path
sewardj45d94cc2005-04-20 14:44:11 +000037*/
38
njnc7561b92005-06-19 01:24:32 +000039#include "pub_tool_basics.h"
njn4802b382005-06-11 04:58:29 +000040#include "pub_tool_aspacemgr.h"
njnc7561b92005-06-19 01:24:32 +000041#include "pub_tool_errormgr.h" // For mac_shared.h
42#include "pub_tool_execontext.h" // For mac_shared.h
43#include "pub_tool_hashtable.h" // For mac_shared.h
njn97405b22005-06-02 03:39:33 +000044#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000045#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000046#include "pub_tool_libcprint.h"
njnf536bbb2005-06-13 04:21:38 +000047#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000048#include "pub_tool_mallocfree.h"
49#include "pub_tool_options.h"
50#include "pub_tool_profile.h" // For mac_shared.h
51#include "pub_tool_replacemalloc.h"
52#include "pub_tool_tooliface.h"
53#include "pub_tool_threadstate.h"
54
55#include "mc_include.h"
56#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000057
sewardj45d94cc2005-04-20 14:44:11 +000058
sewardjc1a2cda2005-04-21 17:34:00 +000059#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
60#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
61
62/* Define to debug the mem audit system. Set to:
63 0 no debugging, fast cases are used
64 1 some sanity checking, fast cases are used
65 2 max sanity checking, only slow cases are used
66*/
sewardj23eb2fd2005-04-22 16:29:19 +000067#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000068
njn25e49d8e72002-09-23 09:36:25 +000069#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
70
njn25e49d8e72002-09-23 09:36:25 +000071
njn25e49d8e72002-09-23 09:36:25 +000072/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000073/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000074/*------------------------------------------------------------*/
75
sewardjc859fbf2005-04-22 21:10:28 +000076/* TODO: fix this comment */
77//zz /* All reads and writes are checked against a memory map, which
78//zz records the state of all memory in the process. The memory map is
79//zz organised like this:
80//zz
81//zz The top 16 bits of an address are used to index into a top-level
82//zz map table, containing 65536 entries. Each entry is a pointer to a
83//zz second-level map, which records the accesibililty and validity
84//zz permissions for the 65536 bytes indexed by the lower 16 bits of the
85//zz address. Each byte is represented by nine bits, one indicating
86//zz accessibility, the other eight validity. So each second-level map
87//zz contains 73728 bytes. This two-level arrangement conveniently
88//zz divides the 4G address space into 64k lumps, each size 64k bytes.
89//zz
90//zz All entries in the primary (top-level) map must point to a valid
91//zz secondary (second-level) map. Since most of the 4G of address
92//zz space will not be in use -- ie, not mapped at all -- there is a
njn02bc4b82005-05-15 17:28:26 +000093//zz distinguished secondary map, which indicates 'not addressible and
sewardjc859fbf2005-04-22 21:10:28 +000094//zz not valid' writeable for all bytes. Entries in the primary map for
95//zz which the entire 64k is not in use at all point at this
96//zz distinguished map.
97//zz
98//zz There are actually 4 distinguished secondaries. These are used to
99//zz represent a memory range which is either not addressable (validity
100//zz doesn't matter), addressable+not valid, addressable+valid.
101//zz
102//zz [...] lots of stuff deleted due to out of date-ness
103//zz
104//zz As a final optimisation, the alignment and address checks for
105//zz 4-byte loads and stores are combined in a neat way. The primary
106//zz map is extended to have 262144 entries (2^18), rather than 2^16.
107//zz The top 3/4 of these entries are permanently set to the
108//zz distinguished secondary map. For a 4-byte load/store, the
109//zz top-level map is indexed not with (addr >> 16) but instead f(addr),
110//zz where
111//zz
112//zz f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
113//zz = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
114//zz = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
115//zz
116//zz ie the lowest two bits are placed above the 16 high address bits.
117//zz If either of these two bits are nonzero, the address is misaligned;
118//zz this will select a secondary map from the upper 3/4 of the primary
119//zz map. Because this is always the distinguished secondary map, a
120//zz (bogus) address check failure will result. The failure handling
121//zz code can then figure out whether this is a genuine addr check
122//zz failure or whether it is a possibly-legitimate access at a
123//zz misaligned address.
124//zz */
125
sewardj45d94cc2005-04-20 14:44:11 +0000126/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000127
sewardj23eb2fd2005-04-22 16:29:19 +0000128/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000129
sewardje4ccc012005-05-02 12:53:38 +0000130#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000131
132/* cover the entire address space */
133# define N_PRIMARY_BITS 16
134
135#else
136
137/* Just handle the first 16G fast and the rest via auxiliary
138 primaries. */
139# define N_PRIMARY_BITS 18
140
141#endif
142
sewardj45d94cc2005-04-20 14:44:11 +0000143
sewardjc1a2cda2005-04-21 17:34:00 +0000144/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000145#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000146
147/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000148#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
149
150
151/* --------------- Stats maps --------------- */
152
153static Int n_secmaps_issued = 0;
154static ULong n_auxmap_searches = 0;
155static ULong n_auxmap_cmps = 0;
156static Int n_sanity_cheap = 0;
157static Int n_sanity_expensive = 0;
sewardj45d94cc2005-04-20 14:44:11 +0000158
159
160/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000161
162typedef
163 struct {
sewardj45d94cc2005-04-20 14:44:11 +0000164 UChar abits[8192];
165 UChar vbyte[65536];
njn25e49d8e72002-09-23 09:36:25 +0000166 }
167 SecMap;
168
sewardj45d94cc2005-04-20 14:44:11 +0000169/* 3 distinguished secondary maps, one for no-access, one for
170 accessible but undefined, and one for accessible and defined.
171 Distinguished secondaries may never be modified.
172*/
173#define SM_DIST_NOACCESS 0
174#define SM_DIST_ACCESS_UNDEFINED 1
175#define SM_DIST_ACCESS_DEFINED 2
njnb8dca862005-03-14 02:42:44 +0000176
sewardj45d94cc2005-04-20 14:44:11 +0000177static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000178
sewardj45d94cc2005-04-20 14:44:11 +0000179static inline Bool is_distinguished_sm ( SecMap* sm ) {
180 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
181}
njnb8dca862005-03-14 02:42:44 +0000182
sewardj45d94cc2005-04-20 14:44:11 +0000183/* dist_sm points to one of our three distinguished secondaries. Make
184 a copy of it so that we can write to it.
185*/
186static SecMap* copy_for_writing ( SecMap* dist_sm )
187{
188 SecMap* new_sm;
189 tl_assert(dist_sm == &sm_distinguished[0]
190 || dist_sm == &sm_distinguished[1]
191 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000192
sewardj45f4e7c2005-09-27 19:20:21 +0000193 new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
194 if (new_sm == NULL)
195 VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
196 sizeof(SecMap) );
sewardj45d94cc2005-04-20 14:44:11 +0000197 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
sewardj23eb2fd2005-04-22 16:29:19 +0000198 n_secmaps_issued++;
sewardj45d94cc2005-04-20 14:44:11 +0000199 return new_sm;
200}
njnb8dca862005-03-14 02:42:44 +0000201
sewardj45d94cc2005-04-20 14:44:11 +0000202
203/* --------------- Primary maps --------------- */
204
205/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000206 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000207 handled using the auxiliary primary map.
208*/
sewardj23eb2fd2005-04-22 16:29:19 +0000209static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000210
211
212/* An entry in the auxiliary primary map. base must be a 64k-aligned
213 value, and sm points at the relevant secondary map. As with the
214 main primary map, the secondary may be either a real secondary, or
215 one of the three distinguished secondaries.
216*/
217typedef
218 struct {
sewardj23eb2fd2005-04-22 16:29:19 +0000219 Addr base;
sewardj45d94cc2005-04-20 14:44:11 +0000220 SecMap* sm;
221 }
222 AuxMapEnt;
223
224/* An expanding array of AuxMapEnts. */
sewardjaba741d2005-06-09 13:56:07 +0000225#define N_AUXMAPS 20000 /* HACK */
sewardj45d94cc2005-04-20 14:44:11 +0000226static AuxMapEnt hacky_auxmaps[N_AUXMAPS];
227static Int auxmap_size = N_AUXMAPS;
228static Int auxmap_used = 0;
229static AuxMapEnt* auxmap = &hacky_auxmaps[0];
230
sewardj45d94cc2005-04-20 14:44:11 +0000231
232/* Find an entry in the auxiliary map. If an entry is found, move it
233 one step closer to the front of the array, then return its address.
sewardj05fe85e2005-04-27 22:46:36 +0000234 If an entry is not found, return NULL. Note carefully that
sewardj45d94cc2005-04-20 14:44:11 +0000235 because a each call potentially rearranges the entries, each call
236 to this function invalidates ALL AuxMapEnt*s previously obtained by
237 calling this fn.
238*/
sewardj05fe85e2005-04-27 22:46:36 +0000239static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000240{
241 UWord i;
242 tl_assert(a > MAX_PRIMARY_ADDRESS);
243
244 a &= ~(Addr)0xFFFF;
245
246 /* Search .. */
247 n_auxmap_searches++;
248 for (i = 0; i < auxmap_used; i++) {
249 if (auxmap[i].base == a)
250 break;
251 }
252 n_auxmap_cmps += (ULong)(i+1);
253
254 if (i < auxmap_used) {
255 /* Found it. Nudge it a bit closer to the front. */
256 if (i > 0) {
257 AuxMapEnt tmp = auxmap[i-1];
258 auxmap[i-1] = auxmap[i];
259 auxmap[i] = tmp;
260 i--;
261 }
262 return &auxmap[i];
263 }
264
sewardj05fe85e2005-04-27 22:46:36 +0000265 return NULL;
266}
267
268
269/* Find an entry in the auxiliary map. If an entry is found, move it
270 one step closer to the front of the array, then return its address.
271 If an entry is not found, allocate one. Note carefully that
272 because a each call potentially rearranges the entries, each call
273 to this function invalidates ALL AuxMapEnt*s previously obtained by
274 calling this fn.
275*/
276static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
277{
278 AuxMapEnt* am = maybe_find_in_auxmap(a);
279 if (am)
280 return am;
281
sewardj45d94cc2005-04-20 14:44:11 +0000282 /* We didn't find it. Hmm. This is a new piece of address space.
283 We'll need to allocate a new AuxMap entry for it. */
284 if (auxmap_used >= auxmap_size) {
285 tl_assert(auxmap_used == auxmap_size);
286 /* Out of auxmap entries. */
287 tl_assert2(0, "failed to expand the auxmap table");
288 }
289
290 tl_assert(auxmap_used < auxmap_size);
291
292 auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
293 auxmap[auxmap_used].sm = &sm_distinguished[SM_DIST_NOACCESS];
294
295 if (0)
296 VG_(printf)("new auxmap, base = 0x%llx\n",
297 (ULong)auxmap[auxmap_used].base );
298
299 auxmap_used++;
300 return &auxmap[auxmap_used-1];
301}
302
303
304/* --------------- SecMap fundamentals --------------- */
305
306/* Produce the secmap for 'a', either from the primary map or by
307 ensuring there is an entry for it in the aux primary map. The
308 secmap may be a distinguished one as the caller will only want to
309 be able to read it.
310*/
311static SecMap* get_secmap_readable ( Addr a )
312{
313 if (a <= MAX_PRIMARY_ADDRESS) {
314 UWord pm_off = a >> 16;
315 return primary_map[ pm_off ];
316 } else {
317 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
318 return am->sm;
319 }
320}
321
sewardj05fe85e2005-04-27 22:46:36 +0000322/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
323 allocate one if one doesn't already exist. This is used by the
324 leak checker.
325*/
326static SecMap* maybe_get_secmap_for ( Addr a )
327{
328 if (a <= MAX_PRIMARY_ADDRESS) {
329 UWord pm_off = a >> 16;
330 return primary_map[ pm_off ];
331 } else {
332 AuxMapEnt* am = maybe_find_in_auxmap(a);
333 return am ? am->sm : NULL;
334 }
335}
336
337
338
sewardj45d94cc2005-04-20 14:44:11 +0000339/* Produce the secmap for 'a', either from the primary map or by
340 ensuring there is an entry for it in the aux primary map. The
341 secmap may not be a distinguished one, since the caller will want
342 to be able to write it. If it is a distinguished secondary, make a
343 writable copy of it, install it, and return the copy instead. (COW
344 semantics).
345*/
346static SecMap* get_secmap_writable ( Addr a )
347{
348 if (a <= MAX_PRIMARY_ADDRESS) {
349 UWord pm_off = a >> 16;
350 if (is_distinguished_sm(primary_map[ pm_off ]))
351 primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
352 return primary_map[pm_off];
353 } else {
354 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
355 if (is_distinguished_sm(am->sm))
356 am->sm = copy_for_writing(am->sm);
357 return am->sm;
358 }
359}
360
361
362/* --------------- Endianness helpers --------------- */
363
364/* Returns the offset in memory of the byteno-th most significant byte
365 in a wordszB-sized word, given the specified endianness. */
366static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
367 UWord byteno ) {
368 return bigendian ? (wordszB-1-byteno) : byteno;
369}
370
371
372/* --------------- Fundamental functions --------------- */
373
374static
375void get_abit_and_vbyte ( /*OUT*/UWord* abit,
376 /*OUT*/UWord* vbyte,
377 Addr a )
378{
379 SecMap* sm = get_secmap_readable(a);
380 *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
381 *abit = read_bit_array(sm->abits, a & 0xFFFF);
382}
383
384static
385UWord get_abit ( Addr a )
386{
387 SecMap* sm = get_secmap_readable(a);
388 return read_bit_array(sm->abits, a & 0xFFFF);
389}
390
391static
392void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
393{
394 SecMap* sm = get_secmap_writable(a);
395 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
396 write_bit_array(sm->abits, a & 0xFFFF, abit);
397}
398
399static
400void set_vbyte ( Addr a, UWord vbyte )
401{
402 SecMap* sm = get_secmap_writable(a);
403 sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
404}
405
406
407/* --------------- Load/store slow cases. --------------- */
408
409static
410ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
411{
412 /* Make up a result V word, which contains the loaded data for
sewardjf3d57dd2005-04-22 20:23:27 +0000413 valid addresses and Defined for invalid addresses. Iterate over
414 the bytes in the word, from the most significant down to the
415 least. */
sewardj45d94cc2005-04-20 14:44:11 +0000416 ULong vw = VGM_WORD64_INVALID;
417 SizeT i = szB-1;
418 SizeT n_addrs_bad = 0;
419 Addr ai;
420 Bool aok;
421 UWord abit, vbyte;
422
sewardjc1a2cda2005-04-21 17:34:00 +0000423 PROF_EVENT(30, "mc_LOADVn_slow");
sewardj45d94cc2005-04-20 14:44:11 +0000424 tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
425
426 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +0000427 PROF_EVENT(31, "mc_LOADVn_slow(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000428 ai = a+byte_offset_w(szB,bigendian,i);
429 get_abit_and_vbyte(&abit, &vbyte, ai);
430 aok = abit == VGM_BIT_VALID;
431 if (!aok)
432 n_addrs_bad++;
433 vw <<= 8;
sewardjf3d57dd2005-04-22 20:23:27 +0000434 vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
sewardj45d94cc2005-04-20 14:44:11 +0000435 if (i == 0) break;
436 i--;
437 }
438
439 if (n_addrs_bad > 0)
440 MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
441
sewardj45d94cc2005-04-20 14:44:11 +0000442 return vw;
443}
444
445
446static
447void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
448{
449 SizeT i;
450 SizeT n_addrs_bad = 0;
451 UWord abit;
452 Bool aok;
453 Addr ai;
454
sewardjc1a2cda2005-04-21 17:34:00 +0000455 PROF_EVENT(35, "mc_STOREVn_slow");
sewardj45d94cc2005-04-20 14:44:11 +0000456 tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
457
458 /* Dump vbytes in memory, iterating from least to most significant
459 byte. At the same time establish addressibility of the
460 location. */
461 for (i = 0; i < szB; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000462 PROF_EVENT(36, "mc_STOREVn_slow(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000463 ai = a+byte_offset_w(szB,bigendian,i);
464 abit = get_abit(ai);
465 aok = abit == VGM_BIT_VALID;
466 if (!aok)
467 n_addrs_bad++;
468 set_vbyte(ai, vbytes & 0xFF );
469 vbytes >>= 8;
470 }
471
472 /* If an address error has happened, report it. */
473 if (n_addrs_bad > 0)
474 MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
475}
476
477
sewardj45d94cc2005-04-20 14:44:11 +0000478//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
479//zz
480//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
481//zz {
482//zz SecMap* sm;
483//zz UInt sm_off;
484//zz UChar abits8;
485//zz PROF_EVENT(24);
486//zz # ifdef VG_DEBUG_MEMORY
487//zz tl_assert(VG_IS_4_ALIGNED(a));
488//zz # endif
489//zz sm = primary_map[PM_IDX(a)];
490//zz sm_off = SM_OFF(a);
491//zz abits8 = sm->abits[sm_off >> 3];
492//zz abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
493//zz abits8 &= 0x0F;
494//zz return abits8;
495//zz }
496//zz
497//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
498//zz {
499//zz SecMap* sm = primary_map[PM_IDX(a)];
500//zz UInt sm_off = SM_OFF(a);
501//zz PROF_EVENT(25);
502//zz # ifdef VG_DEBUG_MEMORY
503//zz tl_assert(VG_IS_4_ALIGNED(a));
504//zz # endif
505//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
506//zz }
507//zz
508//zz
509//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
510//zz {
511//zz SecMap* sm;
512//zz UInt sm_off;
513//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
514//zz sm = primary_map[PM_IDX(a)];
515//zz sm_off = SM_OFF(a);
516//zz PROF_EVENT(23);
517//zz # ifdef VG_DEBUG_MEMORY
518//zz tl_assert(VG_IS_4_ALIGNED(a));
519//zz # endif
520//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
521//zz }
sewardjee070842003-07-05 17:53:55 +0000522
523
njn25e49d8e72002-09-23 09:36:25 +0000524/*------------------------------------------------------------*/
525/*--- Setting permissions over address ranges. ---*/
526/*------------------------------------------------------------*/
527
sewardj23eb2fd2005-04-22 16:29:19 +0000528/* Given address 'a', find the place where the pointer to a's
529 secondary map lives. If a falls into the primary map, the returned
530 value points to one of the entries in primary_map[]. Otherwise,
531 the auxiliary primary map is searched for 'a', or an entry is
532 created for it; either way, the returned value points to the
533 relevant AuxMapEnt's .sm field.
534
535 The point of this is to enable set_address_range_perms to assign
536 secondary maps in a uniform way, without worrying about whether a
537 given secondary map is pointed to from the main or auxiliary
538 primary map.
539*/
540
541static SecMap** find_secmap_binder_for_addr ( Addr aA )
542{
543 if (aA > MAX_PRIMARY_ADDRESS) {
544 AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
545 return &am->sm;
546 } else {
547 UWord a = (UWord)aA;
548 UWord sec_no = (UWord)(a >> 16);
549# if VG_DEBUG_MEMORY >= 1
550 tl_assert(sec_no < N_PRIMARY_MAP);
551# endif
552 return &primary_map[sec_no];
553 }
554}
555
556
557static void set_address_range_perms ( Addr aA, SizeT len,
sewardj45d94cc2005-04-20 14:44:11 +0000558 UWord example_a_bit,
559 UWord example_v_bit )
njn25e49d8e72002-09-23 09:36:25 +0000560{
sewardj23eb2fd2005-04-22 16:29:19 +0000561 PROF_EVENT(150, "set_address_range_perms");
562
563 /* Check the permissions make sense. */
564 tl_assert(example_a_bit == VGM_BIT_VALID
565 || example_a_bit == VGM_BIT_INVALID);
566 tl_assert(example_v_bit == VGM_BIT_VALID
567 || example_v_bit == VGM_BIT_INVALID);
568 if (example_a_bit == VGM_BIT_INVALID)
569 tl_assert(example_v_bit == VGM_BIT_INVALID);
570
571 if (len == 0)
572 return;
573
sewardj1fa7d2c2005-06-13 18:22:17 +0000574 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000575 if (len > 100 * 1000 * 1000) {
576 VG_(message)(Vg_UserMsg,
577 "Warning: set address range perms: "
578 "large range %u, a %d, v %d",
579 len, example_a_bit, example_v_bit );
580 }
581 }
582
583 UWord a = (UWord)aA;
584
585# if VG_DEBUG_MEMORY >= 2
586
587 /*------------------ debug-only case ------------------ */
sewardj45d94cc2005-04-20 14:44:11 +0000588 SizeT i;
njn25e49d8e72002-09-23 09:36:25 +0000589
sewardj23eb2fd2005-04-22 16:29:19 +0000590 UWord example_vbyte = BIT_TO_BYTE(example_v_bit);
sewardj45d94cc2005-04-20 14:44:11 +0000591
592 tl_assert(sizeof(SizeT) == sizeof(Addr));
593
594 if (0 && len >= 4096)
595 VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
596 (ULong)a, len, example_a_bit, example_v_bit);
njn25e49d8e72002-09-23 09:36:25 +0000597
598 if (len == 0)
599 return;
600
sewardj45d94cc2005-04-20 14:44:11 +0000601 for (i = 0; i < len; i++) {
602 set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
njn25e49d8e72002-09-23 09:36:25 +0000603 }
njn25e49d8e72002-09-23 09:36:25 +0000604
sewardj23eb2fd2005-04-22 16:29:19 +0000605# else
606
607 /*------------------ standard handling ------------------ */
608 UWord vbits8, abits8, vbits32, v_off, a_off;
609 SecMap* sm;
610 SecMap** binder;
611 SecMap* example_dsm;
612
613 /* Decide on the distinguished secondary that we might want
614 to use (part of the space-compression scheme). */
615 if (example_a_bit == VGM_BIT_INVALID) {
616 example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
617 } else {
618 if (example_v_bit == VGM_BIT_VALID) {
619 example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
620 } else {
621 example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
622 }
623 }
624
625 /* Make various wider versions of the A/V values to use. */
626 vbits8 = BIT_TO_BYTE(example_v_bit);
627 abits8 = BIT_TO_BYTE(example_a_bit);
628 vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;
629
630 /* Slowly do parts preceding 8-byte alignment. */
631 while (True) {
632 if (len == 0) break;
633 PROF_EVENT(151, "set_address_range_perms-loop1-pre");
634 if (VG_IS_8_ALIGNED(a)) break;
635 set_abit_and_vbyte( a, example_a_bit, vbits8 );
636 a++;
637 len--;
638 }
639
640 if (len == 0)
641 return;
642
643 tl_assert(VG_IS_8_ALIGNED(a) && len > 0);
644
645 /* Now go in steps of 8 bytes. */
646 binder = find_secmap_binder_for_addr(a);
647
648 while (True) {
649
650 if (len < 8) break;
651
652 PROF_EVENT(152, "set_address_range_perms-loop8");
653
654 if ((a & SECONDARY_MASK) == 0) {
655 /* we just traversed a primary map boundary, so update the
656 binder. */
657 binder = find_secmap_binder_for_addr(a);
658 PROF_EVENT(153, "set_address_range_perms-update-binder");
659
660 /* Space-optimisation. If we are setting the entire
661 secondary map, just point this entry at one of our
662 distinguished secondaries. However, only do that if it
663 already points at a distinguished secondary, since doing
664 otherwise would leak the existing secondary. We could do
665 better and free up any pre-existing non-distinguished
666 secondary at this point, since we are guaranteed that each
667 non-dist secondary only has one pointer to it, and we have
668 that pointer right here. */
669 if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
670 PROF_EVENT(154, "set_address_range_perms-entire-secmap");
671 *binder = example_dsm;
672 len -= SECONDARY_SIZE;
673 a += SECONDARY_SIZE;
674 continue;
675 }
676 }
677
678 /* If the primary is already pointing to a distinguished map
679 with the same properties as we're trying to set, then leave
680 it that way. */
681 if (*binder == example_dsm) {
682 a += 8;
683 len -= 8;
684 continue;
685 }
686
687 /* Make sure it's OK to write the secondary. */
688 if (is_distinguished_sm(*binder))
689 *binder = copy_for_writing(*binder);
690
691 sm = *binder;
692 v_off = a & 0xFFFF;
693 a_off = v_off >> 3;
694 sm->abits[a_off] = (UChar)abits8;
695 ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
696 ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;
697
698 a += 8;
699 len -= 8;
700 }
701
702 if (len == 0)
703 return;
704
705 tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);
706
707 /* Finish the upper fragment. */
708 while (True) {
709 if (len == 0) break;
710 PROF_EVENT(155, "set_address_range_perms-loop1-post");
711 set_abit_and_vbyte ( a, example_a_bit, vbits8 );
712 a++;
713 len--;
714 }
715
716# endif
717}
sewardj45d94cc2005-04-20 14:44:11 +0000718
sewardjc859fbf2005-04-22 21:10:28 +0000719
720/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +0000721
nethercote8b76fe52004-11-08 19:20:09 +0000722static void mc_make_noaccess ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000723{
sewardjc1a2cda2005-04-21 17:34:00 +0000724 PROF_EVENT(40, "mc_make_noaccess");
nethercote8b76fe52004-11-08 19:20:09 +0000725 DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000726 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
727}
728
nethercote8b76fe52004-11-08 19:20:09 +0000729static void mc_make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000730{
sewardjc1a2cda2005-04-21 17:34:00 +0000731 PROF_EVENT(41, "mc_make_writable");
nethercote8b76fe52004-11-08 19:20:09 +0000732 DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000733 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
734}
735
nethercote8b76fe52004-11-08 19:20:09 +0000736static void mc_make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000737{
sewardjc1a2cda2005-04-21 17:34:00 +0000738 PROF_EVENT(42, "mc_make_readable");
nethercote8b76fe52004-11-08 19:20:09 +0000739 DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000740 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
741}
742
njn9b007f62003-04-07 14:40:25 +0000743
sewardj45f4e7c2005-09-27 19:20:21 +0000744/* --- Block-copy permissions (needed for implementing realloc() and
745 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +0000746
747static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
748{
sewardj45f4e7c2005-09-27 19:20:21 +0000749 SizeT i, j;
sewardjc859fbf2005-04-22 21:10:28 +0000750 UWord abit, vbyte;
751
752 DEBUG("mc_copy_address_range_state\n");
sewardjc859fbf2005-04-22 21:10:28 +0000753 PROF_EVENT(50, "mc_copy_address_range_state");
sewardj45f4e7c2005-09-27 19:20:21 +0000754
755 if (len == 0)
756 return;
757
758 if (src < dst) {
759 for (i = 0, j = len-1; i < len; i++, j--) {
760 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
761 get_abit_and_vbyte( &abit, &vbyte, src+j );
762 set_abit_and_vbyte( dst+j, abit, vbyte );
763 }
764 }
765
766 if (src > dst) {
767 for (i = 0; i < len; i++) {
768 PROF_EVENT(51, "mc_copy_address_range_state(loop)");
769 get_abit_and_vbyte( &abit, &vbyte, src+i );
770 set_abit_and_vbyte( dst+i, abit, vbyte );
771 }
sewardjc859fbf2005-04-22 21:10:28 +0000772 }
773}
774
775
776/* --- Fast case permission setters, for dealing with stacks. --- */
777
njn9b007f62003-04-07 14:40:25 +0000778static __inline__
sewardj5d28efc2005-04-21 22:16:29 +0000779void make_aligned_word32_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000780{
sewardj5d28efc2005-04-21 22:16:29 +0000781 PROF_EVENT(300, "make_aligned_word32_writable");
782
783# if VG_DEBUG_MEMORY >= 2
784 mc_make_writable(aA, 4);
785# else
786
787 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000788 PROF_EVENT(301, "make_aligned_word32_writable-slow1");
sewardj5d28efc2005-04-21 22:16:29 +0000789 mc_make_writable(aA, 4);
790 return;
791 }
792
793 UWord a = (UWord)aA;
794 UWord sec_no = (UWord)(a >> 16);
795# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000796 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000797# endif
798
799 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
800 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
801
802 SecMap* sm = primary_map[sec_no];
803 UWord v_off = a & 0xFFFF;
804 UWord a_off = v_off >> 3;
805
806 /* Paint the new area as uninitialised. */
807 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
808
809 UWord mask = 0x0F;
810 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
811 /* mask now contains 1s where we wish to make address bits valid
812 (0s). */
813 sm->abits[a_off] &= ~mask;
814# endif
njn9b007f62003-04-07 14:40:25 +0000815}
816
sewardj5d28efc2005-04-21 22:16:29 +0000817
818static __inline__
819void make_aligned_word32_noaccess ( Addr aA )
820{
821 PROF_EVENT(310, "make_aligned_word32_noaccess");
822
823# if VG_DEBUG_MEMORY >= 2
824 mc_make_noaccess(aA, 4);
825# else
826
827 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
828 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
829 mc_make_noaccess(aA, 4);
830 return;
831 }
832
833 UWord a = (UWord)aA;
834 UWord sec_no = (UWord)(a >> 16);
835# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000836 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000837# endif
838
839 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
840 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
841
842 SecMap* sm = primary_map[sec_no];
843 UWord v_off = a & 0xFFFF;
844 UWord a_off = v_off >> 3;
845
846 /* Paint the abandoned data as uninitialised. Probably not
847 necessary, but still .. */
848 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
849
850 UWord mask = 0x0F;
851 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
852 /* mask now contains 1s where we wish to make address bits invalid
853 (1s). */
854 sm->abits[a_off] |= mask;
855# endif
856}
857
858
njn9b007f62003-04-07 14:40:25 +0000859/* Nb: by "aligned" here we mean 8-byte aligned */
860static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000861void make_aligned_word64_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000862{
sewardj23eb2fd2005-04-22 16:29:19 +0000863 PROF_EVENT(320, "make_aligned_word64_writable");
864
865# if VG_DEBUG_MEMORY >= 2
866 mc_make_writable(aA, 8);
867# else
868
869 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
870 PROF_EVENT(321, "make_aligned_word64_writable-slow1");
871 mc_make_writable(aA, 8);
872 return;
873 }
874
875 UWord a = (UWord)aA;
876 UWord sec_no = (UWord)(a >> 16);
877# if VG_DEBUG_MEMORY >= 1
878 tl_assert(sec_no < N_PRIMARY_MAP);
879# endif
880
881 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
882 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
883
884 SecMap* sm = primary_map[sec_no];
885 UWord v_off = a & 0xFFFF;
886 UWord a_off = v_off >> 3;
887
888 /* Paint the new area as uninitialised. */
889 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
890
891 /* Make the relevant area accessible. */
892 sm->abits[a_off] = VGM_BYTE_VALID;
893# endif
njn9b007f62003-04-07 14:40:25 +0000894}
895
sewardj23eb2fd2005-04-22 16:29:19 +0000896
njn9b007f62003-04-07 14:40:25 +0000897static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000898void make_aligned_word64_noaccess ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000899{
sewardj23eb2fd2005-04-22 16:29:19 +0000900 PROF_EVENT(330, "make_aligned_word64_noaccess");
901
902# if VG_DEBUG_MEMORY >= 2
903 mc_make_noaccess(aA, 8);
904# else
905
906 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
907 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
908 mc_make_noaccess(aA, 8);
909 return;
910 }
911
912 UWord a = (UWord)aA;
913 UWord sec_no = (UWord)(a >> 16);
914# if VG_DEBUG_MEMORY >= 1
915 tl_assert(sec_no < N_PRIMARY_MAP);
916# endif
917
918 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
919 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
920
921 SecMap* sm = primary_map[sec_no];
922 UWord v_off = a & 0xFFFF;
923 UWord a_off = v_off >> 3;
924
925 /* Paint the abandoned data as uninitialised. Probably not
926 necessary, but still .. */
927 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
928
929 /* Make the abandoned area inaccessible. */
930 sm->abits[a_off] = VGM_BYTE_INVALID;
931# endif
njn9b007f62003-04-07 14:40:25 +0000932}
933
sewardj23eb2fd2005-04-22 16:29:19 +0000934
sewardj45d94cc2005-04-20 14:44:11 +0000935/* The stack-pointer update handling functions */
936SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
937 make_aligned_word32_noaccess,
938 make_aligned_word64_writable,
939 make_aligned_word64_noaccess,
940 mc_make_writable,
941 mc_make_noaccess
942 );
njn9b007f62003-04-07 14:40:25 +0000943
sewardj45d94cc2005-04-20 14:44:11 +0000944
sewardj826ec492005-05-12 18:05:00 +0000945void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
946{
947 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +0000948 if (0)
949 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
950
951# if 0
952 /* Really slow version */
953 mc_make_writable(base, len);
954# endif
955
956# if 0
957 /* Slow(ish) version, which is fairly easily seen to be correct.
958 */
959 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
960 make_aligned_word64_writable(base + 0);
961 make_aligned_word64_writable(base + 8);
962 make_aligned_word64_writable(base + 16);
963 make_aligned_word64_writable(base + 24);
964
965 make_aligned_word64_writable(base + 32);
966 make_aligned_word64_writable(base + 40);
967 make_aligned_word64_writable(base + 48);
968 make_aligned_word64_writable(base + 56);
969
970 make_aligned_word64_writable(base + 64);
971 make_aligned_word64_writable(base + 72);
972 make_aligned_word64_writable(base + 80);
973 make_aligned_word64_writable(base + 88);
974
975 make_aligned_word64_writable(base + 96);
976 make_aligned_word64_writable(base + 104);
977 make_aligned_word64_writable(base + 112);
978 make_aligned_word64_writable(base + 120);
979 } else {
980 mc_make_writable(base, len);
981 }
982# endif
983
984 /* Idea is: go fast when
985 * 8-aligned and length is 128
986 * the sm is available in the main primary map
987 * the address range falls entirely with a single
988 secondary map
989 * the SM is modifiable
990 If all those conditions hold, just update the V bits
991 by writing directly on the v-bit array. We don't care
992 about A bits; if the address range is marked invalid,
993 any attempt to access it will elicit an addressing error,
994 and that's good enough.
995 */
996 if (EXPECTED_TAKEN( len == 128
997 && VG_IS_8_ALIGNED(base)
998 )) {
999 /* Now we know the address range is suitably sized and
1000 aligned. */
1001 UWord a_lo = (UWord)base;
1002 UWord a_hi = (UWord)(base + 127);
1003 UWord sec_lo = a_lo >> 16;
1004 UWord sec_hi = a_hi >> 16;
1005
1006 if (EXPECTED_TAKEN( sec_lo == sec_hi
1007 && sec_lo <= N_PRIMARY_MAP
1008 )) {
1009 /* Now we know that the entire address range falls within a
1010 single secondary map, and that that secondary 'lives' in
1011 the main primary map. */
1012 SecMap* sm = primary_map[sec_lo];
1013
1014 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
1015 /* And finally, now we know that the secondary in question
1016 is modifiable. */
1017 UWord v_off = a_lo & 0xFFFF;
1018 ULong* p = (ULong*)(&sm->vbyte[v_off]);
1019 p[ 0] = VGM_WORD64_INVALID;
1020 p[ 1] = VGM_WORD64_INVALID;
1021 p[ 2] = VGM_WORD64_INVALID;
1022 p[ 3] = VGM_WORD64_INVALID;
1023 p[ 4] = VGM_WORD64_INVALID;
1024 p[ 5] = VGM_WORD64_INVALID;
1025 p[ 6] = VGM_WORD64_INVALID;
1026 p[ 7] = VGM_WORD64_INVALID;
1027 p[ 8] = VGM_WORD64_INVALID;
1028 p[ 9] = VGM_WORD64_INVALID;
1029 p[10] = VGM_WORD64_INVALID;
1030 p[11] = VGM_WORD64_INVALID;
1031 p[12] = VGM_WORD64_INVALID;
1032 p[13] = VGM_WORD64_INVALID;
1033 p[14] = VGM_WORD64_INVALID;
1034 p[15] = VGM_WORD64_INVALID;
1035 return;
1036 }
1037 }
1038 }
1039
1040 /* else fall into slow case */
sewardj826ec492005-05-12 18:05:00 +00001041 mc_make_writable(base, len);
1042}
1043
1044
nethercote8b76fe52004-11-08 19:20:09 +00001045/*------------------------------------------------------------*/
1046/*--- Checking memory ---*/
1047/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001048
sewardje4ccc012005-05-02 12:53:38 +00001049typedef
1050 enum {
1051 MC_Ok = 5,
1052 MC_AddrErr = 6,
1053 MC_ValueErr = 7
1054 }
1055 MC_ReadResult;
1056
1057
njn25e49d8e72002-09-23 09:36:25 +00001058/* Check permissions for address range. If inadequate permissions
1059 exist, *bad_addr is set to the offending address, so the caller can
1060 know what it is. */
1061
sewardjecf8e102003-07-12 12:11:39 +00001062/* Returns True if [a .. a+len) is not addressible. Otherwise,
1063 returns False, and if bad_addr is non-NULL, sets *bad_addr to
1064 indicate the lowest failing address. Functions below are
1065 similar. */
nethercote8b76fe52004-11-08 19:20:09 +00001066static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00001067{
nethercote451eae92004-11-02 13:06:32 +00001068 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001069 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001070 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00001071 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001072 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +00001073 abit = get_abit(a);
1074 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001075 if (bad_addr != NULL)
1076 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00001077 return False;
1078 }
1079 a++;
1080 }
1081 return True;
1082}
1083
nethercote8b76fe52004-11-08 19:20:09 +00001084static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001085{
nethercote451eae92004-11-02 13:06:32 +00001086 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001087 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001088 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +00001089 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001090 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +00001091 abit = get_abit(a);
1092 if (abit == VGM_BIT_INVALID) {
1093 if (bad_addr != NULL) *bad_addr = a;
1094 return False;
1095 }
1096 a++;
1097 }
1098 return True;
1099}
1100
nethercote8b76fe52004-11-08 19:20:09 +00001101static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001102{
nethercote451eae92004-11-02 13:06:32 +00001103 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001104 UWord abit;
1105 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +00001106
sewardjc1a2cda2005-04-21 17:34:00 +00001107 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +00001108 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +00001109 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001110 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001111 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001112 // Report addressability errors in preference to definedness errors
1113 // by checking the A bits first.
1114 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001115 if (bad_addr != NULL)
1116 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001117 return MC_AddrErr;
1118 }
1119 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001120 if (bad_addr != NULL)
1121 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001122 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001123 }
1124 a++;
1125 }
nethercote8b76fe52004-11-08 19:20:09 +00001126 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001127}
1128
1129
1130/* Check a zero-terminated ascii string. Tricky -- don't want to
1131 examine the actual bytes, to find the end, until we're sure it is
1132 safe to do so. */
1133
njn9b007f62003-04-07 14:40:25 +00001134static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001135{
sewardj45d94cc2005-04-20 14:44:11 +00001136 UWord abit;
1137 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +00001138 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +00001139 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00001140 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +00001141 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001142 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001143 // As in mc_check_readable(), check A bits first
1144 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001145 if (bad_addr != NULL)
1146 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001147 return MC_AddrErr;
1148 }
1149 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001150 if (bad_addr != NULL)
1151 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001152 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001153 }
1154 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +00001155 if (* ((UChar*)a) == 0)
1156 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001157 a++;
1158 }
1159}
1160
1161
1162/*------------------------------------------------------------*/
1163/*--- Memory event handlers ---*/
1164/*------------------------------------------------------------*/
1165
njn25e49d8e72002-09-23 09:36:25 +00001166static
njn72718642003-07-24 08:45:32 +00001167void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001168 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001169{
1170 Bool ok;
1171 Addr bad_addr;
1172
1173 VGP_PUSHCC(VgpCheckMem);
1174
1175 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1176 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001177 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001178 if (!ok) {
1179 switch (part) {
1180 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001181 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1182 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001183 break;
1184
1185 case Vg_CorePThread:
1186 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001187 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001188 break;
1189
1190 default:
njn67993252004-11-22 18:02:32 +00001191 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001192 }
1193 }
1194
1195 VGP_POPCC(VgpCheckMem);
1196}
1197
1198static
njn72718642003-07-24 08:45:32 +00001199void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001200 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001201{
njn25e49d8e72002-09-23 09:36:25 +00001202 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001203 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001204
1205 VGP_PUSHCC(VgpCheckMem);
1206
nethercote8b76fe52004-11-08 19:20:09 +00001207 res = mc_check_readable ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00001208
1209 if (0)
1210 VG_(printf)("mc_check_is_readable(0x%x, %d, %s) -> %s\n",
1211 (UInt)base, (Int)size, s, res==MC_Ok ? "yes" : "no" );
1212
nethercote8b76fe52004-11-08 19:20:09 +00001213 if (MC_Ok != res) {
1214 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00001215
njn25e49d8e72002-09-23 09:36:25 +00001216 switch (part) {
1217 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001218 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1219 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001220 break;
1221
1222 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001223 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001224 break;
1225
1226 /* If we're being asked to jump to a silly address, record an error
1227 message before potentially crashing the entire system. */
1228 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001229 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001230 break;
1231
1232 default:
njn67993252004-11-22 18:02:32 +00001233 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001234 }
1235 }
1236 VGP_POPCC(VgpCheckMem);
1237}
1238
1239static
njn72718642003-07-24 08:45:32 +00001240void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001241 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001242{
nethercote8b76fe52004-11-08 19:20:09 +00001243 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001244 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001245 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1246
1247 VGP_PUSHCC(VgpCheckMem);
1248
njnca82cc02004-11-22 17:18:48 +00001249 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001250 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1251 if (MC_Ok != res) {
1252 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1253 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001254 }
1255
1256 VGP_POPCC(VgpCheckMem);
1257}
1258
njn25e49d8e72002-09-23 09:36:25 +00001259static
nethercote451eae92004-11-02 13:06:32 +00001260void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001261{
njn1f3a9092002-10-04 09:22:30 +00001262 /* Ignore the permissions, just make it readable. Seems to work... */
nethercote451eae92004-11-02 13:06:32 +00001263 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
1264 a,(ULong)len,rr,ww,xx);
nethercote8b76fe52004-11-08 19:20:09 +00001265 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001266}
1267
1268static
nethercote451eae92004-11-02 13:06:32 +00001269void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001270{
1271 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001272 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001273 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001274 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001275 }
1276}
1277
1278static
njnb8dca862005-03-14 02:42:44 +00001279void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001280{
njnb8dca862005-03-14 02:42:44 +00001281 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001282}
1283
njncf45fd42004-11-24 16:30:22 +00001284static
1285void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
1286{
1287 mc_make_readable(a, len);
1288}
njn25e49d8e72002-09-23 09:36:25 +00001289
sewardj45d94cc2005-04-20 14:44:11 +00001290
njn25e49d8e72002-09-23 09:36:25 +00001291/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001292/*--- Register event handlers ---*/
1293/*------------------------------------------------------------*/
1294
sewardj45d94cc2005-04-20 14:44:11 +00001295/* When some chunk of guest state is written, mark the corresponding
1296 shadow area as valid. This is used to initialise arbitrarily large
sewardj2c27f702005-05-03 18:19:05 +00001297 chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
sewardj45d94cc2005-04-20 14:44:11 +00001298*/
1299static void mc_post_reg_write ( CorePart part, ThreadId tid,
1300 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001301{
sewardj6cf40ff2005-04-20 22:31:26 +00001302 UChar area[1024];
1303 tl_assert(size <= 1024);
njncf45fd42004-11-24 16:30:22 +00001304 VG_(memset)(area, VGM_BYTE_VALID, size);
1305 VG_(set_shadow_regs_area)( tid, offset, size, area );
njnd3040452003-05-19 15:04:06 +00001306}
1307
sewardj45d94cc2005-04-20 14:44:11 +00001308static
1309void mc_post_reg_write_clientcall ( ThreadId tid,
1310 OffT offset, SizeT size,
1311 Addr f)
njnd3040452003-05-19 15:04:06 +00001312{
njncf45fd42004-11-24 16:30:22 +00001313 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00001314}
1315
sewardj45d94cc2005-04-20 14:44:11 +00001316/* Look at the definedness of the guest's shadow state for
1317 [offset, offset+len). If any part of that is undefined, record
1318 a parameter error.
1319*/
1320static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1321 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001322{
sewardj45d94cc2005-04-20 14:44:11 +00001323 Int i;
1324 Bool bad;
1325
1326 UChar area[16];
1327 tl_assert(size <= 16);
1328
1329 VG_(get_shadow_regs_area)( tid, offset, size, area );
1330
1331 bad = False;
1332 for (i = 0; i < size; i++) {
1333 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001334 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001335 break;
1336 }
nethercote8b76fe52004-11-08 19:20:09 +00001337 }
1338
sewardj45d94cc2005-04-20 14:44:11 +00001339 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001340 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1341}
njnd3040452003-05-19 15:04:06 +00001342
njn25e49d8e72002-09-23 09:36:25 +00001343
sewardj6cf40ff2005-04-20 22:31:26 +00001344/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001345/*--- Printing errors ---*/
1346/*------------------------------------------------------------*/
1347
njn51d827b2005-05-09 01:02:08 +00001348static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00001349{
1350 MAC_Error* err_extra = VG_(get_error_extra)(err);
1351
sewardj71bc3cb2005-05-19 00:25:45 +00001352 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
1353 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
1354
njn9e63cb62005-05-08 18:34:59 +00001355 switch (VG_(get_error_kind)(err)) {
1356 case CoreMemErr: {
1357 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00001358 if (VG_(clo_xml))
1359 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
1360 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
1361 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
1362 xpre, VG_(get_error_string)(err), s, xpost);
1363
njn9e63cb62005-05-08 18:34:59 +00001364 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1365 break;
1366
1367 }
1368
1369 case ValueErr:
1370 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00001371 if (VG_(clo_xml))
1372 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
1373 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
1374 " on uninitialised value(s)%s",
1375 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00001376 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00001377 if (VG_(clo_xml))
1378 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
1379 VG_(message)(Vg_UserMsg,
1380 "%sUse of uninitialised value of size %d%s",
1381 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00001382 }
1383 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1384 break;
1385
1386 case ParamErr: {
1387 Bool isReg = ( Register == err_extra->addrinfo.akind );
1388 Char* s1 = ( isReg ? "contains" : "points to" );
1389 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
1390 if (isReg) tl_assert(!err_extra->isUnaddr);
1391
sewardj71bc3cb2005-05-19 00:25:45 +00001392 if (VG_(clo_xml))
1393 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
1394 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
1395 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00001396
1397 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1398 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1399 break;
1400 }
1401 case UserErr: {
1402 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
1403
sewardj71bc3cb2005-05-19 00:25:45 +00001404 if (VG_(clo_xml))
1405 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00001406 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00001407 "%s%s byte(s) found during client check request%s",
1408 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00001409
1410 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1411 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1412 break;
1413 }
1414 default:
1415 MAC_(pp_shared_Error)(err);
1416 break;
1417 }
1418}
1419
1420/*------------------------------------------------------------*/
1421/*--- Recording errors ---*/
1422/*------------------------------------------------------------*/
1423
njn02bc4b82005-05-15 17:28:26 +00001424/* Creates a copy of the 'extra' part, updates the copy with address info if
njn9e63cb62005-05-08 18:34:59 +00001425 necessary, and returns the copy. */
1426/* This one called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00001427static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001428{
1429 MAC_Error err_extra;
1430
1431 MAC_(clear_MAC_Error)( &err_extra );
1432 err_extra.size = size;
1433 err_extra.isUnaddr = False;
1434 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1435}
1436
1437/* This called from non-generated code */
1438
njn96364822005-05-08 19:04:53 +00001439static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1440 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001441{
1442 MAC_Error err_extra;
1443
1444 tl_assert(VG_INVALID_THREADID != tid);
1445 MAC_(clear_MAC_Error)( &err_extra );
1446 err_extra.addrinfo.akind = Undescribed;
1447 err_extra.isUnaddr = isUnaddr;
1448 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1449}
1450
1451/*------------------------------------------------------------*/
1452/*--- Suppressions ---*/
1453/*------------------------------------------------------------*/
1454
njn51d827b2005-05-09 01:02:08 +00001455static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001456{
1457 SuppKind skind;
1458
1459 if (MAC_(shared_recognised_suppression)(name, su))
1460 return True;
1461
1462 /* Extra suppressions not used by Addrcheck */
1463 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1464 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1465 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1466 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1467 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1468 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1469 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1470 else
1471 return False;
1472
1473 VG_(set_supp_kind)(su, skind);
1474 return True;
1475}
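
/* Illustrative example (added for this write-up, not from this file): a
   suppression selecting one of the kinds recognised above would look
   roughly like the following in a suppressions file.  The suppression
   name and the fun:/obj: frames are hypothetical and depend entirely on
   the application being suppressed.

      {
         ignore-uninit-in-libfoo
         Memcheck:Value4
         fun:foo_compute
         obj:/usr/lib/libfoo.so
      }

   "Cond" (and the backwards-compatible "Value0") covers errors arising
   from uninitialised conditions, while "Value1" .. "Value16" cover
   uninitialised uses of values of that many bytes. */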
1476
1477/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001478/*--- Functions called directly from generated code: ---*/
1479/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001480/*------------------------------------------------------------*/
1481
1482/* Types: LOADV4, LOADV2, LOADV1 are:
1483 UWord fn ( Addr a )
1484   so they return 32 bits on 32-bit machines and 64 bits on
1485 64-bit machines. Addr has the same size as a host word.
1486
1487 LOADV8 is always ULong fn ( Addr a )
1488
1489 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1490 are a UWord, and for STOREV8 they are a ULong.
1491*/
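
/* Illustrative note (added for this write-up, not part of the original
   source): every fast-path helper below begins with the same address
   check and shadow-map indexing.  The sketch pulls that logic out as a
   standalone function.  The real value of N_PRIMARY_MAP is defined
   elsewhere in this tool; 0x10000 (full 4GB coverage on a 32-bit host)
   is assumed here purely to make the arithmetic concrete. */
#if 0   /* sketch only; not compiled into the tool */
#include <stdint.h>
#include <stdbool.h>

#define EX_N_PRIMARY_MAP 0x10000UL      /* assumed for illustration */

/* True iff a size-szB access at 'a' may take the fast path: 'a' is
   szB-aligned and lies below EX_N_PRIMARY_MAP * 64KB. */
static bool ex_fast_path_ok ( uintptr_t a, uintptr_t szB )
{
   uintptr_t mask = ~((0x10000 - szB) | ((EX_N_PRIMARY_MAP - 1) << 16));
   return (a & mask) == 0;
}

/* Worked example for szB == 8 and EX_N_PRIMARY_MAP == 0x10000:
      0x10000 - 8          = 0x0000FFF8   (bits 3..15 set)
      (0x10000 - 1) << 16  = 0xFFFF0000   (bits 16..31 set)
      mask                 = 0x00000007   on a 32-bit host,
   so the test rejects exactly the addresses that are not 8-aligned; on
   a 64-bit host the complement also keeps bits 32..63, additionally
   rejecting addresses beyond the primary map's reach.  Once past the
   check, the helpers index shadow memory as
      sec_no = a >> 16       (which 64KB secondary)
      v_off  = a & 0xFFFF    (byte offset inside that secondary)
      a_off  = v_off >> 3    (one abits byte covers 8 client bytes)   */
#endif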
1492
sewardj95448072004-11-22 20:19:51 +00001493/* ------------------------ Size = 8 ------------------------ */
1494
sewardj8cf88b72005-07-08 01:29:33 +00001495#define MAKE_LOADV8(nAME,iS_BIGENDIAN) \
1496 \
1497 VG_REGPARM(1) \
1498 ULong nAME ( Addr aA ) \
1499 { \
1500 PROF_EVENT(200, #nAME); \
1501 \
1502 if (VG_DEBUG_MEMORY >= 2) \
1503 return mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
1504 \
1505 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1506 UWord a = (UWord)aA; \
1507 \
1508 /* If any part of 'a' indicated by the mask is 1, either */ \
1509 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1510 /* covered by the primary map. Either way we defer to the */ \
1511 /* slow-path case. */ \
1512 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1513 PROF_EVENT(201, #nAME"-slow1"); \
1514 return (UWord)mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
1515 } \
1516 \
1517 UWord sec_no = (UWord)(a >> 16); \
1518 \
1519 if (VG_DEBUG_MEMORY >= 1) \
1520 tl_assert(sec_no < N_PRIMARY_MAP); \
1521 \
1522 SecMap* sm = primary_map[sec_no]; \
1523 UWord v_off = a & 0xFFFF; \
1524 UWord a_off = v_off >> 3; \
1525 UWord abits = (UWord)(sm->abits[a_off]); \
1526 \
1527 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1528 /* Handle common case quickly: a is suitably aligned, */ \
1529         /* is mapped, and is addressable. */                        \
1530 return ((ULong*)(sm->vbyte))[ v_off >> 3 ]; \
1531 } else { \
1532 /* Slow but general case. */ \
1533 PROF_EVENT(202, #nAME"-slow2"); \
1534 return mc_LOADVn_slow( a, 8, iS_BIGENDIAN ); \
1535 } \
sewardjf9d81612005-04-23 23:25:49 +00001536 }
1537
sewardj8cf88b72005-07-08 01:29:33 +00001538MAKE_LOADV8( MC_(helperc_LOADV8be), True /*bigendian*/ );
1539MAKE_LOADV8( MC_(helperc_LOADV8le), False/*littleendian*/ );
sewardjf9d81612005-04-23 23:25:49 +00001540
sewardjf9d81612005-04-23 23:25:49 +00001541
sewardj8cf88b72005-07-08 01:29:33 +00001542#define MAKE_STOREV8(nAME,iS_BIGENDIAN) \
1543 \
1544 VG_REGPARM(1) \
1545 void nAME ( Addr aA, ULong vbytes ) \
1546 { \
1547 PROF_EVENT(210, #nAME); \
1548 \
1549 if (VG_DEBUG_MEMORY >= 2) \
1550 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1551 \
1552 const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1553 UWord a = (UWord)aA; \
1554 \
1555 /* If any part of 'a' indicated by the mask is 1, either */ \
1556 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1557 /* covered by the primary map. Either way we defer to the */ \
1558 /* slow-path case. */ \
1559 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1560 PROF_EVENT(211, #nAME"-slow1"); \
1561 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1562 return; \
1563 } \
1564 \
1565 UWord sec_no = (UWord)(a >> 16); \
1566 \
1567 if (VG_DEBUG_MEMORY >= 1) \
1568 tl_assert(sec_no < N_PRIMARY_MAP); \
1569 \
1570 SecMap* sm = primary_map[sec_no]; \
1571 UWord v_off = a & 0xFFFF; \
1572 UWord a_off = v_off >> 3; \
1573 UWord abits = (UWord)(sm->abits[a_off]); \
1574 \
1575 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1576 && abits == VGM_BYTE_VALID)) { \
1577 /* Handle common case quickly: a is suitably aligned, */ \
1578         /* is mapped, and is addressable. */                        \
1579 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes; \
1580 } else { \
1581 /* Slow but general case. */ \
1582 PROF_EVENT(212, #nAME"-slow2"); \
1583 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1584 } \
sewardjf9d81612005-04-23 23:25:49 +00001585 }
1586
sewardj8cf88b72005-07-08 01:29:33 +00001587MAKE_STOREV8( MC_(helperc_STOREV8be), True /*bigendian*/ );
1588MAKE_STOREV8( MC_(helperc_STOREV8le), False/*littleendian*/ );
sewardj95448072004-11-22 20:19:51 +00001589
sewardj95448072004-11-22 20:19:51 +00001590
1591/* ------------------------ Size = 4 ------------------------ */
1592
sewardj8cf88b72005-07-08 01:29:33 +00001593#define MAKE_LOADV4(nAME,iS_BIGENDIAN) \
1594 \
1595 VG_REGPARM(1) \
1596 UWord nAME ( Addr aA ) \
1597 { \
1598 PROF_EVENT(220, #nAME); \
1599 \
1600 if (VG_DEBUG_MEMORY >= 2) \
1601 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1602 \
1603 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1604 UWord a = (UWord)aA; \
1605 \
1606 /* If any part of 'a' indicated by the mask is 1, either */ \
1607 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1608 /* covered by the primary map. Either way we defer to the */ \
1609 /* slow-path case. */ \
1610 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1611 PROF_EVENT(221, #nAME"-slow1"); \
1612 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1613 } \
1614 \
1615 UWord sec_no = (UWord)(a >> 16); \
1616 \
1617 if (VG_DEBUG_MEMORY >= 1) \
1618 tl_assert(sec_no < N_PRIMARY_MAP); \
1619 \
1620 SecMap* sm = primary_map[sec_no]; \
1621 UWord v_off = a & 0xFFFF; \
1622 UWord a_off = v_off >> 3; \
1623 UWord abits = (UWord)(sm->abits[a_off]); \
1624 abits >>= (a & 4); \
1625 abits &= 15; \
1626 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) { \
1627 /* Handle common case quickly: a is suitably aligned, */ \
1628 /* is mapped, and is addressible. */ \
1629         /* is mapped, and is addressable. */                        \
1630 /* bits out of the vbyte array. On a 64-bit platform, */ \
1631 /* also set the upper 32 bits to 1 ("undefined"), just */ \
1632 /* in case. This almost certainly isn't necessary, */ \
1633 /* but be paranoid. */ \
1634 UWord ret = (UWord)0xFFFFFFFF00000000ULL; \
1635 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] ); \
1636 return ret; \
1637 } else { \
1638 /* Slow but general case. */ \
1639 PROF_EVENT(222, #nAME"-slow2"); \
1640 return (UWord)mc_LOADVn_slow( a, 4, iS_BIGENDIAN ); \
1641 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001642 }
1643
sewardj8cf88b72005-07-08 01:29:33 +00001644MAKE_LOADV4( MC_(helperc_LOADV4be), True /*bigendian*/ );
1645MAKE_LOADV4( MC_(helperc_LOADV4le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001646
sewardjc1a2cda2005-04-21 17:34:00 +00001647
sewardj8cf88b72005-07-08 01:29:33 +00001648#define MAKE_STOREV4(nAME,iS_BIGENDIAN) \
1649 \
1650 VG_REGPARM(2) \
1651 void nAME ( Addr aA, UWord vbytes ) \
1652 { \
1653 PROF_EVENT(230, #nAME); \
1654 \
1655 if (VG_DEBUG_MEMORY >= 2) \
1656 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1657 \
1658 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1659 UWord a = (UWord)aA; \
1660 \
1661 /* If any part of 'a' indicated by the mask is 1, either */ \
1662 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1663 /* covered by the primary map. Either way we defer to the */ \
1664 /* slow-path case. */ \
1665 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1666 PROF_EVENT(231, #nAME"-slow1"); \
1667 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1668 return; \
1669 } \
1670 \
1671 UWord sec_no = (UWord)(a >> 16); \
1672 \
1673 if (VG_DEBUG_MEMORY >= 1) \
1674 tl_assert(sec_no < N_PRIMARY_MAP); \
1675 \
1676 SecMap* sm = primary_map[sec_no]; \
1677 UWord v_off = a & 0xFFFF; \
1678 UWord a_off = v_off >> 3; \
1679 UWord abits = (UWord)(sm->abits[a_off]); \
1680 abits >>= (a & 4); \
1681 abits &= 15; \
1682 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1683 && abits == VGM_NIBBLE_VALID)) { \
1684 /* Handle common case quickly: a is suitably aligned, */ \
1685         /* is mapped, and is addressable. */                        \
1686 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes; \
1687 } else { \
1688 /* Slow but general case. */ \
1689 PROF_EVENT(232, #nAME"-slow2"); \
1690 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1691 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001692 }
1693
sewardj8cf88b72005-07-08 01:29:33 +00001694MAKE_STOREV4( MC_(helperc_STOREV4be), True /*bigendian*/ );
1695MAKE_STOREV4( MC_(helperc_STOREV4le), False/*littleendian*/ );
njn25e49d8e72002-09-23 09:36:25 +00001696
njn25e49d8e72002-09-23 09:36:25 +00001697
sewardj95448072004-11-22 20:19:51 +00001698/* ------------------------ Size = 2 ------------------------ */
1699
sewardj8cf88b72005-07-08 01:29:33 +00001700#define MAKE_LOADV2(nAME,iS_BIGENDIAN) \
1701 \
1702 VG_REGPARM(1) \
1703 UWord nAME ( Addr aA ) \
1704 { \
1705 PROF_EVENT(240, #nAME); \
1706 \
1707 if (VG_DEBUG_MEMORY >= 2) \
1708 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1709 \
1710 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1711 UWord a = (UWord)aA; \
1712 \
1713 /* If any part of 'a' indicated by the mask is 1, either */ \
1714 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1715 /* covered by the primary map. Either way we defer to the */ \
1716 /* slow-path case. */ \
1717 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1718 PROF_EVENT(241, #nAME"-slow1"); \
1719 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1720 } \
1721 \
1722 UWord sec_no = (UWord)(a >> 16); \
1723 \
1724 if (VG_DEBUG_MEMORY >= 1) \
1725 tl_assert(sec_no < N_PRIMARY_MAP); \
1726 \
1727 SecMap* sm = primary_map[sec_no]; \
1728 UWord v_off = a & 0xFFFF; \
1729 UWord a_off = v_off >> 3; \
1730 UWord abits = (UWord)(sm->abits[a_off]); \
1731 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1732 /* Handle common case quickly: a is mapped, and the */ \
1733         /* entire word32 it lives in is addressable. */             \
1734 /* Set the upper 16/48 bits of the result to 1 */ \
1735 /* ("undefined"), just in case. This almost certainly */ \
1736 /* isn't necessary, but be paranoid. */ \
1737 return (~(UWord)0xFFFF) \
1738 | \
1739 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] ); \
1740 } else { \
1741 /* Slow but general case. */ \
1742 PROF_EVENT(242, #nAME"-slow2"); \
1743 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1744 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001745 }
1746
sewardj8cf88b72005-07-08 01:29:33 +00001747MAKE_LOADV2( MC_(helperc_LOADV2be), True /*bigendian*/ );
1748MAKE_LOADV2( MC_(helperc_LOADV2le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001749
sewardjc1a2cda2005-04-21 17:34:00 +00001750
sewardj8cf88b72005-07-08 01:29:33 +00001751#define MAKE_STOREV2(nAME,iS_BIGENDIAN) \
1752 \
1753 VG_REGPARM(2) \
1754 void nAME ( Addr aA, UWord vbytes ) \
1755 { \
1756 PROF_EVENT(250, #nAME); \
1757 \
1758 if (VG_DEBUG_MEMORY >= 2) \
1759 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1760 \
1761 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1762 UWord a = (UWord)aA; \
1763 \
1764 /* If any part of 'a' indicated by the mask is 1, either */ \
1765 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1766 /* covered by the primary map. Either way we defer to the */ \
1767 /* slow-path case. */ \
1768 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1769 PROF_EVENT(251, #nAME"-slow1"); \
1770 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1771 return; \
1772 } \
1773 \
1774 UWord sec_no = (UWord)(a >> 16); \
1775 \
1776 if (VG_DEBUG_MEMORY >= 1) \
1777 tl_assert(sec_no < N_PRIMARY_MAP); \
1778 \
1779 SecMap* sm = primary_map[sec_no]; \
1780 UWord v_off = a & 0xFFFF; \
1781 UWord a_off = v_off >> 3; \
1782 UWord abits = (UWord)(sm->abits[a_off]); \
1783 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1784 && abits == VGM_BYTE_VALID)) { \
1785 /* Handle common case quickly. */ \
1786 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes; \
1787 } else { \
1788 /* Slow but general case. */ \
1789 PROF_EVENT(252, #nAME"-slow2"); \
1790 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1791 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001792 }
1793
njn25e49d8e72002-09-23 09:36:25 +00001794
sewardj8cf88b72005-07-08 01:29:33 +00001795MAKE_STOREV2( MC_(helperc_STOREV2be), True /*bigendian*/ );
1796MAKE_STOREV2( MC_(helperc_STOREV2le), False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001797
njn25e49d8e72002-09-23 09:36:25 +00001798
sewardj95448072004-11-22 20:19:51 +00001799/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00001800/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00001801
njnaf839f52005-06-23 03:27:57 +00001802VG_REGPARM(1)
sewardj8cf88b72005-07-08 01:29:33 +00001803UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001804{
sewardj8cf88b72005-07-08 01:29:33 +00001805 PROF_EVENT(260, "helperc_LOADV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001806
1807# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001808 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001809# else
1810
sewardj23eb2fd2005-04-22 16:29:19 +00001811 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001812 UWord a = (UWord)aA;
1813
1814 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1815 exceeds the range covered by the primary map. In which case we
1816 defer to the slow-path case. */
1817 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001818 PROF_EVENT(261, "helperc_LOADV1-slow1");
1819 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001820 }
1821
1822 UWord sec_no = (UWord)(a >> 16);
1823
1824# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001825 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001826# endif
1827
1828 SecMap* sm = primary_map[sec_no];
1829 UWord v_off = a & 0xFFFF;
1830 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001831 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001832 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1833 /* Handle common case quickly: a is mapped, and the entire
1834         word32 it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001835 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1836 just in case. This almost certainly isn't necessary, but be
1837 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001838 return (~(UWord)0xFF)
1839 |
1840 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1841 } else {
1842 /* Slow but general case. */
sewardj8cf88b72005-07-08 01:29:33 +00001843 PROF_EVENT(262, "helperc_LOADV1-slow2");
1844 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001845 }
1846# endif
njn25e49d8e72002-09-23 09:36:25 +00001847}
1848
sewardjc1a2cda2005-04-21 17:34:00 +00001849
njnaf839f52005-06-23 03:27:57 +00001850VG_REGPARM(2)
sewardj8cf88b72005-07-08 01:29:33 +00001851void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001852{
sewardj8cf88b72005-07-08 01:29:33 +00001853 PROF_EVENT(270, "helperc_STOREV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001854
1855# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001856 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001857# else
1858
sewardj23eb2fd2005-04-22 16:29:19 +00001859 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001860 UWord a = (UWord)aA;
1861 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1862 exceeds the range covered by the primary map. In which case we
1863 defer to the slow-path case. */
1864 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001865 PROF_EVENT(271, "helperc_STOREV1-slow1");
1866 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001867 return;
1868 }
1869
1870 UWord sec_no = (UWord)(a >> 16);
1871
1872# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001873 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001874# endif
1875
1876 SecMap* sm = primary_map[sec_no];
1877 UWord v_off = a & 0xFFFF;
1878 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001879 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001880 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1881 && abits == VGM_BYTE_VALID)) {
1882 /* Handle common case quickly: a is mapped, the entire word32 it
1883         lives in is addressable. */
1884 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1885 } else {
sewardj8cf88b72005-07-08 01:29:33 +00001886 PROF_EVENT(272, "helperc_STOREV1-slow2");
1887 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001888 }
1889
1890# endif
njn25e49d8e72002-09-23 09:36:25 +00001891}
1892
1893
sewardjc859fbf2005-04-22 21:10:28 +00001894/*------------------------------------------------------------*/
1895/*--- Functions called directly from generated code: ---*/
1896/*--- Value-check failure handlers. ---*/
1897/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001898
njn5c004e42002-11-18 11:04:50 +00001899void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001900{
njn9e63cb62005-05-08 18:34:59 +00001901 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001902}
1903
njn5c004e42002-11-18 11:04:50 +00001904void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001905{
njn9e63cb62005-05-08 18:34:59 +00001906 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001907}
1908
njn5c004e42002-11-18 11:04:50 +00001909void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001910{
njn9e63cb62005-05-08 18:34:59 +00001911 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001912}
1913
sewardj11bcc4e2005-04-23 22:38:38 +00001914void MC_(helperc_value_check8_fail) ( void )
1915{
njn9e63cb62005-05-08 18:34:59 +00001916 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001917}
1918
njnaf839f52005-06-23 03:27:57 +00001919VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001920{
njn9e63cb62005-05-08 18:34:59 +00001921 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001922}
1923
njn25e49d8e72002-09-23 09:36:25 +00001924
sewardj45d94cc2005-04-20 14:44:11 +00001925//zz /*------------------------------------------------------------*/
1926//zz /*--- Metadata get/set functions, for client requests. ---*/
1927//zz /*------------------------------------------------------------*/
1928//zz
1929//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1930//zz error, 3 == addressing error. */
1931//zz static Int mc_get_or_set_vbits_for_client (
1932//zz ThreadId tid,
1933//zz Addr dataV,
1934//zz Addr vbitsV,
1935//zz SizeT size,
1936//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1937//zz )
1938//zz {
1939//zz Bool addressibleD = True;
1940//zz Bool addressibleV = True;
1941//zz UInt* data = (UInt*)dataV;
1942//zz UInt* vbits = (UInt*)vbitsV;
1943//zz SizeT szW = size / 4; /* sigh */
1944//zz SizeT i;
1945//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1946//zz UInt* vbitsP = NULL; /* ditto */
1947//zz
1948//zz /* Check alignment of args. */
1949//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1950//zz return 2;
1951//zz if ((size & 3) != 0)
1952//zz return 2;
1953//zz
1954//zz /* Check that arrays are addressible. */
1955//zz for (i = 0; i < szW; i++) {
1956//zz dataP = &data[i];
1957//zz vbitsP = &vbits[i];
1958//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1959//zz addressibleD = False;
1960//zz break;
1961//zz }
1962//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1963//zz addressibleV = False;
1964//zz break;
1965//zz }
1966//zz }
1967//zz if (!addressibleD) {
1968//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1969//zz setting ? True : False );
1970//zz return 3;
1971//zz }
1972//zz if (!addressibleV) {
1973//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1974//zz setting ? False : True );
1975//zz return 3;
1976//zz }
1977//zz
1978//zz /* Do the copy */
1979//zz if (setting) {
1980//zz /* setting */
1981//zz for (i = 0; i < szW; i++) {
1982//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00001983//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00001984//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1985//zz }
1986//zz } else {
1987//zz /* getting */
1988//zz for (i = 0; i < szW; i++) {
1989//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1990//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1991//zz }
1992//zz }
1993//zz
1994//zz return 1;
1995//zz }
sewardj05fe85e2005-04-27 22:46:36 +00001996
1997
1998/*------------------------------------------------------------*/
1999/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
2000/*------------------------------------------------------------*/
2001
2002/* For the memory leak detector, say whether an entire 64k chunk of
2003 address space is possibly in use, or not. If in doubt return
2004 True.
2005*/
2006static
2007Bool mc_is_within_valid_secondary ( Addr a )
2008{
2009 SecMap* sm = maybe_get_secmap_for ( a );
2010 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
2011 /* Definitely not in use. */
2012 return False;
2013 } else {
2014 return True;
2015 }
2016}
2017
2018
2019/* For the memory leak detector, say whether or not a given word
2020 address is to be regarded as valid. */
2021static
2022Bool mc_is_valid_aligned_word ( Addr a )
2023{
2024 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
2025 if (sizeof(UWord) == 4) {
2026 tl_assert(VG_IS_4_ALIGNED(a));
2027 } else {
2028 tl_assert(VG_IS_8_ALIGNED(a));
2029 }
2030 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
2031 return True;
2032 } else {
2033 return False;
2034 }
2035}
sewardja4495682002-10-21 07:29:59 +00002036
2037
nethercote996901a2004-08-03 13:29:09 +00002038/* Leak detector for this tool.  We don't do anything ourselves; we merely
sewardja4495682002-10-21 07:29:59 +00002039   run the generic leak detector with parameters suitable for this
nethercote996901a2004-08-03 13:29:09 +00002040   tool. */
njnb8dca862005-03-14 02:42:44 +00002041static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00002042{
sewardj05fe85e2005-04-27 22:46:36 +00002043 MAC_(do_detect_memory_leaks) (
2044 tid,
2045 mode,
2046 mc_is_within_valid_secondary,
2047 mc_is_valid_aligned_word
2048 );
njn25e49d8e72002-09-23 09:36:25 +00002049}
2050
2051
sewardjc859fbf2005-04-22 21:10:28 +00002052/*------------------------------------------------------------*/
2053/*--- Initialisation ---*/
2054/*------------------------------------------------------------*/
2055
2056static void init_shadow_memory ( void )
2057{
2058 Int i;
2059 SecMap* sm;
2060
2061 /* Build the 3 distinguished secondaries */
2062 tl_assert(VGM_BIT_INVALID == 1);
2063 tl_assert(VGM_BIT_VALID == 0);
2064 tl_assert(VGM_BYTE_INVALID == 0xFF);
2065 tl_assert(VGM_BYTE_VALID == 0);
2066
2067 /* Set A invalid, V invalid. */
2068 sm = &sm_distinguished[SM_DIST_NOACCESS];
2069 for (i = 0; i < 65536; i++)
2070 sm->vbyte[i] = VGM_BYTE_INVALID;
2071 for (i = 0; i < 8192; i++)
2072 sm->abits[i] = VGM_BYTE_INVALID;
2073
2074 /* Set A valid, V invalid. */
2075 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2076 for (i = 0; i < 65536; i++)
2077 sm->vbyte[i] = VGM_BYTE_INVALID;
2078 for (i = 0; i < 8192; i++)
2079 sm->abits[i] = VGM_BYTE_VALID;
2080
2081 /* Set A valid, V valid. */
2082 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2083 for (i = 0; i < 65536; i++)
2084 sm->vbyte[i] = VGM_BYTE_VALID;
2085 for (i = 0; i < 8192; i++)
2086 sm->abits[i] = VGM_BYTE_VALID;
2087
2088 /* Set up the primary map. */
2089 /* These entries gradually get overwritten as the used address
2090 space expands. */
2091 for (i = 0; i < N_PRIMARY_MAP; i++)
2092 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2093
2094   /* auxmap_size and auxmap_used are statically initialised, so there
2095      is no need to reset them here. */
2096}
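
/* Back-of-the-envelope note (added for illustration): as initialised
   above, each SecMap carries 65536 V bytes plus 8192 A-bit bytes, i.e.
   72KB of shadow state for every 64KB of client address space that gets
   its own secondary.  That is an overhead of 9/8 on top of the client's
   own memory; untouched regions cost nothing extra, since they all share
   the three distinguished secondaries built above. */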
2097
2098
2099/*------------------------------------------------------------*/
2100/*--- Sanity check machinery (permanently engaged) ---*/
2101/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002102
njn51d827b2005-05-09 01:02:08 +00002103static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002104{
jseward9800fd32004-01-04 23:08:04 +00002105 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002106 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002107 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002108 return True;
njn25e49d8e72002-09-23 09:36:25 +00002109}
2110
njn51d827b2005-05-09 01:02:08 +00002111static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002112{
sewardj23eb2fd2005-04-22 16:29:19 +00002113 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002114 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002115 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002116
sewardj23eb2fd2005-04-22 16:29:19 +00002117 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002118 PROF_EVENT(491, "expensive_sanity_check");
2119
sewardj23eb2fd2005-04-22 16:29:19 +00002120 /* Check that the 3 distinguished SMs are still as they should
2121 be. */
njn25e49d8e72002-09-23 09:36:25 +00002122
sewardj45d94cc2005-04-20 14:44:11 +00002123 /* Check A invalid, V invalid. */
2124 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002125 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002126 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002127 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002128 for (i = 0; i < 8192; i++)
2129 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002130 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002131
sewardj45d94cc2005-04-20 14:44:11 +00002132 /* Check A valid, V invalid. */
2133 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2134 for (i = 0; i < 65536; i++)
2135 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002136 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002137 for (i = 0; i < 8192; i++)
2138 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002139 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002140
2141 /* Check A valid, V valid. */
2142 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2143 for (i = 0; i < 65536; i++)
2144 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002145 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002146 for (i = 0; i < 8192; i++)
2147 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002148 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002149
sewardj23eb2fd2005-04-22 16:29:19 +00002150 if (bad) {
2151 VG_(printf)("memcheck expensive sanity: "
2152 "distinguished_secondaries have changed\n");
2153 return False;
2154 }
2155
2156 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002157 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002158 bad = True;
2159
2160 if (bad) {
2161 VG_(printf)("memcheck expensive sanity: "
2162 "nonsensical auxmap sizing\n");
2163 return False;
2164 }
2165
2166 /* check that the number of secmaps issued matches the number that
2167 are reachable (iow, no secmap leaks) */
2168 n_secmaps_found = 0;
2169 for (i = 0; i < N_PRIMARY_MAP; i++) {
2170 if (primary_map[i] == NULL) {
2171 bad = True;
2172 } else {
2173 if (!is_distinguished_sm(primary_map[i]))
2174 n_secmaps_found++;
2175 }
2176 }
2177
2178 for (i = 0; i < auxmap_used; i++) {
2179 if (auxmap[i].sm == NULL) {
2180 bad = True;
2181 } else {
2182 if (!is_distinguished_sm(auxmap[i].sm))
2183 n_secmaps_found++;
2184 }
2185 }
2186
2187 if (n_secmaps_found != n_secmaps_issued)
2188 bad = True;
2189
2190 if (bad) {
2191 VG_(printf)("memcheck expensive sanity: "
2192 "apparent secmap leakage\n");
2193 return False;
2194 }
2195
2196 /* check that auxmap only covers address space that the primary
2197 doesn't */
2198
2199 for (i = 0; i < auxmap_used; i++)
2200 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2201 bad = True;
2202
2203 if (bad) {
2204 VG_(printf)("memcheck expensive sanity: "
2205 "auxmap covers wrong address space\n");
2206 return False;
2207 }
2208
2209 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002210
2211 return True;
2212}
sewardj45d94cc2005-04-20 14:44:11 +00002213
njn25e49d8e72002-09-23 09:36:25 +00002214
njn25e49d8e72002-09-23 09:36:25 +00002215/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002216/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002217/*------------------------------------------------------------*/
2218
njn51d827b2005-05-09 01:02:08 +00002219Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00002220
njn51d827b2005-05-09 01:02:08 +00002221static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002222{
njn45270a22005-03-27 01:00:11 +00002223 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00002224 else
njn43c799e2003-04-08 00:08:52 +00002225 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002226
2227 return True;
njn25e49d8e72002-09-23 09:36:25 +00002228}
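
/* Illustrative usage (added for this write-up): the flag handled above
   is a plain yes|no option, so a run that disables it would be invoked
   along these lines (program name hypothetical):

      valgrind --tool=memcheck --avoid-strlen-errors=no ./myprog

   Anything not matched here falls through to the common MAC_ option
   handling via MAC_(process_common_cmd_line_option). */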
2229
njn51d827b2005-05-09 01:02:08 +00002230static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002231{
njn3e884182003-04-15 13:03:23 +00002232 MAC_(print_common_usage)();
2233 VG_(printf)(
2234" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
2235 );
2236}
2237
njn51d827b2005-05-09 01:02:08 +00002238static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002239{
2240 MAC_(print_common_debug_usage)();
2241 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00002242" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00002243 );
njn25e49d8e72002-09-23 09:36:25 +00002244}
2245
nethercote8b76fe52004-11-08 19:20:09 +00002246/*------------------------------------------------------------*/
2247/*--- Client requests ---*/
2248/*------------------------------------------------------------*/
2249
2250/* Client block management:
2251
2252 This is managed as an expanding array of client block descriptors.
2253 Indices of live descriptors are issued to the client, so it can ask
2254 to free them later. Therefore we cannot slide live entries down
2255 over dead ones. Instead we must use free/inuse flags and scan for
2256 an empty slot at allocation time. This in turn means allocation is
2257 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002258
sewardjedc75ab2005-03-15 23:30:32 +00002259 An unused block has start == size == 0
2260*/
nethercote8b76fe52004-11-08 19:20:09 +00002261
2262typedef
2263 struct {
2264 Addr start;
2265 SizeT size;
2266 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00002267 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002268 }
2269 CGenBlock;
2270
2271/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002272static UInt cgb_size = 0;
2273static UInt cgb_used = 0;
2274static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002275
2276/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002277static UInt cgb_used_MAX = 0; /* Max in use. */
2278static UInt cgb_allocs = 0; /* Number of allocs. */
2279static UInt cgb_discards = 0; /* Number of discards. */
2280static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002281
2282
2283static
njn695c16e2005-03-27 03:40:28 +00002284Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002285{
2286 UInt i, sz_new;
2287 CGenBlock* cgbs_new;
2288
njn695c16e2005-03-27 03:40:28 +00002289 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002290
njn695c16e2005-03-27 03:40:28 +00002291 for (i = 0; i < cgb_used; i++) {
2292 cgb_search++;
2293 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002294 return i;
2295 }
2296
2297 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002298 if (cgb_used < cgb_size) {
2299 cgb_used++;
2300 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002301 }
2302
2303 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002304 tl_assert(cgb_used == cgb_size);
2305 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002306
2307 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002308 for (i = 0; i < cgb_used; i++)
2309 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002310
njn695c16e2005-03-27 03:40:28 +00002311 if (cgbs != NULL)
2312 VG_(free)( cgbs );
2313 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002314
njn695c16e2005-03-27 03:40:28 +00002315 cgb_size = sz_new;
2316 cgb_used++;
2317 if (cgb_used > cgb_used_MAX)
2318 cgb_used_MAX = cgb_used;
2319 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002320}
2321
2322
2323static void show_client_block_stats ( void )
2324{
2325 VG_(message)(Vg_DebugMsg,
2326 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002327 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002328 );
2329}
2330
nethercote8b76fe52004-11-08 19:20:09 +00002331static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2332{
2333 UInt i;
2334 /* VG_(printf)("try to identify %d\n", a); */
2335
2336 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002337 for (i = 0; i < cgb_used; i++) {
2338 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002339 continue;
njn717cde52005-05-10 02:47:21 +00002340 // Use zero as the redzone for client blocks.
2341 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00002342 /* OK - maybe it's a mempool, too? */
njn12627272005-08-14 18:32:16 +00002343 MAC_Mempool* mp = VG_(HT_lookup)(MAC_(mempool_list),
2344 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00002345 if (mp != NULL) {
2346 if (mp->chunks != NULL) {
2347 MAC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00002348 VG_(HT_ResetIter)(mp->chunks);
2349 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0cb0d2005-08-15 01:52:02 +00002350 if (VG_(addr_is_in_block)(a, mc->data, mc->size,
2351 MAC_MALLOC_REDZONE_SZB)) {
2352 ai->akind = UserG;
2353 ai->blksize = mc->size;
2354 ai->rwoffset = (Int)(a) - (Int)mc->data;
2355 ai->lastchange = mc->where;
2356 return True;
2357 }
nethercote8b76fe52004-11-08 19:20:09 +00002358 }
2359 }
njn1d0cb0d2005-08-15 01:52:02 +00002360 ai->akind = Mempool;
2361 ai->blksize = cgbs[i].size;
2362 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002363 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002364 return True;
2365 }
njn1d0cb0d2005-08-15 01:52:02 +00002366 ai->akind = UserG;
2367 ai->blksize = cgbs[i].size;
2368 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002369 ai->lastchange = cgbs[i].where;
njn1d0cb0d2005-08-15 01:52:02 +00002370 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002371 return True;
2372 }
2373 }
2374 return False;
2375}
2376
njn51d827b2005-05-09 01:02:08 +00002377static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002378{
2379 Int i;
2380 Bool ok;
2381 Addr bad_addr;
2382
njnfc26ff92004-11-22 19:12:49 +00002383 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002384 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2385 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2386 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2387 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2388 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2389 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2390 return False;
2391
2392 switch (arg[0]) {
2393 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2394 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2395 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002396 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2397 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002398 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00002399 break;
nethercote8b76fe52004-11-08 19:20:09 +00002400
2401 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2402 MC_ReadResult res;
2403 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2404 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002405 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2406 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002407 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002408 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2409 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002410 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00002411 break;
nethercote8b76fe52004-11-08 19:20:09 +00002412 }
2413
2414 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002415 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00002416 *ret = 0; /* return value is meaningless */
2417 break;
nethercote8b76fe52004-11-08 19:20:09 +00002418
2419 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002420 mc_make_noaccess ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002421 *ret = -1;
2422 break;
nethercote8b76fe52004-11-08 19:20:09 +00002423
2424 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002425 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002426 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00002427 break;
nethercote8b76fe52004-11-08 19:20:09 +00002428
2429 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002430 mc_make_readable ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002431 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002432 break;
2433
sewardjedc75ab2005-03-15 23:30:32 +00002434 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00002435 if (arg[1] != 0 && arg[2] != 0) {
2436 i = alloc_client_block();
2437 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2438 cgbs[i].start = arg[1];
2439 cgbs[i].size = arg[2];
2440 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2441 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002442
sewardj8cf88b72005-07-08 01:29:33 +00002443 *ret = i;
2444 } else
2445 *ret = -1;
2446 break;
sewardjedc75ab2005-03-15 23:30:32 +00002447
nethercote8b76fe52004-11-08 19:20:09 +00002448 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002449 if (cgbs == NULL
2450 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00002451 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002452 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00002453 } else {
2454 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2455 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2456 VG_(free)(cgbs[arg[2]].desc);
2457 cgb_discards++;
2458 *ret = 0;
2459 }
2460 break;
nethercote8b76fe52004-11-08 19:20:09 +00002461
sewardj45d94cc2005-04-20 14:44:11 +00002462//zz case VG_USERREQ__GET_VBITS:
2463//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2464//zz error. */
2465//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2466//zz *ret = mc_get_or_set_vbits_for_client
2467//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2468//zz break;
2469//zz
2470//zz case VG_USERREQ__SET_VBITS:
2471//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2472//zz error. */
2473//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2474//zz *ret = mc_get_or_set_vbits_for_client
2475//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2476//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002477
2478 default:
2479 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2480 return True;
2481 } else {
2482 VG_(message)(Vg_UserMsg,
2483 "Warning: unknown memcheck client request code %llx",
2484 (ULong)arg[0]);
2485 return False;
2486 }
2487 }
2488 return True;
2489}
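
/* Illustrative client-side sketch (added for this write-up, not part of
   this file): the requests handled above are normally issued from the
   client program through the wrapper macros in memcheck.h rather than by
   building request codes by hand.  The macro names below are assumed to
   match the memcheck.h of this era, and buf/len are hypothetical. */
#if 0   /* sketch only; not compiled into the tool */
#include <string.h>
#include "memcheck.h"

static void example_client_requests ( char* buf, unsigned long len )
{
   VALGRIND_MAKE_WRITABLE(buf, len);    /* A valid, V invalid           */
   memset(buf, 0, len);                 /* now actually initialise it   */
   VALGRIND_CHECK_READABLE(buf, len);   /* complain about any byte that */
                                        /* is unaddressable/undefined   */
   VALGRIND_MAKE_NOACCESS(buf, len);    /* A invalid from here on       */
   VALGRIND_DO_LEAK_CHECK;              /* run a full leak check now    */
}
#endif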
njn25e49d8e72002-09-23 09:36:25 +00002490
2491/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002492/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002493/*------------------------------------------------------------*/
2494
njn51d827b2005-05-09 01:02:08 +00002495static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002496{
sewardj71bc3cb2005-05-19 00:25:45 +00002497 /* If we've been asked to emit XML, mash around various other
2498 options so as to constrain the output somewhat. */
2499 if (VG_(clo_xml)) {
2500 /* Extract as much info as possible from the leak checker. */
sewardj09890d82005-05-20 02:45:15 +00002501 /* MAC_(clo_show_reachable) = True; */
sewardj71bc3cb2005-05-19 00:25:45 +00002502 MAC_(clo_leak_check) = LC_Full;
2503 }
njn5c004e42002-11-18 11:04:50 +00002504}
2505
njn51d827b2005-05-09 01:02:08 +00002506static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002507{
nethercote8b76fe52004-11-08 19:20:09 +00002508 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002509
sewardj23eb2fd2005-04-22 16:29:19 +00002510 Int i, n_accessible_dist;
2511 SecMap* sm;
2512
sewardj45d94cc2005-04-20 14:44:11 +00002513 if (VG_(clo_verbosity) > 1) {
2514 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002515 " memcheck: sanity checks: %d cheap, %d expensive",
2516 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002517 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002518 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2519 auxmap_used,
2520 auxmap_used * 64,
2521 auxmap_used / 16 );
2522 VG_(message)(Vg_DebugMsg,
2523 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002524 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002525 VG_(message)(Vg_DebugMsg,
2526 " memcheck: secondaries: %d issued (%dk, %dM)",
2527 n_secmaps_issued,
2528 n_secmaps_issued * 64,
2529 n_secmaps_issued / 16 );
2530
2531 n_accessible_dist = 0;
2532 for (i = 0; i < N_PRIMARY_MAP; i++) {
2533 sm = primary_map[i];
2534 if (is_distinguished_sm(sm)
2535 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2536 n_accessible_dist ++;
2537 }
2538 for (i = 0; i < auxmap_used; i++) {
2539 sm = auxmap[i].sm;
2540 if (is_distinguished_sm(sm)
2541 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2542 n_accessible_dist ++;
2543 }
2544
2545 VG_(message)(Vg_DebugMsg,
2546 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2547 n_accessible_dist,
2548 n_accessible_dist * 64,
2549 n_accessible_dist / 16 );
2550
sewardj45d94cc2005-04-20 14:44:11 +00002551 }
2552
njn5c004e42002-11-18 11:04:50 +00002553 if (0) {
2554 VG_(message)(Vg_DebugMsg,
2555 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002556 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002557 }
njn25e49d8e72002-09-23 09:36:25 +00002558}
2559
njn51d827b2005-05-09 01:02:08 +00002560static void mc_pre_clo_init(void)
2561{
2562 VG_(details_name) ("Memcheck");
2563 VG_(details_version) (NULL);
2564 VG_(details_description) ("a memory error detector");
2565 VG_(details_copyright_author)(
2566 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
2567 VG_(details_bug_reports_to) (VG_BUGS_TO);
2568 VG_(details_avg_translation_sizeB) ( 370 );
2569
2570 VG_(basic_tool_funcs) (mc_post_clo_init,
2571 MC_(instrument),
2572 mc_fini);
2573
2574 VG_(needs_core_errors) ();
2575 VG_(needs_tool_errors) (MAC_(eq_Error),
2576 mc_pp_Error,
2577 MAC_(update_extra),
2578 mc_recognised_suppression,
2579 MAC_(read_extra_suppression_info),
2580 MAC_(error_matches_suppression),
2581 MAC_(get_error_name),
2582 MAC_(print_extra_suppression_info));
2583 VG_(needs_libc_freeres) ();
2584 VG_(needs_command_line_options)(mc_process_cmd_line_option,
2585 mc_print_usage,
2586 mc_print_debug_usage);
2587 VG_(needs_client_requests) (mc_handle_client_request);
2588 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
2589 mc_expensive_sanity_check);
2590 VG_(needs_shadow_memory) ();
2591
njnfc51f8d2005-06-21 03:20:17 +00002592 VG_(needs_malloc_replacement) (MAC_(malloc),
njn51d827b2005-05-09 01:02:08 +00002593 MAC_(__builtin_new),
2594 MAC_(__builtin_vec_new),
2595 MAC_(memalign),
2596 MAC_(calloc),
2597 MAC_(free),
2598 MAC_(__builtin_delete),
2599 MAC_(__builtin_vec_delete),
2600 MAC_(realloc),
2601 MAC_MALLOC_REDZONE_SZB );
2602
2603 MAC_( new_mem_heap) = & mc_new_mem_heap;
2604 MAC_( ban_mem_heap) = & mc_make_noaccess;
2605 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
2606 MAC_( die_mem_heap) = & mc_make_noaccess;
2607 MAC_(check_noaccess) = & mc_check_noaccess;
2608
2609 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
2610 VG_(track_new_mem_stack_signal)( & mc_make_writable );
2611 VG_(track_new_mem_brk) ( & mc_make_writable );
2612 VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );
2613
2614 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
2615
2616 VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
2617 VG_(track_die_mem_brk) ( & mc_make_noaccess );
2618 VG_(track_die_mem_munmap) ( & mc_make_noaccess );
2619
2620 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2621 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2622 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2623 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2624 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2625 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
2626
2627 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2628 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2629 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2630 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2631 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2632 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
2633
2634 VG_(track_ban_mem_stack) ( & mc_make_noaccess );
2635
2636 VG_(track_pre_mem_read) ( & mc_check_is_readable );
2637 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2638 VG_(track_pre_mem_write) ( & mc_check_is_writable );
2639 VG_(track_post_mem_write) ( & mc_post_mem_write );
2640
2641 VG_(track_pre_reg_read) ( & mc_pre_reg_read );
2642
2643 VG_(track_post_reg_write) ( & mc_post_reg_write );
2644 VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );
2645
2646 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2647 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2648 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
2649
2650 /* Additional block description for VG_(describe_addr)() */
2651 MAC_(describe_addr_supp) = client_perm_maybe_describe;
2652
2653 init_shadow_memory();
2654 MAC_(common_pre_clo_init)();
2655
2656 tl_assert( mc_expensive_sanity_check() );
2657}
2658
sewardj45f4e7c2005-09-27 19:20:21 +00002659VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00002660
njn25e49d8e72002-09-23 09:36:25 +00002661/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002662/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002663/*--------------------------------------------------------------------*/