
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the           ---*/
/*--- accessibility (A) and validity (V) status of each byte.      ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO 22 Apr 05

   test whether it would be faster, for LOADV4, to check
   only for 8-byte validity on the fast path
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_errormgr.h"      // For mac_shared.h
#include "pub_tool_execontext.h"    // For mac_shared.h
#include "pub_tool_hashtable.h"     // For mac_shared.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_profile.h"       // For mac_shared.h
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)

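/* These are thin wrappers around gcc's __builtin_expect, which leaves
   the condition's value unchanged but tells the compiler which way the
   branch is expected to go.  Illustrative use, as in the fast-path
   helpers further down this file:

      if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
         // rarely taken: fall back to the general (slow) case
         mc_make_writable(aA, 4);
         return;
      }
*/
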
/* Define to debug the mem audit system.  Set to:
      0  no debugging, fast cases are used
      1  some sanity checking, fast cases are used
      2  max sanity checking, only slow cases are used
*/
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* TODO: fix this comment */
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates 'not addressable and
//zz    not valid' writeable for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 4 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, addressable+valid.
//zz
//zz    [...] lots of stuff deleted due to out of date-ness
//zz
//zz    As a final optimisation, the alignment and address checks for
//zz    4-byte loads and stores are combined in a neat way.  The primary
//zz    map is extended to have 262144 entries (2^18), rather than 2^16.
//zz    The top 3/4 of these entries are permanently set to the
//zz    distinguished secondary map.  For a 4-byte load/store, the
//zz    top-level map is indexed not with (addr >> 16) but instead f(addr),
//zz    where
//zz
//zz     f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
//zz        = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
//zz        = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
//zz
//zz    ie the lowest two bits are placed above the 16 high address bits.
//zz    If either of these two bits are nonzero, the address is misaligned;
//zz    this will select a secondary map from the upper 3/4 of the primary
//zz    map.  Because this is always the distinguished secondary map, a
//zz    (bogus) address check failure will result.  The failure handling
//zz    code can then figure out whether this is a genuine addr check
//zz    failure or whether it is a possibly-legitimate access at a
//zz    misaligned address.
//zz */

/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)

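/* Worked out: each primary map entry covers a 64KB chunk of address
   space, so with N_PRIMARY_BITS == 16 (32-bit hosts) the main map
   spans 2^16 * 64KB == 4GB, i.e. the whole address space, and with
   N_PRIMARY_BITS == 19 (64-bit hosts) it spans 2^19 * 64KB == 32GB,
   matching the comments above.  Anything above MAX_PRIMARY_ADDRESS is
   handled by the auxiliary primary map below. */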

/* --------------- Stats maps --------------- */

static Int   n_secmaps_issued   = 0;
static ULong n_auxmap_searches  = 0;
static ULong n_auxmap_cmps      = 0;
static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;


/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

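/* Size check: one SecMap shadows a 64KB chunk, with one A
   (accessibility) bit and one V (validity) byte per client byte:
   65536 A bits packed into 8192 bytes, plus 65536 V bytes, ie 73728
   bytes per secondary in total.  Illustrative sketch (not part of the
   original code) of how the state of the byte at address 'a' is
   located -- compare get_abit_and_vbyte() further down:

      UWord off   = a & 0xFFFF;            // offset within the 64KB chunk
      UWord vbyte = sm->vbyte[off];        // its V byte
      UWord abit  = (sm->abits[off >> 3] >> (off & 7)) & 1;   // its A bit

   The exact A-bit numbering within each byte is whatever
   read_bit_array/write_bit_array use; the last line above is only an
   assumption for illustration. */
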
/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
   if (new_sm == NULL)
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
                                   sizeof(SecMap) );
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;
   return new_sm;
}


/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 20000 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, return NULL.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   return NULL;
}


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);
   if (am)
      return am;

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}


/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}



/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}


/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th most significant byte
   in a wordszB-sized word, given the specified endianness. */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}
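
/* For example, evaluating the formula above for a 4-byte word:
   byte_offset_w(4, False, 0..3) gives 0,1,2,3, while
   byte_offset_w(4, True, 0..3) gives 3,2,1,0 -- ie the byte order is
   simply reversed on big-endian hosts.  The slow-path load/store
   routines below use this to walk the bytes of a word in significance
   order regardless of host endianness. */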


/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}


/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok, partial_load_exemption_applies;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   /* This is a hack which avoids producing errors for code which
      insists on stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressable place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MAC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
                                   && VG_IS_WORD_ALIGNED(a)
                                   && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}


static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}


//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
//zz
//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   abits8;
//zz    PROF_EVENT(24);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    abits8 = sm->abits[sm_off >> 3];
//zz    abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    abits8 &= 0x0F;
//zz    return abits8;
//zz }
//zz
//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(25);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    return ((UInt*)(sm->vbyte))[sm_off >> 2];
//zz }
//zz
//zz
//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    PROF_EVENT(23);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
//zz }


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

/* Given address 'a', find the place where the pointer to a's
   secondary map lives.  If a falls into the primary map, the returned
   value points to one of the entries in primary_map[].  Otherwise,
   the auxiliary primary map is searched for 'a', or an entry is
   created for it; either way, the returned value points to the
   relevant AuxMapEnt's .sm field.

   The point of this is to enable set_address_range_perms to assign
   secondary maps in a uniform way, without worrying about whether a
   given secondary map is pointed to from the main or auxiliary
   primary map.
*/

static SecMap** find_secmap_binder_for_addr ( Addr aA )
{
   if (aA > MAX_PRIMARY_ADDRESS) {
      AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
      return &am->sm;
   } else {
      UWord a      = (UWord)aA;
      UWord sec_no = (UWord)(a >> 16);
#     if VG_DEBUG_MEMORY >= 1
      tl_assert(sec_no < N_PRIMARY_MAP);
#     endif
      return &primary_map[sec_no];
   }
}


static void set_address_range_perms ( Addr aA, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   UWord    a, vbits8, abits8, vbits32, v_off, a_off;
   SecMap*  sm;
   SecMap** binder;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the permissions make sense. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %lu, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   a = (UWord)aA;

#  if VG_DEBUG_MEMORY >= 2

   /*------------------ debug-only case ------------------ */
   { SizeT i;

     UWord example_vbyte = BIT_TO_BYTE(example_v_bit);

     tl_assert(sizeof(SizeT) == sizeof(Addr));

     if (0 && len >= 4096)
        VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                    (ULong)a, len, example_a_bit, example_v_bit);

     if (len == 0)
        return;

     for (i = 0; i < len; i++) {
        set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
     }
   }

#  else

   /*------------------ standard handling ------------------ */

   /* Decide on the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   if (example_a_bit == VGM_BIT_INVALID) {
      example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
   } else {
      if (example_v_bit == VGM_BIT_VALID) {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
      } else {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
      }
   }

   /* Make various wider versions of the A/V values to use. */
   vbits8  = BIT_TO_BYTE(example_v_bit);
   abits8  = BIT_TO_BYTE(example_a_bit);
   vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;

   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(151, "set_address_range_perms-loop1-pre");
      if (VG_IS_8_ALIGNED(a)) break;
      set_abit_and_vbyte( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0);

   /* Now go in steps of 8 bytes. */
   binder = find_secmap_binder_for_addr(a);

   while (True) {

      if (len < 8) break;

      PROF_EVENT(152, "set_address_range_perms-loop8");

      if ((a & SECONDARY_MASK) == 0) {
         /* we just traversed a primary map boundary, so update the
            binder. */
         binder = find_secmap_binder_for_addr(a);
         PROF_EVENT(153, "set_address_range_perms-update-binder");

         /* Space-optimisation.  If we are setting the entire
            secondary map, just point this entry at one of our
            distinguished secondaries.  However, only do that if it
            already points at a distinguished secondary, since doing
            otherwise would leak the existing secondary.  We could do
            better and free up any pre-existing non-distinguished
            secondary at this point, since we are guaranteed that each
            non-dist secondary only has one pointer to it, and we have
            that pointer right here. */
         if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
            PROF_EVENT(154, "set_address_range_perms-entire-secmap");
            *binder = example_dsm;
            len -= SECONDARY_SIZE;
            a += SECONDARY_SIZE;
            continue;
         }
      }

      /* If the primary is already pointing to a distinguished map
         with the same properties as we're trying to set, then leave
         it that way. */
      if (*binder == example_dsm) {
         a += 8;
         len -= 8;
         continue;
      }

      /* Make sure it's OK to write the secondary. */
      if (is_distinguished_sm(*binder))
         *binder = copy_for_writing(*binder);

      sm = *binder;
      v_off = a & 0xFFFF;
      a_off = v_off >> 3;
      sm->abits[a_off] = (UChar)abits8;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;

      a += 8;
      len -= 8;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(155, "set_address_range_perms-loop1-post");
      set_abit_and_vbyte ( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

#  endif
}


/* --- Set permissions for arbitrary address ranges --- */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(40, "mc_make_noaccess");
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(41, "mc_make_writable");
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(42, "mc_make_readable");
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}

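/* In A/V terms: noaccess marks the range not addressable (A and V both
   invalid), writable marks it addressable but undefined (A valid, V
   invalid), and readable marks it addressable and defined (A and V
   both valid) -- matching the three distinguished secondaries above. */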

/* --- Block-copy permissions (needed for implementing realloc() and
       sys_mremap). --- */

static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i, j;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");
   PROF_EVENT(50, "mc_copy_address_range_state");

   if (len == 0)
      return;

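   /* As with memmove, the copy direction depends on how the ranges
      overlap: when dst is above src, copy downwards from the top so
      that shadow state for not-yet-copied source bytes is not
      clobbered first; when dst is below src, copy upwards. */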
   if (src < dst) {
      for (i = 0, j = len-1; i < len; i++, j--) {
         PROF_EVENT(51, "mc_copy_address_range_state(loop)");
         get_abit_and_vbyte( &abit, &vbyte, src+j );
         set_abit_and_vbyte( dst+j, abit, vbyte );
      }
   }

   if (src > dst) {
      for (i = 0; i < len; i++) {
         PROF_EVENT(51, "mc_copy_address_range_state(loop)");
         get_abit_and_vbyte( &abit, &vbyte, src+i );
         set_abit_and_vbyte( dst+i, abit, vbyte );
      }
   }
}


/* --- Fast case permission setters, for dealing with stacks. --- */

static __inline__
void make_aligned_word32_writable ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off, mask;
   SecMap* sm;

   PROF_EVENT(300, "make_aligned_word32_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(301, "make_aligned_word32_writable-slow1");
      mc_make_writable(aA, 4);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s). */
   sm->abits[a_off] &= ~mask;
#  endif
}


static __inline__
void make_aligned_word32_noaccess ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off, mask;
   SecMap* sm;

   PROF_EVENT(310, "make_aligned_word32_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      mc_make_noaccess(aA, 4);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid
      (1s). */
   sm->abits[a_off] |= mask;
#  endif
}


/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_word64_writable ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off;
   SecMap* sm;

   PROF_EVENT(320, "make_aligned_word64_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(321, "make_aligned_word64_writable-slow1");
      mc_make_writable(aA, 8);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the relevant area accessible. */
   sm->abits[a_off] = VGM_BYTE_VALID;
#  endif
}


static __inline__
void make_aligned_word64_noaccess ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off;
   SecMap* sm;

   PROF_EVENT(330, "make_aligned_word64_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
      mc_make_noaccess(aA, 8);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the abandoned area inaccessible. */
   sm->abits[a_off] = VGM_BYTE_INVALID;
#  endif
}


/* The stack-pointer update handling functions */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );

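/* SP_UPDATE_HANDLERS comes from mac_shared.h (pulled in via
   mc_include.h); it presumably expands to the family of new/die-mem
   stack callbacks for small, aligned stack-pointer adjustments, built
   from the fast aligned helpers above and falling back to the general
   mc_make_writable/mc_make_noaccess for everything else. */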

void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
{
   tl_assert(sizeof(UWord) == sizeof(SizeT));
   if (0)
      VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );

#  if 0
   /* Really slow version */
   mc_make_writable(base, len);
#  endif

#  if 0
   /* Slow(ish) version, which is fairly easily seen to be correct.
   */
   if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
      make_aligned_word64_writable(base +   0);
      make_aligned_word64_writable(base +   8);
      make_aligned_word64_writable(base +  16);
      make_aligned_word64_writable(base +  24);

      make_aligned_word64_writable(base +  32);
      make_aligned_word64_writable(base +  40);
      make_aligned_word64_writable(base +  48);
      make_aligned_word64_writable(base +  56);

      make_aligned_word64_writable(base +  64);
      make_aligned_word64_writable(base +  72);
      make_aligned_word64_writable(base +  80);
      make_aligned_word64_writable(base +  88);

      make_aligned_word64_writable(base +  96);
      make_aligned_word64_writable(base + 104);
      make_aligned_word64_writable(base + 112);
      make_aligned_word64_writable(base + 120);
   } else {
      mc_make_writable(base, len);
   }
#  endif

   /* Idea is: go fast when
         * 8-aligned and length is 128
         * the sm is available in the main primary map
         * the address range falls entirely within a single
           secondary map
         * the SM is modifiable
      If all those conditions hold, just update the V bits
      by writing directly on the v-bit array.   We don't care
      about A bits; if the address range is marked invalid,
      any attempt to access it will elicit an addressing error,
      and that's good enough.
   */
   if (EXPECTED_TAKEN( len == 128
                       && VG_IS_8_ALIGNED(base)
      )) {
      /* Now we know the address range is suitably sized and
         aligned. */
      UWord a_lo   = (UWord)base;
      UWord a_hi   = (UWord)(base + 127);
      UWord sec_lo = a_lo >> 16;
      UWord sec_hi = a_hi >> 16;

      if (EXPECTED_TAKEN( sec_lo == sec_hi
                          && sec_lo <= N_PRIMARY_MAP
         )) {
         /* Now we know that the entire address range falls within a
            single secondary map, and that that secondary 'lives' in
            the main primary map. */
         SecMap* sm = primary_map[sec_lo];

         if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
            /* And finally, now we know that the secondary in question
               is modifiable. */
            UWord   v_off = a_lo & 0xFFFF;
            ULong*  p     = (ULong*)(&sm->vbyte[v_off]);
            p[ 0] = VGM_WORD64_INVALID;
            p[ 1] = VGM_WORD64_INVALID;
            p[ 2] = VGM_WORD64_INVALID;
            p[ 3] = VGM_WORD64_INVALID;
            p[ 4] = VGM_WORD64_INVALID;
            p[ 5] = VGM_WORD64_INVALID;
            p[ 6] = VGM_WORD64_INVALID;
            p[ 7] = VGM_WORD64_INVALID;
            p[ 8] = VGM_WORD64_INVALID;
            p[ 9] = VGM_WORD64_INVALID;
            p[10] = VGM_WORD64_INVALID;
            p[11] = VGM_WORD64_INVALID;
            p[12] = VGM_WORD64_INVALID;
            p[13] = VGM_WORD64_INVALID;
            p[14] = VGM_WORD64_INVALID;
            p[15] = VGM_WORD64_INVALID;
            return;
         }
      }
   }

   /* else fall into slow case */
   mc_make_writable(base, len);
}


/*------------------------------------------------------------*/
/*--- Checking memory                                      ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressable.  Otherwise,
1095 returns False, and if bad_addr is non-NULL, sets *bad_addr to
1096 indicate the lowest failing address. Functions below are
1097 similar. */
nethercote8b76fe52004-11-08 19:20:09 +00001098static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00001099{
nethercote451eae92004-11-02 13:06:32 +00001100 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001101 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001102 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00001103 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001104 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +00001105 abit = get_abit(a);
1106 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001107 if (bad_addr != NULL)
1108 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00001109 return False;
1110 }
1111 a++;
1112 }
1113 return True;
1114}
1115
nethercote8b76fe52004-11-08 19:20:09 +00001116static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001117{
nethercote451eae92004-11-02 13:06:32 +00001118 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001119 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +00001120 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +00001121 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001122 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +00001123 abit = get_abit(a);
1124 if (abit == VGM_BIT_INVALID) {
1125 if (bad_addr != NULL) *bad_addr = a;
1126 return False;
1127 }
1128 a++;
1129 }
1130 return True;
1131}
1132
nethercote8b76fe52004-11-08 19:20:09 +00001133static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001134{
nethercote451eae92004-11-02 13:06:32 +00001135 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +00001136 UWord abit;
1137 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +00001138
sewardjc1a2cda2005-04-21 17:34:00 +00001139 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +00001140 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +00001141 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001142 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001143 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001144 // Report addressability errors in preference to definedness errors
1145 // by checking the A bits first.
1146 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001147 if (bad_addr != NULL)
1148 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001149 return MC_AddrErr;
1150 }
1151 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001152 if (bad_addr != NULL)
1153 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001154 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001155 }
1156 a++;
1157 }
nethercote8b76fe52004-11-08 19:20:09 +00001158 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001159}
1160
1161
1162/* Check a zero-terminated ascii string. Tricky -- don't want to
1163 examine the actual bytes, to find the end, until we're sure it is
1164 safe to do so. */
1165
njn9b007f62003-04-07 14:40:25 +00001166static Bool mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00001167{
sewardj45d94cc2005-04-20 14:44:11 +00001168 UWord abit;
1169 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +00001170 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +00001171 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00001172 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +00001173 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +00001174 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +00001175 // As in mc_check_readable(), check A bits first
1176 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001177 if (bad_addr != NULL)
1178 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001179 return MC_AddrErr;
1180 }
1181 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +00001182 if (bad_addr != NULL)
1183 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +00001184 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00001185 }
1186 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +00001187 if (* ((UChar*)a) == 0)
1188 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00001189 a++;
1190 }
1191}
1192
1193
1194/*------------------------------------------------------------*/
1195/*--- Memory event handlers ---*/
1196/*------------------------------------------------------------*/
1197
njn25e49d8e72002-09-23 09:36:25 +00001198static
njn72718642003-07-24 08:45:32 +00001199void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001200 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001201{
1202 Bool ok;
1203 Addr bad_addr;
1204
1205 VGP_PUSHCC(VgpCheckMem);
1206
1207 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1208 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001209 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001210 if (!ok) {
1211 switch (part) {
1212 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001213 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1214 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001215 break;
1216
1217 case Vg_CorePThread:
1218 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001219 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001220 break;
1221
1222 default:
njn67993252004-11-22 18:02:32 +00001223 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001224 }
1225 }
1226
1227 VGP_POPCC(VgpCheckMem);
1228}
1229
1230static
njn72718642003-07-24 08:45:32 +00001231void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001232 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001233{
njn25e49d8e72002-09-23 09:36:25 +00001234 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001235 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001236
1237 VGP_PUSHCC(VgpCheckMem);
1238
nethercote8b76fe52004-11-08 19:20:09 +00001239 res = mc_check_readable ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00001240
1241 if (0)
1242 VG_(printf)("mc_check_is_readable(0x%x, %d, %s) -> %s\n",
1243 (UInt)base, (Int)size, s, res==MC_Ok ? "yes" : "no" );
1244
nethercote8b76fe52004-11-08 19:20:09 +00001245 if (MC_Ok != res) {
1246 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00001247
njn25e49d8e72002-09-23 09:36:25 +00001248 switch (part) {
1249 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001250 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1251 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001252 break;
1253
1254 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001255 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001256 break;
1257
1258 /* If we're being asked to jump to a silly address, record an error
1259 message before potentially crashing the entire system. */
1260 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001261 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001262 break;
1263
1264 default:
njn67993252004-11-22 18:02:32 +00001265 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001266 }
1267 }
1268 VGP_POPCC(VgpCheckMem);
1269}
1270
1271static
njn72718642003-07-24 08:45:32 +00001272void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001273 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001274{
nethercote8b76fe52004-11-08 19:20:09 +00001275 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001276 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001277 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1278
1279 VGP_PUSHCC(VgpCheckMem);
1280
njnca82cc02004-11-22 17:18:48 +00001281 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001282 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1283 if (MC_Ok != res) {
1284 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1285 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001286 }
1287
1288 VGP_POPCC(VgpCheckMem);
1289}
1290
njn25e49d8e72002-09-23 09:36:25 +00001291static
nethercote451eae92004-11-02 13:06:32 +00001292void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001293{
njn1f3a9092002-10-04 09:22:30 +00001294 /* Ignore the permissions, just make it readable. Seems to work... */
nethercote451eae92004-11-02 13:06:32 +00001295 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
1296 a,(ULong)len,rr,ww,xx);
nethercote8b76fe52004-11-08 19:20:09 +00001297 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001298}
1299
1300static
nethercote451eae92004-11-02 13:06:32 +00001301void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001302{
1303 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001304 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001305 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001306 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001307 }
1308}
1309
1310static
njnb8dca862005-03-14 02:42:44 +00001311void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001312{
njnb8dca862005-03-14 02:42:44 +00001313 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001314}
1315
njncf45fd42004-11-24 16:30:22 +00001316static
1317void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
1318{
1319 mc_make_readable(a, len);
1320}
njn25e49d8e72002-09-23 09:36:25 +00001321
sewardj45d94cc2005-04-20 14:44:11 +00001322
njn25e49d8e72002-09-23 09:36:25 +00001323/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001324/*--- Register event handlers ---*/
1325/*------------------------------------------------------------*/
1326
sewardj45d94cc2005-04-20 14:44:11 +00001327/* When some chunk of guest state is written, mark the corresponding
1328 shadow area as valid. This is used to initialise arbitrarily large
sewardj2c27f702005-05-03 18:19:05 +00001329 chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
sewardj45d94cc2005-04-20 14:44:11 +00001330*/
1331static void mc_post_reg_write ( CorePart part, ThreadId tid,
1332 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001333{
sewardj6cf40ff2005-04-20 22:31:26 +00001334 UChar area[1024];
1335 tl_assert(size <= 1024);
njncf45fd42004-11-24 16:30:22 +00001336 VG_(memset)(area, VGM_BYTE_VALID, size);
1337 VG_(set_shadow_regs_area)( tid, offset, size, area );
njnd3040452003-05-19 15:04:06 +00001338}
1339
sewardj45d94cc2005-04-20 14:44:11 +00001340static
1341void mc_post_reg_write_clientcall ( ThreadId tid,
1342 OffT offset, SizeT size,
1343 Addr f)
njnd3040452003-05-19 15:04:06 +00001344{
njncf45fd42004-11-24 16:30:22 +00001345 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00001346}
1347
sewardj45d94cc2005-04-20 14:44:11 +00001348/* Look at the definedness of the guest's shadow state for
1349 [offset, offset+len). If any part of that is undefined, record
1350 a parameter error.
1351*/
1352static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1353 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001354{
sewardj45d94cc2005-04-20 14:44:11 +00001355 Int i;
1356 Bool bad;
1357
1358 UChar area[16];
1359 tl_assert(size <= 16);
1360
1361 VG_(get_shadow_regs_area)( tid, offset, size, area );
1362
1363 bad = False;
1364 for (i = 0; i < size; i++) {
1365 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001366 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001367 break;
1368 }
nethercote8b76fe52004-11-08 19:20:09 +00001369 }
1370
sewardj45d94cc2005-04-20 14:44:11 +00001371 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001372 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1373}
njnd3040452003-05-19 15:04:06 +00001374
njn25e49d8e72002-09-23 09:36:25 +00001375
sewardj6cf40ff2005-04-20 22:31:26 +00001376/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001377/*--- Printing errors ---*/
1378/*------------------------------------------------------------*/
1379
njn51d827b2005-05-09 01:02:08 +00001380static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00001381{
1382 MAC_Error* err_extra = VG_(get_error_extra)(err);
1383
sewardj71bc3cb2005-05-19 00:25:45 +00001384 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
1385 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
1386
njn9e63cb62005-05-08 18:34:59 +00001387 switch (VG_(get_error_kind)(err)) {
1388 case CoreMemErr: {
1389 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00001390 if (VG_(clo_xml))
1391 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
1392 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
1393 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
1394 xpre, VG_(get_error_string)(err), s, xpost);
1395
njn9e63cb62005-05-08 18:34:59 +00001396 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1397 break;
1398
1399 }
1400
1401 case ValueErr:
1402 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00001403 if (VG_(clo_xml))
1404 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
1405 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
1406 " on uninitialised value(s)%s",
1407 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00001408 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00001409 if (VG_(clo_xml))
1410 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
1411 VG_(message)(Vg_UserMsg,
1412 "%sUse of uninitialised value of size %d%s",
1413 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00001414 }
1415 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1416 break;
1417
1418 case ParamErr: {
1419 Bool isReg = ( Register == err_extra->addrinfo.akind );
1420 Char* s1 = ( isReg ? "contains" : "points to" );
1421 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
1422 if (isReg) tl_assert(!err_extra->isUnaddr);
1423
sewardj71bc3cb2005-05-19 00:25:45 +00001424 if (VG_(clo_xml))
1425 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
1426 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
1427 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00001428
1429 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1430 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1431 break;
1432 }
1433 case UserErr: {
1434 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
1435
sewardj71bc3cb2005-05-19 00:25:45 +00001436 if (VG_(clo_xml))
1437 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00001438 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00001439 "%s%s byte(s) found during client check request%s",
1440 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00001441
1442 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1443 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1444 break;
1445 }
1446 default:
1447 MAC_(pp_shared_Error)(err);
1448 break;
1449 }
1450}
1451
1452/*------------------------------------------------------------*/
1453/*--- Recording errors ---*/
1454/*------------------------------------------------------------*/
1455
njn02bc4b82005-05-15 17:28:26 +00001456/* Record a use-of-uninitialised-value error of the given size for
njn9e63cb62005-05-08 18:34:59 +00001457 thread 'tid'. */
1458/* This one is called from both generated and non-generated code. */
njn96364822005-05-08 19:04:53 +00001459static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001460{
1461 MAC_Error err_extra;
1462
1463 MAC_(clear_MAC_Error)( &err_extra );
1464 err_extra.size = size;
1465 err_extra.isUnaddr = False;
1466 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1467}
1468
1469/* This one is called only from non-generated code. */
1470
njn96364822005-05-08 19:04:53 +00001471static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1472 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001473{
1474 MAC_Error err_extra;
1475
1476 tl_assert(VG_INVALID_THREADID != tid);
1477 MAC_(clear_MAC_Error)( &err_extra );
1478 err_extra.addrinfo.akind = Undescribed;
1479 err_extra.isUnaddr = isUnaddr;
1480 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1481}
1482
1483/*------------------------------------------------------------*/
1484/*--- Suppressions ---*/
1485/*------------------------------------------------------------*/
1486
njn51d827b2005-05-09 01:02:08 +00001487static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001488{
1489 SuppKind skind;
1490
1491 if (MAC_(shared_recognised_suppression)(name, su))
1492 return True;
1493
1494 /* Extra suppressions not used by Addrcheck */
1495 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1496 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1497 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1498 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1499 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1500 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1501 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1502 else
1503 return False;
1504
1505 VG_(set_supp_kind)(su, skind);
1506 return True;
1507}
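/* Editor's note: the entry below is an illustrative sketch, not part of
   the original source.  It shows a suppression-file entry matched by the
   "Cond" kind recognised above; the suppression name, function and
   object are invented for the example.

      {
         ignore-uninit-branch-in-libfoo
         Memcheck:Cond
         fun:foo_compute
         obj:/usr/lib/libfoo.so.1
      }

   A "Value1".."Value16" kind in the same position instead suppresses
   "Use of uninitialised value of size N" reports ("Value0" is an old
   spelling of "Cond" kept for backwards compatibility). */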
1508
1509/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001510/*--- Functions called directly from generated code: ---*/
1511/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001512/*------------------------------------------------------------*/
1513
1514/* Types: LOADV4, LOADV2, LOADV1 are:
1515 UWord fn ( Addr a )
1516 so they return 32-bits on 32-bit machines and 64-bits on
1517 64-bit machines. Addr has the same size as a host word.
1518
1519 LOADV8 is always ULong fn ( Addr a )
1520
1521 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1522 are a UWord, and for STOREV8 they are a ULong.
1523*/
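/* Editor's note: the worked example below is not part of the original
   source; it sketches how the fast paths that follow decompose an
   address, assuming a 32-bit host where N_PRIMARY_MAP == 0x10000 and
   each secondary map covers 64KB.

      a        = 0x0804A010                       (8-aligned client address)
      mask     = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16))
               = ~(0xFFF8 | 0xFFFF0000) = 0x00000007
      a & mask = 0                      => aligned and in range: fast path
      sec_no   = a >> 16    = 0x0804    => index into primary_map[]
      v_off    = a & 0xFFFF = 0xA010    => byte offset within the secondary
      a_off    = v_off >> 3 = 0x1402    => one abits byte covers 8 client bytes

   If sm->abits[0x1402] == VGM_BYTE_VALID, all 8 bytes at
   0x0804A010 .. 0x0804A017 are addressible and the V bits are read or
   written directly at sm->vbyte[0xA010 .. 0xA017].  The 4-byte variants
   additionally select the relevant nibble of the abits byte with
   "abits >>= (a & 4); abits &= 15". */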
1524
sewardj95448072004-11-22 20:19:51 +00001525/* ------------------------ Size = 8 ------------------------ */
1526
sewardj8cf88b72005-07-08 01:29:33 +00001527#define MAKE_LOADV8(nAME,iS_BIGENDIAN) \
1528 \
1529 VG_REGPARM(1) \
1530 ULong nAME ( Addr aA ) \
1531 { \
sewardjae986ca2005-10-12 12:53:20 +00001532 UWord mask, a, sec_no, v_off, a_off, abits; \
1533 SecMap* sm; \
1534 \
sewardj8cf88b72005-07-08 01:29:33 +00001535 PROF_EVENT(200, #nAME); \
1536 \
1537 if (VG_DEBUG_MEMORY >= 2) \
1538 return mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
1539 \
sewardjae986ca2005-10-12 12:53:20 +00001540 mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1541 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001542 \
1543 /* If any part of 'a' indicated by the mask is 1, either */ \
1544 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1545 /* covered by the primary map. Either way we defer to the */ \
1546 /* slow-path case. */ \
1547 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1548 PROF_EVENT(201, #nAME"-slow1"); \
1549 return (UWord)mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
1550 } \
1551 \
sewardjae986ca2005-10-12 12:53:20 +00001552 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001553 \
1554 if (VG_DEBUG_MEMORY >= 1) \
1555 tl_assert(sec_no < N_PRIMARY_MAP); \
1556 \
sewardjae986ca2005-10-12 12:53:20 +00001557 sm = primary_map[sec_no]; \
1558 v_off = a & 0xFFFF; \
1559 a_off = v_off >> 3; \
1560 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001561 \
1562 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1563 /* Handle common case quickly: a is suitably aligned, */ \
1564 /* is mapped, and is addressible. */ \
1565 return ((ULong*)(sm->vbyte))[ v_off >> 3 ]; \
1566 } else { \
1567 /* Slow but general case. */ \
1568 PROF_EVENT(202, #nAME"-slow2"); \
1569 return mc_LOADVn_slow( a, 8, iS_BIGENDIAN ); \
1570 } \
sewardjf9d81612005-04-23 23:25:49 +00001571 }
1572
sewardj8cf88b72005-07-08 01:29:33 +00001573MAKE_LOADV8( MC_(helperc_LOADV8be), True /*bigendian*/ );
1574MAKE_LOADV8( MC_(helperc_LOADV8le), False/*littleendian*/ );
sewardjf9d81612005-04-23 23:25:49 +00001575
sewardjf9d81612005-04-23 23:25:49 +00001576
sewardj8cf88b72005-07-08 01:29:33 +00001577#define MAKE_STOREV8(nAME,iS_BIGENDIAN) \
1578 \
1579 VG_REGPARM(1) \
1580 void nAME ( Addr aA, ULong vbytes ) \
1581 { \
sewardjae986ca2005-10-12 12:53:20 +00001582 UWord mask, a, sec_no, v_off, a_off, abits; \
1583 SecMap* sm; \
1584 \
sewardj8cf88b72005-07-08 01:29:33 +00001585 PROF_EVENT(210, #nAME); \
1586 \
1587 if (VG_DEBUG_MEMORY >= 2) \
1588 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1589 \
sewardjae986ca2005-10-12 12:53:20 +00001590 mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1591 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001592 \
1593 /* If any part of 'a' indicated by the mask is 1, either */ \
1594 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1595 /* covered by the primary map. Either way we defer to the */ \
1596 /* slow-path case. */ \
1597 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1598 PROF_EVENT(211, #nAME"-slow1"); \
1599 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1600 return; \
1601 } \
1602 \
sewardjae986ca2005-10-12 12:53:20 +00001603 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001604 \
1605 if (VG_DEBUG_MEMORY >= 1) \
1606 tl_assert(sec_no < N_PRIMARY_MAP); \
1607 \
sewardjae986ca2005-10-12 12:53:20 +00001608 sm = primary_map[sec_no]; \
1609 v_off = a & 0xFFFF; \
1610 a_off = v_off >> 3; \
1611 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001612 \
1613 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1614 && abits == VGM_BYTE_VALID)) { \
1615 /* Handle common case quickly: a is suitably aligned, */ \
1616 /* is mapped, and is addressible. */ \
1617 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes; \
1618 } else { \
1619 /* Slow but general case. */ \
1620 PROF_EVENT(212, #nAME"-slow2"); \
1621 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1622 } \
sewardjf9d81612005-04-23 23:25:49 +00001623 }
1624
sewardj8cf88b72005-07-08 01:29:33 +00001625MAKE_STOREV8( MC_(helperc_STOREV8be), True /*bigendian*/ );
1626MAKE_STOREV8( MC_(helperc_STOREV8le), False/*littleendian*/ );
sewardj95448072004-11-22 20:19:51 +00001627
sewardj95448072004-11-22 20:19:51 +00001628
1629/* ------------------------ Size = 4 ------------------------ */
1630
sewardj8cf88b72005-07-08 01:29:33 +00001631#define MAKE_LOADV4(nAME,iS_BIGENDIAN) \
1632 \
1633 VG_REGPARM(1) \
1634 UWord nAME ( Addr aA ) \
1635 { \
sewardjae986ca2005-10-12 12:53:20 +00001636 UWord mask, a, sec_no, v_off, a_off, abits; \
1637 SecMap* sm; \
1638 \
sewardj8cf88b72005-07-08 01:29:33 +00001639 PROF_EVENT(220, #nAME); \
1640 \
1641 if (VG_DEBUG_MEMORY >= 2) \
1642 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1643 \
sewardjae986ca2005-10-12 12:53:20 +00001644 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1645 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001646 \
1647 /* If any part of 'a' indicated by the mask is 1, either */ \
1648 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1649 /* covered by the primary map. Either way we defer to the */ \
1650 /* slow-path case. */ \
1651 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1652 PROF_EVENT(221, #nAME"-slow1"); \
1653 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1654 } \
1655 \
sewardjae986ca2005-10-12 12:53:20 +00001656 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001657 \
1658 if (VG_DEBUG_MEMORY >= 1) \
1659 tl_assert(sec_no < N_PRIMARY_MAP); \
1660 \
sewardjae986ca2005-10-12 12:53:20 +00001661 sm = primary_map[sec_no]; \
1662 v_off = a & 0xFFFF; \
1663 a_off = v_off >> 3; \
1664 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001665 abits >>= (a & 4); \
1666 abits &= 15; \
1667 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) { \
1668 /* Handle common case quickly: a is suitably aligned, */ \
1669 /* is mapped, and is addressible. */ \
1670 /* On a 32-bit platform, simply hoick the required 32 */ \
1671 /* bits out of the vbyte array. On a 64-bit platform, */ \
1672 /* also set the upper 32 bits to 1 ("undefined"), just */ \
1673 /* in case. This almost certainly isn't necessary, */ \
1674 /* but be paranoid. */ \
1675 UWord ret = (UWord)0xFFFFFFFF00000000ULL; \
1676 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] ); \
1677 return ret; \
1678 } else { \
1679 /* Slow but general case. */ \
1680 PROF_EVENT(222, #nAME"-slow2"); \
1681 return (UWord)mc_LOADVn_slow( a, 4, iS_BIGENDIAN ); \
1682 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001683 }
1684
sewardj8cf88b72005-07-08 01:29:33 +00001685MAKE_LOADV4( MC_(helperc_LOADV4be), True /*bigendian*/ );
1686MAKE_LOADV4( MC_(helperc_LOADV4le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001687
sewardjc1a2cda2005-04-21 17:34:00 +00001688
sewardj8cf88b72005-07-08 01:29:33 +00001689#define MAKE_STOREV4(nAME,iS_BIGENDIAN) \
1690 \
1691 VG_REGPARM(2) \
1692 void nAME ( Addr aA, UWord vbytes ) \
1693 { \
sewardjae986ca2005-10-12 12:53:20 +00001694 UWord mask, a, sec_no, v_off, a_off, abits; \
1695 SecMap* sm; \
1696 \
sewardj8cf88b72005-07-08 01:29:33 +00001697 PROF_EVENT(230, #nAME); \
1698 \
1699 if (VG_DEBUG_MEMORY >= 2) \
1700 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1701 \
sewardjae986ca2005-10-12 12:53:20 +00001702 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1703 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001704 \
1705 /* If any part of 'a' indicated by the mask is 1, either */ \
1706 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1707 /* covered by the primary map. Either way we defer to the */ \
1708 /* slow-path case. */ \
1709 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1710 PROF_EVENT(231, #nAME"-slow1"); \
1711 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1712 return; \
1713 } \
1714 \
sewardjae986ca2005-10-12 12:53:20 +00001715 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001716 \
1717 if (VG_DEBUG_MEMORY >= 1) \
1718 tl_assert(sec_no < N_PRIMARY_MAP); \
1719 \
sewardjae986ca2005-10-12 12:53:20 +00001720 sm = primary_map[sec_no]; \
1721 v_off = a & 0xFFFF; \
1722 a_off = v_off >> 3; \
1723 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001724 abits >>= (a & 4); \
1725 abits &= 15; \
1726 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1727 && abits == VGM_NIBBLE_VALID)) { \
1728 /* Handle common case quickly: a is suitably aligned, */ \
1729 /* is mapped, and is addressible. */ \
1730 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes; \
1731 } else { \
1732 /* Slow but general case. */ \
1733 PROF_EVENT(232, #nAME"-slow2"); \
1734 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1735 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001736 }
1737
sewardj8cf88b72005-07-08 01:29:33 +00001738MAKE_STOREV4( MC_(helperc_STOREV4be), True /*bigendian*/ );
1739MAKE_STOREV4( MC_(helperc_STOREV4le), False/*littleendian*/ );
njn25e49d8e72002-09-23 09:36:25 +00001740
njn25e49d8e72002-09-23 09:36:25 +00001741
sewardj95448072004-11-22 20:19:51 +00001742/* ------------------------ Size = 2 ------------------------ */
1743
sewardj8cf88b72005-07-08 01:29:33 +00001744#define MAKE_LOADV2(nAME,iS_BIGENDIAN) \
1745 \
1746 VG_REGPARM(1) \
1747 UWord nAME ( Addr aA ) \
1748 { \
sewardjae986ca2005-10-12 12:53:20 +00001749 UWord mask, a, sec_no, v_off, a_off, abits; \
1750 SecMap* sm; \
1751 \
sewardj8cf88b72005-07-08 01:29:33 +00001752 PROF_EVENT(240, #nAME); \
1753 \
1754 if (VG_DEBUG_MEMORY >= 2) \
1755 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1756 \
sewardjae986ca2005-10-12 12:53:20 +00001757 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1758 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001759 \
1760 /* If any part of 'a' indicated by the mask is 1, either */ \
1761 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1762 /* covered by the primary map. Either way we defer to the */ \
1763 /* slow-path case. */ \
1764 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1765 PROF_EVENT(241, #nAME"-slow1"); \
1766 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1767 } \
1768 \
sewardjae986ca2005-10-12 12:53:20 +00001769 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001770 \
1771 if (VG_DEBUG_MEMORY >= 1) \
1772 tl_assert(sec_no < N_PRIMARY_MAP); \
1773 \
sewardjae986ca2005-10-12 12:53:20 +00001774 sm = primary_map[sec_no]; \
1775 v_off = a & 0xFFFF; \
1776 a_off = v_off >> 3; \
1777 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001778 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1779 /* Handle common case quickly: a is mapped, and the */ \
1780 /* entire word32 it lives in is addressible. */ \
1781 /* Set the upper 16/48 bits of the result to 1 */ \
1782 /* ("undefined"), just in case. This almost certainly */ \
1783 /* isn't necessary, but be paranoid. */ \
1784 return (~(UWord)0xFFFF) \
1785 | \
1786 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] ); \
1787 } else { \
1788 /* Slow but general case. */ \
1789 PROF_EVENT(242, #nAME"-slow2"); \
1790 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1791 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001792 }
1793
sewardj8cf88b72005-07-08 01:29:33 +00001794MAKE_LOADV2( MC_(helperc_LOADV2be), True /*bigendian*/ );
1795MAKE_LOADV2( MC_(helperc_LOADV2le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001796
sewardjc1a2cda2005-04-21 17:34:00 +00001797
sewardj8cf88b72005-07-08 01:29:33 +00001798#define MAKE_STOREV2(nAME,iS_BIGENDIAN) \
1799 \
1800 VG_REGPARM(2) \
1801 void nAME ( Addr aA, UWord vbytes ) \
1802 { \
sewardjae986ca2005-10-12 12:53:20 +00001803 UWord mask, a, sec_no, v_off, a_off, abits; \
1804 SecMap* sm; \
1805 \
sewardj8cf88b72005-07-08 01:29:33 +00001806 PROF_EVENT(250, #nAME); \
1807 \
1808 if (VG_DEBUG_MEMORY >= 2) \
1809 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1810 \
sewardjae986ca2005-10-12 12:53:20 +00001811 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1812 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001813 \
1814 /* If any part of 'a' indicated by the mask is 1, either */ \
1815 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1816 /* covered by the primary map. Either way we defer to the */ \
1817 /* slow-path case. */ \
1818 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1819 PROF_EVENT(251, #nAME"-slow1"); \
1820 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1821 return; \
1822 } \
1823 \
sewardjae986ca2005-10-12 12:53:20 +00001824 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001825 \
1826 if (VG_DEBUG_MEMORY >= 1) \
1827 tl_assert(sec_no < N_PRIMARY_MAP); \
1828 \
sewardjae986ca2005-10-12 12:53:20 +00001829 sm = primary_map[sec_no]; \
1830 v_off = a & 0xFFFF; \
1831 a_off = v_off >> 3; \
1832 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001833 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1834 && abits == VGM_BYTE_VALID)) { \
1835 /* Handle common case quickly. */ \
1836 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes; \
1837 } else { \
1838 /* Slow but general case. */ \
1839 PROF_EVENT(252, #nAME"-slow2"); \
1840 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1841 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001842 }
1843
njn25e49d8e72002-09-23 09:36:25 +00001844
sewardj8cf88b72005-07-08 01:29:33 +00001845MAKE_STOREV2( MC_(helperc_STOREV2be), True /*bigendian*/ );
1846MAKE_STOREV2( MC_(helperc_STOREV2le), False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001847
njn25e49d8e72002-09-23 09:36:25 +00001848
sewardj95448072004-11-22 20:19:51 +00001849/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00001850/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00001851
njnaf839f52005-06-23 03:27:57 +00001852VG_REGPARM(1)
sewardj8cf88b72005-07-08 01:29:33 +00001853UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001854{
sewardjae986ca2005-10-12 12:53:20 +00001855 UWord mask, a, sec_no, v_off, a_off, abits;
1856 SecMap* sm;
1857
sewardj8cf88b72005-07-08 01:29:33 +00001858 PROF_EVENT(260, "helperc_LOADV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001859
1860# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001861 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001862# else
1863
sewardjae986ca2005-10-12 12:53:20 +00001864 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1865 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001866
1867 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1868 exceeds the range covered by the primary map. In which case we
1869 defer to the slow-path case. */
1870 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001871 PROF_EVENT(261, "helperc_LOADV1-slow1");
1872 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001873 }
1874
sewardjae986ca2005-10-12 12:53:20 +00001875 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001876
1877# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001878 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001879# endif
1880
sewardjae986ca2005-10-12 12:53:20 +00001881 sm = primary_map[sec_no];
1882 v_off = a & 0xFFFF;
1883 a_off = v_off >> 3;
1884 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001885 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1886 /* Handle common case quickly: a is mapped, and the entire
1887 word32 it lives in is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001888 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1889 just in case. This almost certainly isn't necessary, but be
1890 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001891 return (~(UWord)0xFF)
1892 |
1893 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1894 } else {
1895 /* Slow but general case. */
sewardj8cf88b72005-07-08 01:29:33 +00001896 PROF_EVENT(262, "helperc_LOADV1-slow2");
1897 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001898 }
1899# endif
njn25e49d8e72002-09-23 09:36:25 +00001900}
1901
sewardjc1a2cda2005-04-21 17:34:00 +00001902
njnaf839f52005-06-23 03:27:57 +00001903VG_REGPARM(2)
sewardj8cf88b72005-07-08 01:29:33 +00001904void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001905{
sewardjae986ca2005-10-12 12:53:20 +00001906 UWord mask, a, sec_no, v_off, a_off, abits;
1907 SecMap* sm;
1908
sewardj8cf88b72005-07-08 01:29:33 +00001909 PROF_EVENT(270, "helperc_STOREV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001910
1911# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001912 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001913# else
1914
sewardjae986ca2005-10-12 12:53:20 +00001915 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1916 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001917 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1918 exceeds the range covered by the primary map. In which case we
1919 defer to the slow-path case. */
1920 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001921 PROF_EVENT(271, "helperc_STOREV1-slow1");
1922 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001923 return;
1924 }
1925
sewardjae986ca2005-10-12 12:53:20 +00001926 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001927
1928# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001929 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001930# endif
1931
sewardjae986ca2005-10-12 12:53:20 +00001932 sm = primary_map[sec_no];
1933 v_off = a & 0xFFFF;
1934 a_off = v_off >> 3;
1935 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001936 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1937 && abits == VGM_BYTE_VALID)) {
1938      /* Handle common case quickly: a is mapped, and the entire
1939         8-byte group it lives in is addressible. */
1940 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1941 } else {
sewardj8cf88b72005-07-08 01:29:33 +00001942 PROF_EVENT(272, "helperc_STOREV1-slow2");
1943 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001944 }
1945
1946# endif
njn25e49d8e72002-09-23 09:36:25 +00001947}
1948
1949
sewardjc859fbf2005-04-22 21:10:28 +00001950/*------------------------------------------------------------*/
1951/*--- Functions called directly from generated code: ---*/
1952/*--- Value-check failure handlers. ---*/
1953/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001954
njn5c004e42002-11-18 11:04:50 +00001955void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001956{
njn9e63cb62005-05-08 18:34:59 +00001957 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001958}
1959
njn5c004e42002-11-18 11:04:50 +00001960void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001961{
njn9e63cb62005-05-08 18:34:59 +00001962 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001963}
1964
njn5c004e42002-11-18 11:04:50 +00001965void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001966{
njn9e63cb62005-05-08 18:34:59 +00001967 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001968}
1969
sewardj11bcc4e2005-04-23 22:38:38 +00001970void MC_(helperc_value_check8_fail) ( void )
1971{
njn9e63cb62005-05-08 18:34:59 +00001972 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001973}
1974
njnaf839f52005-06-23 03:27:57 +00001975VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001976{
njn9e63cb62005-05-08 18:34:59 +00001977 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001978}
1979
njn25e49d8e72002-09-23 09:36:25 +00001980
sewardj45d94cc2005-04-20 14:44:11 +00001981//zz /*------------------------------------------------------------*/
1982//zz /*--- Metadata get/set functions, for client requests. ---*/
1983//zz /*------------------------------------------------------------*/
1984//zz
1985//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1986//zz error, 3 == addressing error. */
1987//zz static Int mc_get_or_set_vbits_for_client (
1988//zz ThreadId tid,
1989//zz Addr dataV,
1990//zz Addr vbitsV,
1991//zz SizeT size,
1992//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1993//zz )
1994//zz {
1995//zz Bool addressibleD = True;
1996//zz Bool addressibleV = True;
1997//zz UInt* data = (UInt*)dataV;
1998//zz UInt* vbits = (UInt*)vbitsV;
1999//zz SizeT szW = size / 4; /* sigh */
2000//zz SizeT i;
2001//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
2002//zz UInt* vbitsP = NULL; /* ditto */
2003//zz
2004//zz /* Check alignment of args. */
2005//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
2006//zz return 2;
2007//zz if ((size & 3) != 0)
2008//zz return 2;
2009//zz
2010//zz /* Check that arrays are addressible. */
2011//zz for (i = 0; i < szW; i++) {
2012//zz dataP = &data[i];
2013//zz vbitsP = &vbits[i];
2014//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
2015//zz addressibleD = False;
2016//zz break;
2017//zz }
2018//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
2019//zz addressibleV = False;
2020//zz break;
2021//zz }
2022//zz }
2023//zz if (!addressibleD) {
2024//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
2025//zz setting ? True : False );
2026//zz return 3;
2027//zz }
2028//zz if (!addressibleV) {
2029//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
2030//zz setting ? False : True );
2031//zz return 3;
2032//zz }
2033//zz
2034//zz /* Do the copy */
2035//zz if (setting) {
2036//zz /* setting */
2037//zz for (i = 0; i < szW; i++) {
2038//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00002039//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00002040//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
2041//zz }
2042//zz } else {
2043//zz /* getting */
2044//zz for (i = 0; i < szW; i++) {
2045//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
2046//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
2047//zz }
2048//zz }
2049//zz
2050//zz return 1;
2051//zz }
sewardj05fe85e2005-04-27 22:46:36 +00002052
2053
2054/*------------------------------------------------------------*/
2055/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
2056/*------------------------------------------------------------*/
2057
2058/* For the memory leak detector, say whether an entire 64k chunk of
2059 address space is possibly in use, or not. If in doubt return
2060 True.
2061*/
2062static
2063Bool mc_is_within_valid_secondary ( Addr a )
2064{
2065 SecMap* sm = maybe_get_secmap_for ( a );
2066 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
2067 /* Definitely not in use. */
2068 return False;
2069 } else {
2070 return True;
2071 }
2072}
2073
2074
2075/* For the memory leak detector, say whether or not a given word
2076 address is to be regarded as valid. */
2077static
2078Bool mc_is_valid_aligned_word ( Addr a )
2079{
2080 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
2081 if (sizeof(UWord) == 4) {
2082 tl_assert(VG_IS_4_ALIGNED(a));
2083 } else {
2084 tl_assert(VG_IS_8_ALIGNED(a));
2085 }
2086 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
2087 return True;
2088 } else {
2089 return False;
2090 }
2091}
sewardja4495682002-10-21 07:29:59 +00002092
2093
nethercote996901a2004-08-03 13:29:09 +00002094/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00002095 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00002096 tool. */
njnb8dca862005-03-14 02:42:44 +00002097static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00002098{
sewardj05fe85e2005-04-27 22:46:36 +00002099 MAC_(do_detect_memory_leaks) (
2100 tid,
2101 mode,
2102 mc_is_within_valid_secondary,
2103 mc_is_valid_aligned_word
2104 );
njn25e49d8e72002-09-23 09:36:25 +00002105}
2106
2107
sewardjc859fbf2005-04-22 21:10:28 +00002108/*------------------------------------------------------------*/
2109/*--- Initialisation ---*/
2110/*------------------------------------------------------------*/
2111
2112static void init_shadow_memory ( void )
2113{
2114 Int i;
2115 SecMap* sm;
2116
2117 /* Build the 3 distinguished secondaries */
2118 tl_assert(VGM_BIT_INVALID == 1);
2119 tl_assert(VGM_BIT_VALID == 0);
2120 tl_assert(VGM_BYTE_INVALID == 0xFF);
2121 tl_assert(VGM_BYTE_VALID == 0);
2122
2123 /* Set A invalid, V invalid. */
2124 sm = &sm_distinguished[SM_DIST_NOACCESS];
2125 for (i = 0; i < 65536; i++)
2126 sm->vbyte[i] = VGM_BYTE_INVALID;
2127 for (i = 0; i < 8192; i++)
2128 sm->abits[i] = VGM_BYTE_INVALID;
2129
2130 /* Set A valid, V invalid. */
2131 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2132 for (i = 0; i < 65536; i++)
2133 sm->vbyte[i] = VGM_BYTE_INVALID;
2134 for (i = 0; i < 8192; i++)
2135 sm->abits[i] = VGM_BYTE_VALID;
2136
2137 /* Set A valid, V valid. */
2138 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2139 for (i = 0; i < 65536; i++)
2140 sm->vbyte[i] = VGM_BYTE_VALID;
2141 for (i = 0; i < 8192; i++)
2142 sm->abits[i] = VGM_BYTE_VALID;
2143
2144 /* Set up the primary map. */
2145 /* These entries gradually get overwritten as the used address
2146 space expands. */
2147 for (i = 0; i < N_PRIMARY_MAP; i++)
2148 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2149
2150   /* No need to set auxmap_size = auxmap_used = 0 here;
2151      they are statically initialised. */
2152}
2153
2154
2155/*------------------------------------------------------------*/
2156/*--- Sanity check machinery (permanently engaged) ---*/
2157/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002158
njn51d827b2005-05-09 01:02:08 +00002159static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002160{
jseward9800fd32004-01-04 23:08:04 +00002161 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002162 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002163 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002164 return True;
njn25e49d8e72002-09-23 09:36:25 +00002165}
2166
njn51d827b2005-05-09 01:02:08 +00002167static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002168{
sewardj23eb2fd2005-04-22 16:29:19 +00002169 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002170 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002171 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002172
sewardj23eb2fd2005-04-22 16:29:19 +00002173 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002174 PROF_EVENT(491, "expensive_sanity_check");
2175
sewardj23eb2fd2005-04-22 16:29:19 +00002176 /* Check that the 3 distinguished SMs are still as they should
2177 be. */
njn25e49d8e72002-09-23 09:36:25 +00002178
sewardj45d94cc2005-04-20 14:44:11 +00002179 /* Check A invalid, V invalid. */
2180 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002181 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002182 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002183 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002184 for (i = 0; i < 8192; i++)
2185 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002186 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002187
sewardj45d94cc2005-04-20 14:44:11 +00002188 /* Check A valid, V invalid. */
2189 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2190 for (i = 0; i < 65536; i++)
2191 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002192 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002193 for (i = 0; i < 8192; i++)
2194 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002195 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002196
2197 /* Check A valid, V valid. */
2198 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2199 for (i = 0; i < 65536; i++)
2200 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002201 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002202 for (i = 0; i < 8192; i++)
2203 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002204 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002205
sewardj23eb2fd2005-04-22 16:29:19 +00002206 if (bad) {
2207 VG_(printf)("memcheck expensive sanity: "
2208 "distinguished_secondaries have changed\n");
2209 return False;
2210 }
2211
2212 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002213 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002214 bad = True;
2215
2216 if (bad) {
2217 VG_(printf)("memcheck expensive sanity: "
2218 "nonsensical auxmap sizing\n");
2219 return False;
2220 }
2221
2222 /* check that the number of secmaps issued matches the number that
2223 are reachable (iow, no secmap leaks) */
2224 n_secmaps_found = 0;
2225 for (i = 0; i < N_PRIMARY_MAP; i++) {
2226 if (primary_map[i] == NULL) {
2227 bad = True;
2228 } else {
2229 if (!is_distinguished_sm(primary_map[i]))
2230 n_secmaps_found++;
2231 }
2232 }
2233
2234 for (i = 0; i < auxmap_used; i++) {
2235 if (auxmap[i].sm == NULL) {
2236 bad = True;
2237 } else {
2238 if (!is_distinguished_sm(auxmap[i].sm))
2239 n_secmaps_found++;
2240 }
2241 }
2242
2243 if (n_secmaps_found != n_secmaps_issued)
2244 bad = True;
2245
2246 if (bad) {
2247 VG_(printf)("memcheck expensive sanity: "
2248 "apparent secmap leakage\n");
2249 return False;
2250 }
2251
2252 /* check that auxmap only covers address space that the primary
2253 doesn't */
2254
2255 for (i = 0; i < auxmap_used; i++)
2256 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2257 bad = True;
2258
2259 if (bad) {
2260 VG_(printf)("memcheck expensive sanity: "
2261 "auxmap covers wrong address space\n");
2262 return False;
2263 }
2264
2265 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002266
2267 return True;
2268}
sewardj45d94cc2005-04-20 14:44:11 +00002269
njn25e49d8e72002-09-23 09:36:25 +00002270
njn25e49d8e72002-09-23 09:36:25 +00002271/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002272/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002273/*------------------------------------------------------------*/
2274
njn51d827b2005-05-09 01:02:08 +00002275static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002276{
sewardjf3418c02005-11-08 14:10:24 +00002277 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002278}
2279
njn51d827b2005-05-09 01:02:08 +00002280static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002281{
njn3e884182003-04-15 13:03:23 +00002282 MAC_(print_common_usage)();
njn3e884182003-04-15 13:03:23 +00002283}
2284
njn51d827b2005-05-09 01:02:08 +00002285static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002286{
2287 MAC_(print_common_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00002288}
2289
sewardjf3418c02005-11-08 14:10:24 +00002290
nethercote8b76fe52004-11-08 19:20:09 +00002291/*------------------------------------------------------------*/
2292/*--- Client requests ---*/
2293/*------------------------------------------------------------*/
2294
2295/* Client block management:
2296
2297 This is managed as an expanding array of client block descriptors.
2298 Indices of live descriptors are issued to the client, so it can ask
2299 to free them later. Therefore we cannot slide live entries down
2300 over dead ones. Instead we must use free/inuse flags and scan for
2301 an empty slot at allocation time. This in turn means allocation is
2302 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002303
sewardjedc75ab2005-03-15 23:30:32 +00002304 An unused block has start == size == 0
2305*/
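/* Editor's note: an illustrative client-side sketch, not part of the
   original source.  It assumes the VALGRIND_CREATE_BLOCK and
   VALGRIND_DISCARD wrappers from memcheck.h, which expand to the
   VG_USERREQ__CREATE_BLOCK and VG_USERREQ__DISCARD requests handled in
   mc_handle_client_request() below.

      #include "memcheck.h"

      void describe_scratch_buffer ( void )
      {
         char buf[64];
         // Ask Memcheck to name this region in subsequent error messages.
         // The handler returns a descriptor index for use with DISCARD.
         int blk = VALGRIND_CREATE_BLOCK(buf, sizeof(buf), "parser scratch");
         // ... use buf ...
         // Release the descriptor; alloc_client_block() can reuse the slot.
         VALGRIND_DISCARD(blk);
      }
*/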
nethercote8b76fe52004-11-08 19:20:09 +00002306
2307typedef
2308 struct {
2309 Addr start;
2310 SizeT size;
2311 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00002312 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002313 }
2314 CGenBlock;
2315
2316/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002317static UInt cgb_size = 0;
2318static UInt cgb_used = 0;
2319static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002320
2321/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002322static UInt cgb_used_MAX = 0; /* Max in use. */
2323static UInt cgb_allocs = 0; /* Number of allocs. */
2324static UInt cgb_discards = 0; /* Number of discards. */
2325static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002326
2327
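/* Find a free slot in 'cgbs' and return its index, growing the array
   if no free slot exists. */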
2328static
njn695c16e2005-03-27 03:40:28 +00002329Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002330{
2331 UInt i, sz_new;
2332 CGenBlock* cgbs_new;
2333
njn695c16e2005-03-27 03:40:28 +00002334 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002335
njn695c16e2005-03-27 03:40:28 +00002336 for (i = 0; i < cgb_used; i++) {
2337 cgb_search++;
2338 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002339 return i;
2340 }
2341
2342 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002343 if (cgb_used < cgb_size) {
2344 cgb_used++;
2345 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002346 }
2347
2348 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002349 tl_assert(cgb_used == cgb_size);
2350 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002351
2352 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002353 for (i = 0; i < cgb_used; i++)
2354 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002355
njn695c16e2005-03-27 03:40:28 +00002356 if (cgbs != NULL)
2357 VG_(free)( cgbs );
2358 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002359
njn695c16e2005-03-27 03:40:28 +00002360 cgb_size = sz_new;
2361 cgb_used++;
2362 if (cgb_used > cgb_used_MAX)
2363 cgb_used_MAX = cgb_used;
2364 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002365}
2366
2367
2368static void show_client_block_stats ( void )
2369{
2370 VG_(message)(Vg_DebugMsg,
2371 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002372 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002373 );
2374}
2375
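/* If 'a' lies inside a client-defined block (or inside a mempool chunk
   anchored at such a block), fill in *ai with a description and return
   True; otherwise return False. */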
nethercote8b76fe52004-11-08 19:20:09 +00002376static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2377{
2378 UInt i;
2379 /* VG_(printf)("try to identify %d\n", a); */
2380
2381 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002382 for (i = 0; i < cgb_used; i++) {
2383 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002384 continue;
njn717cde52005-05-10 02:47:21 +00002385 // Use zero as the redzone for client blocks.
2386 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00002387 /* OK - maybe it's a mempool, too? */
njn12627272005-08-14 18:32:16 +00002388 MAC_Mempool* mp = VG_(HT_lookup)(MAC_(mempool_list),
2389 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00002390 if (mp != NULL) {
2391 if (mp->chunks != NULL) {
2392 MAC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00002393 VG_(HT_ResetIter)(mp->chunks);
2394 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0cb0d2005-08-15 01:52:02 +00002395 if (VG_(addr_is_in_block)(a, mc->data, mc->size,
2396 MAC_MALLOC_REDZONE_SZB)) {
2397 ai->akind = UserG;
2398 ai->blksize = mc->size;
2399 ai->rwoffset = (Int)(a) - (Int)mc->data;
2400 ai->lastchange = mc->where;
2401 return True;
2402 }
nethercote8b76fe52004-11-08 19:20:09 +00002403 }
2404 }
njn1d0cb0d2005-08-15 01:52:02 +00002405 ai->akind = Mempool;
2406 ai->blksize = cgbs[i].size;
2407 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002408 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002409 return True;
2410 }
njn1d0cb0d2005-08-15 01:52:02 +00002411 ai->akind = UserG;
2412 ai->blksize = cgbs[i].size;
2413 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002414 ai->lastchange = cgbs[i].where;
njn1d0cb0d2005-08-15 01:52:02 +00002415 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002416 return True;
2417 }
2418 }
2419 return False;
2420}
2421
njn51d827b2005-05-09 01:02:08 +00002422static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002423{
2424 Int i;
2425 Bool ok;
2426 Addr bad_addr;
2427
njnfc26ff92004-11-22 19:12:49 +00002428 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002429 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2430 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2431 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2432 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2433 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2434 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2435 return False;
2436
2437 switch (arg[0]) {
2438 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2439 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2440 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002441 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2442 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002443 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00002444 break;
nethercote8b76fe52004-11-08 19:20:09 +00002445
2446 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2447 MC_ReadResult res;
2448 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2449 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002450 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2451 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002452 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002453 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2454 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002455 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00002456 break;
nethercote8b76fe52004-11-08 19:20:09 +00002457 }
2458
2459 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002460 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00002461 *ret = 0; /* return value is meaningless */
2462 break;
nethercote8b76fe52004-11-08 19:20:09 +00002463
2464 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002465 mc_make_noaccess ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002466 *ret = -1;
2467 break;
nethercote8b76fe52004-11-08 19:20:09 +00002468
2469 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002470 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002471 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00002472 break;
nethercote8b76fe52004-11-08 19:20:09 +00002473
2474 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002475 mc_make_readable ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002476 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002477 break;
2478
sewardjedc75ab2005-03-15 23:30:32 +00002479 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00002480 if (arg[1] != 0 && arg[2] != 0) {
2481 i = alloc_client_block();
2482 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2483 cgbs[i].start = arg[1];
2484 cgbs[i].size = arg[2];
2485 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2486 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002487
sewardj8cf88b72005-07-08 01:29:33 +00002488 *ret = i;
2489 } else
2490 *ret = -1;
2491 break;
sewardjedc75ab2005-03-15 23:30:32 +00002492
nethercote8b76fe52004-11-08 19:20:09 +00002493 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002494 if (cgbs == NULL
2495 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00002496 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002497 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00002498 } else {
2499 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2500 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2501 VG_(free)(cgbs[arg[2]].desc);
2502 cgb_discards++;
2503 *ret = 0;
2504 }
2505 break;
nethercote8b76fe52004-11-08 19:20:09 +00002506
sewardj45d94cc2005-04-20 14:44:11 +00002507//zz case VG_USERREQ__GET_VBITS:
2508//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2509//zz error. */
2510//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2511//zz *ret = mc_get_or_set_vbits_for_client
2512//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2513//zz break;
2514//zz
2515//zz case VG_USERREQ__SET_VBITS:
2516//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2517//zz error. */
2518//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2519//zz *ret = mc_get_or_set_vbits_for_client
2520//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2521//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002522
2523 default:
2524 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2525 return True;
2526 } else {
2527 VG_(message)(Vg_UserMsg,
2528 "Warning: unknown memcheck client request code %llx",
2529 (ULong)arg[0]);
2530 return False;
2531 }
2532 }
2533 return True;
2534}
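/* Editor's note: an illustrative client-side sketch, not part of the
   original source.  It assumes the memcheck.h macro names of this era
   (VALGRIND_MAKE_NOACCESS, VALGRIND_CHECK_READABLE, VALGRIND_DO_LEAK_CHECK),
   which map onto the VG_USERREQ__ codes handled above; the buffer and
   sizes are invented for the example.

      #include <stdlib.h>
      #include <string.h>
      #include "memcheck.h"

      void client_request_example ( void )
      {
         char* p = malloc(100);
         memset(p, 0, 80);                // define the first 80 bytes
         // Fence off the last 20 bytes; any later access there is reported.
         VALGRIND_MAKE_NOACCESS(p + 80, 20);
         // Checks that p[0..79] is addressible and fully defined; any
         // offending byte is reported via mc_record_user_error() above.
         VALGRIND_CHECK_READABLE(p, 80);
         // On-demand leak scan; the handler maps this to
         // mc_detect_memory_leaks().
         VALGRIND_DO_LEAK_CHECK;
         free(p);
      }
*/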
njn25e49d8e72002-09-23 09:36:25 +00002535
2536/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002537/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002538/*------------------------------------------------------------*/
2539
njn51d827b2005-05-09 01:02:08 +00002540static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002541{
sewardj71bc3cb2005-05-19 00:25:45 +00002542 /* If we've been asked to emit XML, mash around various other
2543 options so as to constrain the output somewhat. */
2544 if (VG_(clo_xml)) {
2545 /* Extract as much info as possible from the leak checker. */
sewardj09890d82005-05-20 02:45:15 +00002546 /* MAC_(clo_show_reachable) = True; */
sewardj71bc3cb2005-05-19 00:25:45 +00002547 MAC_(clo_leak_check) = LC_Full;
2548 }
njn5c004e42002-11-18 11:04:50 +00002549}
2550
njn51d827b2005-05-09 01:02:08 +00002551static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002552{
sewardj23eb2fd2005-04-22 16:29:19 +00002553 Int i, n_accessible_dist;
2554 SecMap* sm;
2555
sewardjae986ca2005-10-12 12:53:20 +00002556 MAC_(common_fini)( mc_detect_memory_leaks );
2557
sewardj45d94cc2005-04-20 14:44:11 +00002558 if (VG_(clo_verbosity) > 1) {
2559 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002560 " memcheck: sanity checks: %d cheap, %d expensive",
2561 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002562 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002563 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2564 auxmap_used,
2565 auxmap_used * 64,
2566 auxmap_used / 16 );
2567 VG_(message)(Vg_DebugMsg,
2568 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002569 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002570 VG_(message)(Vg_DebugMsg,
2571 " memcheck: secondaries: %d issued (%dk, %dM)",
2572 n_secmaps_issued,
2573 n_secmaps_issued * 64,
2574 n_secmaps_issued / 16 );
2575
2576 n_accessible_dist = 0;
2577 for (i = 0; i < N_PRIMARY_MAP; i++) {
2578 sm = primary_map[i];
2579 if (is_distinguished_sm(sm)
2580 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2581 n_accessible_dist ++;
2582 }
2583 for (i = 0; i < auxmap_used; i++) {
2584 sm = auxmap[i].sm;
2585 if (is_distinguished_sm(sm)
2586 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2587 n_accessible_dist ++;
2588 }
2589
2590 VG_(message)(Vg_DebugMsg,
2591 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2592 n_accessible_dist,
2593 n_accessible_dist * 64,
2594 n_accessible_dist / 16 );
2595
sewardj45d94cc2005-04-20 14:44:11 +00002596 }
2597
njn5c004e42002-11-18 11:04:50 +00002598 if (0) {
2599 VG_(message)(Vg_DebugMsg,
2600 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002601 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002602 }
njn25e49d8e72002-09-23 09:36:25 +00002603}
2604
njn51d827b2005-05-09 01:02:08 +00002605static void mc_pre_clo_init(void)
2606{
2607 VG_(details_name) ("Memcheck");
2608 VG_(details_version) (NULL);
2609 VG_(details_description) ("a memory error detector");
2610 VG_(details_copyright_author)(
2611 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
2612 VG_(details_bug_reports_to) (VG_BUGS_TO);
2613 VG_(details_avg_translation_sizeB) ( 370 );
2614
2615 VG_(basic_tool_funcs) (mc_post_clo_init,
2616 MC_(instrument),
2617 mc_fini);
2618
2619 VG_(needs_core_errors) ();
2620 VG_(needs_tool_errors) (MAC_(eq_Error),
2621 mc_pp_Error,
2622 MAC_(update_extra),
2623 mc_recognised_suppression,
2624 MAC_(read_extra_suppression_info),
2625 MAC_(error_matches_suppression),
2626 MAC_(get_error_name),
2627 MAC_(print_extra_suppression_info));
2628 VG_(needs_libc_freeres) ();
2629 VG_(needs_command_line_options)(mc_process_cmd_line_option,
2630 mc_print_usage,
2631 mc_print_debug_usage);
2632 VG_(needs_client_requests) (mc_handle_client_request);
2633 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
2634 mc_expensive_sanity_check);
njn51d827b2005-05-09 01:02:08 +00002635
njnfc51f8d2005-06-21 03:20:17 +00002636 VG_(needs_malloc_replacement) (MAC_(malloc),
njn51d827b2005-05-09 01:02:08 +00002637 MAC_(__builtin_new),
2638 MAC_(__builtin_vec_new),
2639 MAC_(memalign),
2640 MAC_(calloc),
2641 MAC_(free),
2642 MAC_(__builtin_delete),
2643 MAC_(__builtin_vec_delete),
2644 MAC_(realloc),
2645 MAC_MALLOC_REDZONE_SZB );
2646
2647 MAC_( new_mem_heap) = & mc_new_mem_heap;
2648 MAC_( ban_mem_heap) = & mc_make_noaccess;
2649 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
2650 MAC_( die_mem_heap) = & mc_make_noaccess;
2651 MAC_(check_noaccess) = & mc_check_noaccess;
2652
2653 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
2654 VG_(track_new_mem_stack_signal)( & mc_make_writable );
2655 VG_(track_new_mem_brk) ( & mc_make_writable );
2656 VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );
2657
2658 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
njn81623712005-10-07 04:48:37 +00002659
2660 // Nb: we don't do anything with mprotect. This means that V bits are
2661 // preserved if a program, for example, marks some memory as inaccessible
2662 // and then later marks it as accessible again.
2663 //
2664 // If an access violation occurs (eg. writing to read-only memory) we let
2665 // it fault and print an informative termination message. This doesn't
2666 // happen if the program catches the signal, though, which is bad. If we
2667 // had two A bits (for readability and writability) that were completely
2668 // distinct from V bits, then we could handle all this properly.
2669 VG_(track_change_mem_mprotect) ( NULL );
njn51d827b2005-05-09 01:02:08 +00002670
2671 VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
2672 VG_(track_die_mem_brk) ( & mc_make_noaccess );
2673 VG_(track_die_mem_munmap) ( & mc_make_noaccess );
2674
2675 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2676 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2677 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2678 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2679 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2680 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
2681
2682 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2683 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2684 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2685 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2686 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2687 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
2688
2689 VG_(track_ban_mem_stack) ( & mc_make_noaccess );
2690
2691 VG_(track_pre_mem_read) ( & mc_check_is_readable );
2692 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2693 VG_(track_pre_mem_write) ( & mc_check_is_writable );
2694 VG_(track_post_mem_write) ( & mc_post_mem_write );
2695
2696 VG_(track_pre_reg_read) ( & mc_pre_reg_read );
2697
2698 VG_(track_post_reg_write) ( & mc_post_reg_write );
2699 VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );
2700
2701 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2702 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2703 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
2704
2705 /* Additional block description for VG_(describe_addr)() */
2706 MAC_(describe_addr_supp) = client_perm_maybe_describe;
2707
2708 init_shadow_memory();
2709 MAC_(common_pre_clo_init)();
2710
2711 tl_assert( mc_expensive_sanity_check() );
2712}
2713
sewardj45f4e7c2005-09-27 19:20:21 +00002714VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00002715
njn25e49d8e72002-09-23 09:36:25 +00002716/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002717/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002718/*--------------------------------------------------------------------*/