/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                  mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO 22 Apr 05

   test whether it would be faster, for LOADV4, to check
   only for 8-byte validity on the fast path
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_errormgr.h"      // For mac_shared.h
#include "pub_tool_execontext.h"    // For mac_shared.h
#include "pub_tool_hashtable.h"     // For mac_shared.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


#ifdef HAVE_BUILTIN_EXPECT
#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
#else
#define EXPECTED_TAKEN(cond)     (cond)
#define EXPECTED_NOT_TAKEN(cond) (cond)
#endif

/* Define to debug the mem audit system.  Set to:
      0  no debugging, fast cases are used
      1  some sanity checking, fast cases are used
      2  max sanity checking, only slow cases are used
*/
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* TODO: fix this comment */
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each of size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates 'not addressable and
//zz    not valid' for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 3 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable but not valid, or addressable and valid.
//zz */

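/* A minimal sketch of the address split which the overview above
   describes, assuming the same 16/16 split used by the primary and
   secondary maps below; illustration only, not compiled into the tool. */
#if 0
static void example_split_address ( Addr a )
{
   UWord pm_index = a >> 16;       /* selects a primary map entry       */
   UWord sm_off   = a & 0xFFFF;    /* byte offset within that secondary */
   /* e.g. a == 0x12345678 gives pm_index == 0x1234, sm_off == 0x5678;
      the A bit for 'a' is bit (sm_off & 7) of abits[sm_off >> 3] and
      its V byte is vbyte[sm_off]. */
   VG_(printf)("a=%p -> primary index %lu, secondary offset %lu\n",
               (void*)a, pm_index, sm_off);
}
#endif
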
/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)

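/* Worked example of what the constants above give us (illustration
   only): each primary entry covers 64KB, so

      32-bit: N_PRIMARY_BITS == 16  ->  2^16 * 64KB = 4GB covered,
              MAX_PRIMARY_ADDRESS == 0xFFFFFFFF (the whole space)
      64-bit: N_PRIMARY_BITS == 19  ->  2^19 * 64KB = 32GB covered,
              MAX_PRIMARY_ADDRESS == 0x7FFFFFFFF

   Addresses above MAX_PRIMARY_ADDRESS are handled through the
   auxiliary primary map defined further down. */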

/* --------------- Stats maps --------------- */

static Int   n_secmaps_issued   = 0;
static ULong n_auxmap_searches  = 0;
static ULong n_auxmap_cmps      = 0;
static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;


/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

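/* Size bookkeeping for the struct above (illustration only): one
   SecMap shadows one 64KB chunk of address space.  abits holds one
   A (addressability) bit per byte, so 65536 bits == 8192 bytes, and
   vbyte holds one V (validity) byte per byte, so 65536 bytes.  That
   is 9 shadow bits per client byte and 73728 bytes per SecMap, which
   matches the overview comment near the top of this file. */
#if 0
/* compile-time size check, assuming no struct padding */
typedef char assert_SecMap_size[ sizeof(SecMap) == 8192 + 65536 ? 1 : -1 ];
#endif
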
/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
   if (new_sm == NULL)
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
                                   sizeof(SecMap) );
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;
   return new_sm;
}


/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 20000 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, return NULL.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   return NULL;
}


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);
   if (am)
      return am;

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}

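/* Usage sketch for the two lookup routines above (illustration only):
   a hit swaps the entry one slot towards the front, so frequently used
   64KB chunks migrate towards the start of the array and get cheaper
   to find; the flip side is that any AuxMapEnt* obtained earlier may
   now point at a different entry, so such pointers must be used
   immediately and never cached across calls. */
#if 0
static SecMap* example_lookup_high_addr ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);   /* may rearrange auxmap[] */
   if (am == NULL)
      return NULL;     /* no permissions have ever been set for 'a' */
   return am->sm;      /* use right away; do not stash 'am' */
}
#endif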

/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}


/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}


/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th least significant byte
   in a wordszB-sized word, given the specified endianness.  (byteno 0
   is the least significant byte.) */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}

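/* Worked example for byte_offset_w (illustration only): with
   wordszB == 4, byteno counts up from the least significant byte, so

      little-endian host:  byteno 0,1,2,3  ->  offsets 0,1,2,3
      big-endian host:     byteno 0,1,2,3  ->  offsets 3,2,1,0

   i.e. on a big-endian host the least significant byte of a 4-byte
   word lives at the highest of the four addresses. */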

/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}


/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok, partial_load_exemption_applies;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   /* This is a hack which avoids producing errors for code which
      insists on stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressable place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MAC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
                                   && VG_IS_WORD_ALIGNED(a)
                                   && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}

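/* A concrete case for the exemption above (illustration only): on a
   32-bit host an optimised strlen() may read the whole 4-aligned word
   containing the terminating zero of a 2-byte heap block.  Two of the
   four bytes lie past the end of the block and are unaddressable, but
   the load is word-sized, word-aligned and at least one byte is valid,
   so when MAC_(clo_partial_loads_ok) is enabled no address error is
   reported; the inaccessible bytes simply come back marked Defined, as
   in the loop above. */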

static
void mc_STOREVn_slow ( Addr a, SizeT szB, ULong vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish the addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}

//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
//zz
//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   abits8;
//zz    PROF_EVENT(24);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    abits8 = sm->abits[sm_off >> 3];
//zz    abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    abits8 &= 0x0F;
//zz    return abits8;
//zz }
//zz
//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(25);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    return ((UInt*)(sm->vbyte))[sm_off >> 2];
//zz }
//zz
//zz
//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    PROF_EVENT(23);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
//zz }


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.              ---*/
/*------------------------------------------------------------*/

/* Given address 'a', find the place where the pointer to a's
   secondary map lives.  If a falls into the primary map, the returned
   value points to one of the entries in primary_map[].  Otherwise,
   the auxiliary primary map is searched for 'a', or an entry is
   created for it; either way, the returned value points to the
   relevant AuxMapEnt's .sm field.

   The point of this is to enable set_address_range_perms to assign
   secondary maps in a uniform way, without worrying about whether a
   given secondary map is pointed to from the main or auxiliary
   primary map.
*/

static SecMap** find_secmap_binder_for_addr ( Addr aA )
{
   if (aA > MAX_PRIMARY_ADDRESS) {
      AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
      return &am->sm;
   } else {
      UWord a      = (UWord)aA;
      UWord sec_no = (UWord)(a >> 16);
#     if VG_DEBUG_MEMORY >= 1
      tl_assert(sec_no < N_PRIMARY_MAP);
#     endif
      return &primary_map[sec_no];
   }
}


static void set_address_range_perms ( Addr aA, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   UWord    a, vbits8, abits8, vbits32, v_off, a_off;
   SecMap*  sm;
   SecMap** binder;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the permissions make sense. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %lu, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   a = (UWord)aA;

#  if VG_DEBUG_MEMORY >= 2

   /*------------------ debug-only case ------------------ */
   { SizeT i;

     UWord example_vbyte = BIT_TO_BYTE(example_v_bit);

     tl_assert(sizeof(SizeT) == sizeof(Addr));

     if (0 && len >= 4096)
        VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                    (ULong)a, len, example_a_bit, example_v_bit);

     if (len == 0)
        return;

     for (i = 0; i < len; i++) {
        set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
     }
   }

#  else

   /*------------------ standard handling ------------------ */

   /* Decide on the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   if (example_a_bit == VGM_BIT_INVALID) {
      example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
   } else {
      if (example_v_bit == VGM_BIT_VALID) {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
      } else {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
      }
   }

   /* Make various wider versions of the A/V values to use. */
   vbits8  = BIT_TO_BYTE(example_v_bit);
   abits8  = BIT_TO_BYTE(example_a_bit);
   vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;

   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(151, "set_address_range_perms-loop1-pre");
      if (VG_IS_8_ALIGNED(a)) break;
      set_abit_and_vbyte( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0);

   /* Now go in steps of 8 bytes. */
   binder = find_secmap_binder_for_addr(a);

   while (True) {

      if (len < 8) break;

      PROF_EVENT(152, "set_address_range_perms-loop8");

      if ((a & SECONDARY_MASK) == 0) {
         /* we just traversed a primary map boundary, so update the
            binder. */
         binder = find_secmap_binder_for_addr(a);
         PROF_EVENT(153, "set_address_range_perms-update-binder");

         /* Space-optimisation.  If we are setting the entire
            secondary map, just point this entry at one of our
            distinguished secondaries.  However, only do that if it
            already points at a distinguished secondary, since doing
            otherwise would leak the existing secondary.  We could do
            better and free up any pre-existing non-distinguished
            secondary at this point, since we are guaranteed that each
            non-dist secondary only has one pointer to it, and we have
            that pointer right here. */
         if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
            PROF_EVENT(154, "set_address_range_perms-entire-secmap");
            *binder = example_dsm;
            len -= SECONDARY_SIZE;
            a += SECONDARY_SIZE;
            continue;
         }
      }

      /* If the primary is already pointing to a distinguished map
         with the same properties as we're trying to set, then leave
         it that way. */
      if (*binder == example_dsm) {
         a += 8;
         len -= 8;
         continue;
      }

      /* Make sure it's OK to write the secondary. */
      if (is_distinguished_sm(*binder))
         *binder = copy_for_writing(*binder);

      sm = *binder;
      v_off = a & 0xFFFF;
      a_off = v_off >> 3;
      sm->abits[a_off] = (UChar)abits8;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;

      a += 8;
      len -= 8;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(155, "set_address_range_perms-loop1-post");
      set_abit_and_vbyte ( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

#  endif
}

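/* Shape of the work done by set_address_range_perms on a sample range
   (illustration only; addresses made up).  For aA == 0x5003 and
   len == 0x20012 the standard handling splits into:

      0x5003 .. 0x5007    head: 5 single bytes, up to 8-byte alignment
      0x5008 .. 0x2500F   body: 8 bytes per step; a still-distinguished
                          64KB secondary lying wholly inside the range
                          is simply repointed at example_dsm
      0x25010 .. 0x25014  tail: the remaining 5 single bytes
*/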

/* --- Set permissions for arbitrary address ranges --- */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(40, "mc_make_noaccess");
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(41, "mc_make_writable");
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(42, "mc_make_readable");
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}


/* --- Block-copy permissions (needed for implementing realloc() and
       sys_mremap). --- */

static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i, j;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");
   PROF_EVENT(50, "mc_copy_address_range_state");

   if (len == 0)
      return;

   if (src < dst) {
      for (i = 0, j = len-1; i < len; i++, j--) {
         PROF_EVENT(51, "mc_copy_address_range_state(loop)");
         get_abit_and_vbyte( &abit, &vbyte, src+j );
         set_abit_and_vbyte( dst+j, abit, vbyte );
      }
   }

   if (src > dst) {
      for (i = 0; i < len; i++) {
         PROF_EVENT(51, "mc_copy_address_range_state(loop)");
         get_abit_and_vbyte( &abit, &vbyte, src+i );
         set_abit_and_vbyte( dst+i, abit, vbyte );
      }
   }
}

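/* Why the copy direction above matters (illustration only): the two
   loops make overlapping copies safe, memmove-style.  For example,
   shrinking a block in place with src == 0x1000, dst == 0x0FF0 and
   len == 0x100 overlaps; copying upwards (i = 0, 1, 2, ...) is safe
   because every source byte is read before any write can clobber it.
   When src < dst the roles reverse, so the copy runs downwards
   instead. */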

/* --- Fast case permission setters, for dealing with stacks. --- */

static __inline__
void make_aligned_word32_writable ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off, mask;
   SecMap* sm;

   PROF_EVENT(300, "make_aligned_word32_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(301, "make_aligned_word32_writable-slow1");
      mc_make_writable(aA, 4);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s). */
   sm->abits[a_off] &= ~mask;
#  endif
}

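/* Worked example for the A-bit mask above (illustration only): abits
   stores one bit per client byte, eight bytes per abits[] entry, so a
   4-byte word occupies either the low or the high nibble of its entry.
   For an address with (a & 4) == 4, mask == 0x0F << 4 == 0xF0, and
   clearing those bits ('&= ~mask') marks bytes a..a+3 addressable,
   an A bit of 0 meaning 'valid' here, as the comment in the code
   notes. */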

static __inline__
void make_aligned_word32_noaccess ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off, mask;
   SecMap* sm;

   PROF_EVENT(310, "make_aligned_word32_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      mc_make_noaccess(aA, 4);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid
      (1s). */
   sm->abits[a_off] |= mask;
#  endif
}


/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_word64_writable ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off;
   SecMap* sm;

   PROF_EVENT(320, "make_aligned_word64_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(321, "make_aligned_word64_writable-slow1");
      mc_make_writable(aA, 8);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the relevant area accessible. */
   sm->abits[a_off] = VGM_BYTE_VALID;
#  endif
}


static __inline__
void make_aligned_word64_noaccess ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off;
   SecMap* sm;

   PROF_EVENT(330, "make_aligned_word64_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
      mc_make_noaccess(aA, 8);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the abandoned area inaccessible. */
   sm->abits[a_off] = VGM_BYTE_INVALID;
#  endif
}


/* The stack-pointer update handling functions */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );


void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
{
   tl_assert(sizeof(UWord) == sizeof(SizeT));
   if (0)
      VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );

#  if 0
   /* Really slow version */
   mc_make_writable(base, len);
#  endif

#  if 0
   /* Slow(ish) version, which is fairly easily seen to be correct.
   */
   if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
      make_aligned_word64_writable(base +   0);
      make_aligned_word64_writable(base +   8);
      make_aligned_word64_writable(base +  16);
      make_aligned_word64_writable(base +  24);

      make_aligned_word64_writable(base +  32);
      make_aligned_word64_writable(base +  40);
      make_aligned_word64_writable(base +  48);
      make_aligned_word64_writable(base +  56);

      make_aligned_word64_writable(base +  64);
      make_aligned_word64_writable(base +  72);
      make_aligned_word64_writable(base +  80);
      make_aligned_word64_writable(base +  88);

      make_aligned_word64_writable(base +  96);
      make_aligned_word64_writable(base + 104);
      make_aligned_word64_writable(base + 112);
      make_aligned_word64_writable(base + 120);
   } else {
      mc_make_writable(base, len);
   }
#  endif

   /* Idea is: go fast when
         * 8-aligned and length is 128
         * the sm is available in the main primary map
         * the address range falls entirely within a single
           secondary map
         * the SM is modifiable
      If all those conditions hold, just update the V bits
      by writing directly on the v-bit array.  We don't care
      about A bits; if the address range is marked invalid,
      any attempt to access it will elicit an addressing error,
      and that's good enough.
   */
   if (EXPECTED_TAKEN( len == 128
                       && VG_IS_8_ALIGNED(base)
      )) {
      /* Now we know the address range is suitably sized and
         aligned. */
      UWord a_lo   = (UWord)base;
      UWord a_hi   = (UWord)(base + 127);
      UWord sec_lo = a_lo >> 16;
      UWord sec_hi = a_hi >> 16;

      if (EXPECTED_TAKEN( sec_lo == sec_hi
                          && sec_lo < N_PRIMARY_MAP
         )) {
         /* Now we know that the entire address range falls within a
            single secondary map, and that that secondary 'lives' in
            the main primary map. */
         SecMap* sm = primary_map[sec_lo];

         if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
            /* And finally, now we know that the secondary in question
               is modifiable. */
            UWord  v_off = a_lo & 0xFFFF;
            ULong* p     = (ULong*)(&sm->vbyte[v_off]);
            p[ 0] = VGM_WORD64_INVALID;
            p[ 1] = VGM_WORD64_INVALID;
            p[ 2] = VGM_WORD64_INVALID;
            p[ 3] = VGM_WORD64_INVALID;
            p[ 4] = VGM_WORD64_INVALID;
            p[ 5] = VGM_WORD64_INVALID;
            p[ 6] = VGM_WORD64_INVALID;
            p[ 7] = VGM_WORD64_INVALID;
            p[ 8] = VGM_WORD64_INVALID;
            p[ 9] = VGM_WORD64_INVALID;
            p[10] = VGM_WORD64_INVALID;
            p[11] = VGM_WORD64_INVALID;
            p[12] = VGM_WORD64_INVALID;
            p[13] = VGM_WORD64_INVALID;
            p[14] = VGM_WORD64_INVALID;
            p[15] = VGM_WORD64_INVALID;
            return;
         }
      }
   }

   /* else fall into slow case */
   mc_make_writable(base, len);
}

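/* Quick recap of the fast-path guards above (illustration only): the
   inline block of sixteen VGM_WORD64_INVALID stores runs only when

      len == 128 and base is 8-aligned          (size / alignment)
      (base >> 16) == ((base+127) >> 16)        (one secondary map)
      sec_lo < N_PRIMARY_MAP                    (main primary map)
      !is_distinguished_sm(sm)                  (secondary is writable)

   Everything else falls through to mc_make_writable(), which handles
   the general case. */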

/*------------------------------------------------------------*/
/*--- Checking memory                                       ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressable.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(60, "mc_check_noaccess");
   for (i = 0; i < len; i++) {
      PROF_EVENT(61, "mc_check_noaccess(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(62, "mc_check_writable");
   for (i = 0; i < len; i++) {
      PROF_EVENT(63, "mc_check_writable(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   UWord vbyte;

   PROF_EVENT(64, "mc_check_readable");
   DEBUG("mc_check_readable\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(65, "mc_check_readable(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // Report addressability errors in preference to definedness errors
      // by checking the A bits first.
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UWord abit;
   UWord vbyte;
   PROF_EVENT(66, "mc_check_readable_asciiz");
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // As in mc_check_readable(), check A bits first
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0)
         return MC_Ok;
      a++;
   }
}

1174
1175/*------------------------------------------------------------*/
1176/*--- Memory event handlers ---*/
1177/*------------------------------------------------------------*/
1178
njn25e49d8e72002-09-23 09:36:25 +00001179static
njn72718642003-07-24 08:45:32 +00001180void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001181 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001182{
1183 Bool ok;
1184 Addr bad_addr;
1185
njn25e49d8e72002-09-23 09:36:25 +00001186 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1187 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001188 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001189 if (!ok) {
1190 switch (part) {
1191 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001192 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1193 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001194 break;
1195
1196 case Vg_CorePThread:
1197 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001198 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001199 break;
1200
1201 default:
njn67993252004-11-22 18:02:32 +00001202 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001203 }
1204 }
njn25e49d8e72002-09-23 09:36:25 +00001205}
1206
1207static
njn72718642003-07-24 08:45:32 +00001208void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001209 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001210{
njn25e49d8e72002-09-23 09:36:25 +00001211 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001212 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001213
nethercote8b76fe52004-11-08 19:20:09 +00001214 res = mc_check_readable ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00001215
1216 if (0)
1217 VG_(printf)("mc_check_is_readable(0x%x, %d, %s) -> %s\n",
1218 (UInt)base, (Int)size, s, res==MC_Ok ? "yes" : "no" );
1219
nethercote8b76fe52004-11-08 19:20:09 +00001220 if (MC_Ok != res) {
1221 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00001222
njn25e49d8e72002-09-23 09:36:25 +00001223 switch (part) {
1224 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001225 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1226 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001227 break;
1228
1229 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001230 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001231 break;
1232
1233 /* If we're being asked to jump to a silly address, record an error
1234 message before potentially crashing the entire system. */
1235 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001236 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001237 break;
1238
1239 default:
njn67993252004-11-22 18:02:32 +00001240 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001241 }
1242 }
njn25e49d8e72002-09-23 09:36:25 +00001243}
1244
1245static
njn72718642003-07-24 08:45:32 +00001246void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001247 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001248{
nethercote8b76fe52004-11-08 19:20:09 +00001249 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00001250 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00001251 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1252
njnca82cc02004-11-22 17:18:48 +00001253 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001254 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1255 if (MC_Ok != res) {
1256 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1257 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001258 }
njn25e49d8e72002-09-23 09:36:25 +00001259}
1260
njn25e49d8e72002-09-23 09:36:25 +00001261static
nethercote451eae92004-11-02 13:06:32 +00001262void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001263{
njn1f3a9092002-10-04 09:22:30 +00001264 /* Ignore the permissions, just make it readable. Seems to work... */
nethercote451eae92004-11-02 13:06:32 +00001265 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
1266 a,(ULong)len,rr,ww,xx);
nethercote8b76fe52004-11-08 19:20:09 +00001267 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001268}
1269
1270static
nethercote451eae92004-11-02 13:06:32 +00001271void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001272{
1273 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001274 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001275 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001276 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001277 }
1278}
1279
1280static
njnb8dca862005-03-14 02:42:44 +00001281void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001282{
njnb8dca862005-03-14 02:42:44 +00001283 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001284}
1285
njncf45fd42004-11-24 16:30:22 +00001286static
1287void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
1288{
1289 mc_make_readable(a, len);
1290}
njn25e49d8e72002-09-23 09:36:25 +00001291
sewardj45d94cc2005-04-20 14:44:11 +00001292
njn25e49d8e72002-09-23 09:36:25 +00001293/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001294/*--- Register event handlers ---*/
1295/*------------------------------------------------------------*/
1296
sewardj45d94cc2005-04-20 14:44:11 +00001297/* When some chunk of guest state is written, mark the corresponding
1298 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00001299 chunks of guest state, hence the _SIZE value, which has to be as
1300 big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00001301*/
1302static void mc_post_reg_write ( CorePart part, ThreadId tid,
1303 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001304{
sewardj62eae5f2006-01-17 01:58:24 +00001305# define MAX_REG_WRITE_SIZE 1264
cerion21082042005-12-06 19:07:08 +00001306 UChar area[MAX_REG_WRITE_SIZE];
1307 tl_assert(size <= MAX_REG_WRITE_SIZE);
njncf45fd42004-11-24 16:30:22 +00001308 VG_(memset)(area, VGM_BYTE_VALID, size);
1309 VG_(set_shadow_regs_area)( tid, offset, size, area );
cerion21082042005-12-06 19:07:08 +00001310# undef MAX_REG_WRITE_SIZE
njnd3040452003-05-19 15:04:06 +00001311}
1312
sewardj45d94cc2005-04-20 14:44:11 +00001313static
1314void mc_post_reg_write_clientcall ( ThreadId tid,
1315 OffT offset, SizeT size,
1316 Addr f)
njnd3040452003-05-19 15:04:06 +00001317{
njncf45fd42004-11-24 16:30:22 +00001318 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00001319}
1320
sewardj45d94cc2005-04-20 14:44:11 +00001321/* Look at the definedness of the guest's shadow state for
1322 [offset, offset+len). If any part of that is undefined, record
1323 a parameter error.
1324*/
1325static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1326 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001327{
sewardj45d94cc2005-04-20 14:44:11 +00001328 Int i;
1329 Bool bad;
1330
1331 UChar area[16];
1332 tl_assert(size <= 16);
1333
1334 VG_(get_shadow_regs_area)( tid, offset, size, area );
1335
1336 bad = False;
1337 for (i = 0; i < size; i++) {
1338 if (area[i] != VGM_BYTE_VALID) {
sewardj2c27f702005-05-03 18:19:05 +00001339 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001340 break;
1341 }
nethercote8b76fe52004-11-08 19:20:09 +00001342 }
1343
sewardj45d94cc2005-04-20 14:44:11 +00001344 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001345 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1346}
njnd3040452003-05-19 15:04:06 +00001347
njn25e49d8e72002-09-23 09:36:25 +00001348
sewardj6cf40ff2005-04-20 22:31:26 +00001349/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001350/*--- Printing errors ---*/
1351/*------------------------------------------------------------*/
1352
njn51d827b2005-05-09 01:02:08 +00001353static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00001354{
1355 MAC_Error* err_extra = VG_(get_error_extra)(err);
1356
sewardj71bc3cb2005-05-19 00:25:45 +00001357 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
1358 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
1359
njn9e63cb62005-05-08 18:34:59 +00001360 switch (VG_(get_error_kind)(err)) {
1361 case CoreMemErr: {
1362 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00001363 if (VG_(clo_xml))
1364 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
1365 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
1366 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
1367 xpre, VG_(get_error_string)(err), s, xpost);
1368
njn9e63cb62005-05-08 18:34:59 +00001369 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1370 break;
1371
1372 }
1373
1374 case ValueErr:
1375 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00001376 if (VG_(clo_xml))
1377 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
1378 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
1379 " on uninitialised value(s)%s",
1380 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00001381 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00001382 if (VG_(clo_xml))
1383 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
1384 VG_(message)(Vg_UserMsg,
1385 "%sUse of uninitialised value of size %d%s",
1386 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00001387 }
1388 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1389 break;
1390
1391 case ParamErr: {
1392 Bool isReg = ( Register == err_extra->addrinfo.akind );
1393 Char* s1 = ( isReg ? "contains" : "points to" );
1394 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
1395 if (isReg) tl_assert(!err_extra->isUnaddr);
1396
sewardj71bc3cb2005-05-19 00:25:45 +00001397 if (VG_(clo_xml))
1398 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
1399 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
1400 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00001401
1402 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1403 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1404 break;
1405 }
1406 case UserErr: {
1407 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
1408
sewardj71bc3cb2005-05-19 00:25:45 +00001409 if (VG_(clo_xml))
1410 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00001411 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00001412 "%s%s byte(s) found during client check request%s",
1413 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00001414
1415 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1416 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1417 break;
1418 }
1419 default:
1420 MAC_(pp_shared_Error)(err);
1421 break;
1422 }
1423}
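/* For reference, the ValueErr case above produces output along these
   lines (an illustrative sketch, not verbatim output, and ignoring the
   ==pid== prefixes added by VG_(message)):

      with --xml=no:    Conditional jump or move depends on
                        uninitialised value(s)
      with --xml=yes:     <kind>UninitCondition</kind>
                          <what>Conditional jump or move depends on
                          uninitialised value(s)</what>

   In both cases the message is followed by the stack trace printed by
   VG_(pp_ExeContext). */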
1424
1425/*------------------------------------------------------------*/
1426/*--- Recording errors ---*/
1427/*------------------------------------------------------------*/
1428
njn02bc4b82005-05-15 17:28:26 +00001429/* Record a use of an uninitialised value of the given size. No address
njn9e63cb62005-05-08 18:34:59 +00001430 info is attached, since the error concerns a value, not a location. */
1431/* This one is called from both generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00001432static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001433{
1434 MAC_Error err_extra;
1435
1436 MAC_(clear_MAC_Error)( &err_extra );
1437 err_extra.size = size;
1438 err_extra.isUnaddr = False;
1439 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1440}
1441
1442 /* This one is called from non-generated code. */
1443
njn96364822005-05-08 19:04:53 +00001444static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1445 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001446{
1447 MAC_Error err_extra;
1448
1449 tl_assert(VG_INVALID_THREADID != tid);
1450 MAC_(clear_MAC_Error)( &err_extra );
1451 err_extra.addrinfo.akind = Undescribed;
1452 err_extra.isUnaddr = isUnaddr;
1453 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1454}
1455
1456/*------------------------------------------------------------*/
1457/*--- Suppressions ---*/
1458/*------------------------------------------------------------*/
1459
njn51d827b2005-05-09 01:02:08 +00001460static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001461{
1462 SuppKind skind;
1463
1464 if (MAC_(shared_recognised_suppression)(name, su))
1465 return True;
1466
1467 /* Extra suppressions not used by Addrcheck */
1468 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1469 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1470 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1471 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1472 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1473 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1474 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1475 else
1476 return False;
1477
1478 VG_(set_supp_kind)(su, skind);
1479 return True;
1480}
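/* For reference, a suppression using one of the kinds recognised above
   might look like this in a suppressions file (an illustrative sketch;
   the suppression name and the frames are invented):

      {
         libfoo-cond-on-uninit
         Memcheck:Cond
         fun:foo_helper
         obj:*libfoo.so*
      }
*/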
1481
1482/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001483/*--- Functions called directly from generated code: ---*/
1484/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001485/*------------------------------------------------------------*/
1486
1487/* Types: LOADV4, LOADV2, LOADV1 are:
1488 UWord fn ( Addr a )
1489 so they return 32 bits on 32-bit machines and 64 bits on
1490 64-bit machines. Addr has the same size as a host word.
1491
1492 LOADV8 is always ULong fn ( Addr a )
1493
1494 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1495 are a UWord, and for STOREV8 they are a ULong.
1496*/
1497
sewardj95448072004-11-22 20:19:51 +00001498/* ------------------------ Size = 8 ------------------------ */
1499
sewardj8cf88b72005-07-08 01:29:33 +00001500#define MAKE_LOADV8(nAME,iS_BIGENDIAN) \
1501 \
1502 VG_REGPARM(1) \
1503 ULong nAME ( Addr aA ) \
1504 { \
sewardjae986ca2005-10-12 12:53:20 +00001505 UWord mask, a, sec_no, v_off, a_off, abits; \
1506 SecMap* sm; \
1507 \
sewardj8cf88b72005-07-08 01:29:33 +00001508 PROF_EVENT(200, #nAME); \
1509 \
1510 if (VG_DEBUG_MEMORY >= 2) \
1511 return mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
1512 \
sewardjae986ca2005-10-12 12:53:20 +00001513 mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1514 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001515 \
1516 /* If any part of 'a' indicated by the mask is 1, either */ \
1517 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1518 /* covered by the primary map. Either way we defer to the */ \
1519 /* slow-path case. */ \
1520 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1521 PROF_EVENT(201, #nAME"-slow1"); \
sewardj78947932006-01-05 14:09:46 +00001522 return (ULong)mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
sewardj8cf88b72005-07-08 01:29:33 +00001523 } \
1524 \
sewardjae986ca2005-10-12 12:53:20 +00001525 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001526 \
1527 if (VG_DEBUG_MEMORY >= 1) \
1528 tl_assert(sec_no < N_PRIMARY_MAP); \
1529 \
sewardjae986ca2005-10-12 12:53:20 +00001530 sm = primary_map[sec_no]; \
1531 v_off = a & 0xFFFF; \
1532 a_off = v_off >> 3; \
1533 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001534 \
1535 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1536 /* Handle common case quickly: a is suitably aligned, */ \
1537 /* is mapped, and is addressible. */ \
1538 return ((ULong*)(sm->vbyte))[ v_off >> 3 ]; \
1539 } else { \
1540 /* Slow but general case. */ \
1541 PROF_EVENT(202, #nAME"-slow2"); \
1542 return mc_LOADVn_slow( a, 8, iS_BIGENDIAN ); \
1543 } \
sewardjf9d81612005-04-23 23:25:49 +00001544 }
1545
sewardj8cf88b72005-07-08 01:29:33 +00001546MAKE_LOADV8( MC_(helperc_LOADV8be), True /*bigendian*/ );
1547MAKE_LOADV8( MC_(helperc_LOADV8le), False/*littleendian*/ );
sewardjf9d81612005-04-23 23:25:49 +00001548
sewardjf9d81612005-04-23 23:25:49 +00001549
sewardj8cf88b72005-07-08 01:29:33 +00001550#define MAKE_STOREV8(nAME,iS_BIGENDIAN) \
1551 \
1552 VG_REGPARM(1) \
1553 void nAME ( Addr aA, ULong vbytes ) \
1554 { \
sewardjae986ca2005-10-12 12:53:20 +00001555 UWord mask, a, sec_no, v_off, a_off, abits; \
1556 SecMap* sm; \
1557 \
sewardj8cf88b72005-07-08 01:29:33 +00001558 PROF_EVENT(210, #nAME); \
1559 \
1560 if (VG_DEBUG_MEMORY >= 2) \
1561 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1562 \
sewardjae986ca2005-10-12 12:53:20 +00001563 mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1564 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001565 \
1566 /* If any part of 'a' indicated by the mask is 1, either */ \
1567 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1568 /* covered by the primary map. Either way we defer to the */ \
1569 /* slow-path case. */ \
1570 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1571 PROF_EVENT(211, #nAME"-slow1"); \
1572 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1573 return; \
1574 } \
1575 \
sewardjae986ca2005-10-12 12:53:20 +00001576 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001577 \
1578 if (VG_DEBUG_MEMORY >= 1) \
1579 tl_assert(sec_no < N_PRIMARY_MAP); \
1580 \
sewardjae986ca2005-10-12 12:53:20 +00001581 sm = primary_map[sec_no]; \
1582 v_off = a & 0xFFFF; \
1583 a_off = v_off >> 3; \
1584 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001585 \
1586 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1587 && abits == VGM_BYTE_VALID)) { \
1588 /* Handle common case quickly: a is suitably aligned, */ \
1589 /* is mapped, and is addressible. */ \
1590 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes; \
1591 } else { \
1592 /* Slow but general case. */ \
1593 PROF_EVENT(212, #nAME"-slow2"); \
1594 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1595 } \
sewardjf9d81612005-04-23 23:25:49 +00001596 }
1597
sewardj8cf88b72005-07-08 01:29:33 +00001598MAKE_STOREV8( MC_(helperc_STOREV8be), True /*bigendian*/ );
1599MAKE_STOREV8( MC_(helperc_STOREV8le), False/*littleendian*/ );
sewardj95448072004-11-22 20:19:51 +00001600
sewardj95448072004-11-22 20:19:51 +00001601
1602/* ------------------------ Size = 4 ------------------------ */
1603
sewardj8cf88b72005-07-08 01:29:33 +00001604#define MAKE_LOADV4(nAME,iS_BIGENDIAN) \
1605 \
1606 VG_REGPARM(1) \
1607 UWord nAME ( Addr aA ) \
1608 { \
sewardjae986ca2005-10-12 12:53:20 +00001609 UWord mask, a, sec_no, v_off, a_off, abits; \
1610 SecMap* sm; \
1611 \
sewardj8cf88b72005-07-08 01:29:33 +00001612 PROF_EVENT(220, #nAME); \
1613 \
1614 if (VG_DEBUG_MEMORY >= 2) \
1615 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1616 \
sewardjae986ca2005-10-12 12:53:20 +00001617 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1618 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001619 \
1620 /* If any part of 'a' indicated by the mask is 1, either */ \
1621 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1622 /* covered by the primary map. Either way we defer to the */ \
1623 /* slow-path case. */ \
1624 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1625 PROF_EVENT(221, #nAME"-slow1"); \
1626 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1627 } \
1628 \
sewardjae986ca2005-10-12 12:53:20 +00001629 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001630 \
1631 if (VG_DEBUG_MEMORY >= 1) \
1632 tl_assert(sec_no < N_PRIMARY_MAP); \
1633 \
sewardjae986ca2005-10-12 12:53:20 +00001634 sm = primary_map[sec_no]; \
1635 v_off = a & 0xFFFF; \
1636 a_off = v_off >> 3; \
1637 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001638 abits >>= (a & 4); \
1639 abits &= 15; \
1640 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) { \
1641 /* Handle common case quickly: a is suitably aligned, */ \
1642 /* is mapped, and is addressible. */ \
1643 /* On a 32-bit platform, simply hoick the required 32 */ \
1644 /* bits out of the vbyte array. On a 64-bit platform, */ \
1645 /* also set the upper 32 bits to 1 ("undefined"), just */ \
1646 /* in case. This almost certainly isn't necessary, */ \
1647 /* but be paranoid. */ \
1648 UWord ret = (UWord)0xFFFFFFFF00000000ULL; \
1649 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] ); \
1650 return ret; \
1651 } else { \
1652 /* Slow but general case. */ \
1653 PROF_EVENT(222, #nAME"-slow2"); \
1654 return (UWord)mc_LOADVn_slow( a, 4, iS_BIGENDIAN ); \
1655 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001656 }
1657
sewardj8cf88b72005-07-08 01:29:33 +00001658MAKE_LOADV4( MC_(helperc_LOADV4be), True /*bigendian*/ );
1659MAKE_LOADV4( MC_(helperc_LOADV4le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001660
sewardjc1a2cda2005-04-21 17:34:00 +00001661
sewardj8cf88b72005-07-08 01:29:33 +00001662#define MAKE_STOREV4(nAME,iS_BIGENDIAN) \
1663 \
1664 VG_REGPARM(2) \
1665 void nAME ( Addr aA, UWord vbytes ) \
1666 { \
sewardjae986ca2005-10-12 12:53:20 +00001667 UWord mask, a, sec_no, v_off, a_off, abits; \
1668 SecMap* sm; \
1669 \
sewardj8cf88b72005-07-08 01:29:33 +00001670 PROF_EVENT(230, #nAME); \
1671 \
1672 if (VG_DEBUG_MEMORY >= 2) \
1673 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1674 \
sewardjae986ca2005-10-12 12:53:20 +00001675 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1676 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001677 \
1678 /* If any part of 'a' indicated by the mask is 1, either */ \
1679 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1680 /* covered by the primary map. Either way we defer to the */ \
1681 /* slow-path case. */ \
1682 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1683 PROF_EVENT(231, #nAME"-slow1"); \
1684 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1685 return; \
1686 } \
1687 \
sewardjae986ca2005-10-12 12:53:20 +00001688 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001689 \
1690 if (VG_DEBUG_MEMORY >= 1) \
1691 tl_assert(sec_no < N_PRIMARY_MAP); \
1692 \
sewardjae986ca2005-10-12 12:53:20 +00001693 sm = primary_map[sec_no]; \
1694 v_off = a & 0xFFFF; \
1695 a_off = v_off >> 3; \
1696 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001697 abits >>= (a & 4); \
1698 abits &= 15; \
1699 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1700 && abits == VGM_NIBBLE_VALID)) { \
1701 /* Handle common case quickly: a is suitably aligned, */ \
1702 /* is mapped, and is addressible. */ \
1703 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes; \
1704 } else { \
1705 /* Slow but general case. */ \
1706 PROF_EVENT(232, #nAME"-slow2"); \
1707 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1708 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001709 }
1710
sewardj8cf88b72005-07-08 01:29:33 +00001711MAKE_STOREV4( MC_(helperc_STOREV4be), True /*bigendian*/ );
1712MAKE_STOREV4( MC_(helperc_STOREV4le), False/*littleendian*/ );
njn25e49d8e72002-09-23 09:36:25 +00001713
njn25e49d8e72002-09-23 09:36:25 +00001714
sewardj95448072004-11-22 20:19:51 +00001715/* ------------------------ Size = 2 ------------------------ */
1716
sewardj8cf88b72005-07-08 01:29:33 +00001717#define MAKE_LOADV2(nAME,iS_BIGENDIAN) \
1718 \
1719 VG_REGPARM(1) \
1720 UWord nAME ( Addr aA ) \
1721 { \
sewardjae986ca2005-10-12 12:53:20 +00001722 UWord mask, a, sec_no, v_off, a_off, abits; \
1723 SecMap* sm; \
1724 \
sewardj8cf88b72005-07-08 01:29:33 +00001725 PROF_EVENT(240, #nAME); \
1726 \
1727 if (VG_DEBUG_MEMORY >= 2) \
1728 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1729 \
sewardjae986ca2005-10-12 12:53:20 +00001730 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1731 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001732 \
1733 /* If any part of 'a' indicated by the mask is 1, either */ \
1734 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1735 /* covered by the primary map. Either way we defer to the */ \
1736 /* slow-path case. */ \
1737 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1738 PROF_EVENT(241, #nAME"-slow1"); \
1739 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1740 } \
1741 \
sewardjae986ca2005-10-12 12:53:20 +00001742 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001743 \
1744 if (VG_DEBUG_MEMORY >= 1) \
1745 tl_assert(sec_no < N_PRIMARY_MAP); \
1746 \
sewardjae986ca2005-10-12 12:53:20 +00001747 sm = primary_map[sec_no]; \
1748 v_off = a & 0xFFFF; \
1749 a_off = v_off >> 3; \
1750 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001751 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1752 /* Handle common case quickly: a is mapped, and the */ \
1753 /* entire 8-byte block it lives in is addressible. */     \
1754 /* Set the upper 16/48 bits of the result to 1 */ \
1755 /* ("undefined"), just in case. This almost certainly */ \
1756 /* isn't necessary, but be paranoid. */ \
1757 return (~(UWord)0xFFFF) \
1758 | \
1759 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] ); \
1760 } else { \
1761 /* Slow but general case. */ \
1762 PROF_EVENT(242, #nAME"-slow2"); \
1763 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1764 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001765 }
1766
sewardj8cf88b72005-07-08 01:29:33 +00001767MAKE_LOADV2( MC_(helperc_LOADV2be), True /*bigendian*/ );
1768MAKE_LOADV2( MC_(helperc_LOADV2le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001769
sewardjc1a2cda2005-04-21 17:34:00 +00001770
sewardj8cf88b72005-07-08 01:29:33 +00001771#define MAKE_STOREV2(nAME,iS_BIGENDIAN) \
1772 \
1773 VG_REGPARM(2) \
1774 void nAME ( Addr aA, UWord vbytes ) \
1775 { \
sewardjae986ca2005-10-12 12:53:20 +00001776 UWord mask, a, sec_no, v_off, a_off, abits; \
1777 SecMap* sm; \
1778 \
sewardj8cf88b72005-07-08 01:29:33 +00001779 PROF_EVENT(250, #nAME); \
1780 \
1781 if (VG_DEBUG_MEMORY >= 2) \
1782 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1783 \
sewardjae986ca2005-10-12 12:53:20 +00001784 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1785 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001786 \
1787 /* If any part of 'a' indicated by the mask is 1, either */ \
1788 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1789 /* covered by the primary map. Either way we defer to the */ \
1790 /* slow-path case. */ \
1791 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1792 PROF_EVENT(251, #nAME"-slow1"); \
1793 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1794 return; \
1795 } \
1796 \
sewardjae986ca2005-10-12 12:53:20 +00001797 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001798 \
1799 if (VG_DEBUG_MEMORY >= 1) \
1800 tl_assert(sec_no < N_PRIMARY_MAP); \
1801 \
sewardjae986ca2005-10-12 12:53:20 +00001802 sm = primary_map[sec_no]; \
1803 v_off = a & 0xFFFF; \
1804 a_off = v_off >> 3; \
1805 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001806 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1807 && abits == VGM_BYTE_VALID)) { \
1808 /* Handle common case quickly. */ \
1809 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes; \
1810 } else { \
1811 /* Slow but general case. */ \
1812 PROF_EVENT(252, #nAME"-slow2"); \
1813 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1814 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001815 }
1816
njn25e49d8e72002-09-23 09:36:25 +00001817
sewardj8cf88b72005-07-08 01:29:33 +00001818MAKE_STOREV2( MC_(helperc_STOREV2be), True /*bigendian*/ );
1819MAKE_STOREV2( MC_(helperc_STOREV2le), False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001820
njn25e49d8e72002-09-23 09:36:25 +00001821
sewardj95448072004-11-22 20:19:51 +00001822/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00001823/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00001824
njnaf839f52005-06-23 03:27:57 +00001825VG_REGPARM(1)
sewardj8cf88b72005-07-08 01:29:33 +00001826UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001827{
sewardjae986ca2005-10-12 12:53:20 +00001828 UWord mask, a, sec_no, v_off, a_off, abits;
1829 SecMap* sm;
1830
sewardj8cf88b72005-07-08 01:29:33 +00001831 PROF_EVENT(260, "helperc_LOADV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001832
1833# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001834 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001835# else
1836
sewardjae986ca2005-10-12 12:53:20 +00001837 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1838 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001839
1840 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1841 exceeds the range covered by the primary map. In which case we
1842 defer to the slow-path case. */
1843 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001844 PROF_EVENT(261, "helperc_LOADV1-slow1");
1845 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001846 }
1847
sewardjae986ca2005-10-12 12:53:20 +00001848 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001849
1850# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001851 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001852# endif
1853
sewardjae986ca2005-10-12 12:53:20 +00001854 sm = primary_map[sec_no];
1855 v_off = a & 0xFFFF;
1856 a_off = v_off >> 3;
1857 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001858 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1859 /* Handle common case quickly: a is mapped, and the entire
1860 word32 it lives in is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001861 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1862 just in case. This almost certainly isn't necessary, but be
1863 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001864 return (~(UWord)0xFF)
1865 |
1866 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1867 } else {
1868 /* Slow but general case. */
sewardj8cf88b72005-07-08 01:29:33 +00001869 PROF_EVENT(262, "helperc_LOADV1-slow2");
1870 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001871 }
1872# endif
njn25e49d8e72002-09-23 09:36:25 +00001873}
1874
sewardjc1a2cda2005-04-21 17:34:00 +00001875
njnaf839f52005-06-23 03:27:57 +00001876VG_REGPARM(2)
sewardj8cf88b72005-07-08 01:29:33 +00001877void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001878{
sewardjae986ca2005-10-12 12:53:20 +00001879 UWord mask, a, sec_no, v_off, a_off, abits;
1880 SecMap* sm;
1881
sewardj8cf88b72005-07-08 01:29:33 +00001882 PROF_EVENT(270, "helperc_STOREV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001883
1884# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001885 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001886# else
1887
sewardjae986ca2005-10-12 12:53:20 +00001888 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1889 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001890 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1891 exceeds the range covered by the primary map. In which case we
1892 defer to the slow-path case. */
1893 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001894 PROF_EVENT(271, "helperc_STOREV1-slow1");
1895 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001896 return;
1897 }
1898
sewardjae986ca2005-10-12 12:53:20 +00001899 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001900
1901# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001902 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001903# endif
1904
sewardjae986ca2005-10-12 12:53:20 +00001905 sm = primary_map[sec_no];
1906 v_off = a & 0xFFFF;
1907 a_off = v_off >> 3;
1908 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001909 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1910 && abits == VGM_BYTE_VALID)) {
1911 /* Handle common case quickly: a is mapped, the entire word32 it
1912 lives in is addressible. */
1913 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1914 } else {
sewardj8cf88b72005-07-08 01:29:33 +00001915 PROF_EVENT(272, "helperc_STOREV1-slow2");
1916 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001917 }
1918
1919# endif
njn25e49d8e72002-09-23 09:36:25 +00001920}
1921
1922
sewardjc859fbf2005-04-22 21:10:28 +00001923/*------------------------------------------------------------*/
1924/*--- Functions called directly from generated code: ---*/
1925/*--- Value-check failure handlers. ---*/
1926/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001927
njn5c004e42002-11-18 11:04:50 +00001928void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001929{
njn9e63cb62005-05-08 18:34:59 +00001930 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001931}
1932
njn5c004e42002-11-18 11:04:50 +00001933void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001934{
njn9e63cb62005-05-08 18:34:59 +00001935 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001936}
1937
njn5c004e42002-11-18 11:04:50 +00001938void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001939{
njn9e63cb62005-05-08 18:34:59 +00001940 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001941}
1942
sewardj11bcc4e2005-04-23 22:38:38 +00001943void MC_(helperc_value_check8_fail) ( void )
1944{
njn9e63cb62005-05-08 18:34:59 +00001945 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001946}
1947
njnaf839f52005-06-23 03:27:57 +00001948VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001949{
njn9e63cb62005-05-08 18:34:59 +00001950 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001951}
1952
njn25e49d8e72002-09-23 09:36:25 +00001953
sewardj45d94cc2005-04-20 14:44:11 +00001954//zz /*------------------------------------------------------------*/
1955//zz /*--- Metadata get/set functions, for client requests. ---*/
1956//zz /*------------------------------------------------------------*/
1957//zz
1958//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1959//zz error, 3 == addressing error. */
1960//zz static Int mc_get_or_set_vbits_for_client (
1961//zz ThreadId tid,
1962//zz Addr dataV,
1963//zz Addr vbitsV,
1964//zz SizeT size,
1965//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1966//zz )
1967//zz {
1968//zz Bool addressibleD = True;
1969//zz Bool addressibleV = True;
1970//zz UInt* data = (UInt*)dataV;
1971//zz UInt* vbits = (UInt*)vbitsV;
1972//zz SizeT szW = size / 4; /* sigh */
1973//zz SizeT i;
1974//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1975//zz UInt* vbitsP = NULL; /* ditto */
1976//zz
1977//zz /* Check alignment of args. */
1978//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1979//zz return 2;
1980//zz if ((size & 3) != 0)
1981//zz return 2;
1982//zz
1983//zz /* Check that arrays are addressible. */
1984//zz for (i = 0; i < szW; i++) {
1985//zz dataP = &data[i];
1986//zz vbitsP = &vbits[i];
1987//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1988//zz addressibleD = False;
1989//zz break;
1990//zz }
1991//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1992//zz addressibleV = False;
1993//zz break;
1994//zz }
1995//zz }
1996//zz if (!addressibleD) {
1997//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1998//zz setting ? True : False );
1999//zz return 3;
2000//zz }
2001//zz if (!addressibleV) {
2002//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
2003//zz setting ? False : True );
2004//zz return 3;
2005//zz }
2006//zz
2007//zz /* Do the copy */
2008//zz if (setting) {
2009//zz /* setting */
2010//zz for (i = 0; i < szW; i++) {
2011//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00002012//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00002013//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
2014//zz }
2015//zz } else {
2016//zz /* getting */
2017//zz for (i = 0; i < szW; i++) {
2018//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
2019//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
2020//zz }
2021//zz }
2022//zz
2023//zz return 1;
2024//zz }
sewardj05fe85e2005-04-27 22:46:36 +00002025
2026
2027/*------------------------------------------------------------*/
2028/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
2029/*------------------------------------------------------------*/
2030
2031/* For the memory leak detector, say whether an entire 64k chunk of
2032 address space is possibly in use, or not. If in doubt return
2033 True.
2034*/
2035static
2036Bool mc_is_within_valid_secondary ( Addr a )
2037{
2038 SecMap* sm = maybe_get_secmap_for ( a );
2039 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
2040 /* Definitely not in use. */
2041 return False;
2042 } else {
2043 return True;
2044 }
2045}
2046
2047
2048/* For the memory leak detector, say whether or not a given word
2049 address is to be regarded as valid. */
2050static
2051Bool mc_is_valid_aligned_word ( Addr a )
2052{
2053 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
2054 if (sizeof(UWord) == 4) {
2055 tl_assert(VG_IS_4_ALIGNED(a));
2056 } else {
2057 tl_assert(VG_IS_8_ALIGNED(a));
2058 }
2059 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
2060 return True;
2061 } else {
2062 return False;
2063 }
2064}
sewardja4495682002-10-21 07:29:59 +00002065
2066
nethercote996901a2004-08-03 13:29:09 +00002067/* Leak detector for this tool. We do nothing tool-specific here; we
sewardja4495682002-10-21 07:29:59 +00002068 merely run the generic leak detector with parameters suitable for
nethercote996901a2004-08-03 13:29:09 +00002069 this tool. */
njnb8dca862005-03-14 02:42:44 +00002070static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00002071{
sewardj05fe85e2005-04-27 22:46:36 +00002072 MAC_(do_detect_memory_leaks) (
2073 tid,
2074 mode,
2075 mc_is_within_valid_secondary,
2076 mc_is_valid_aligned_word
2077 );
njn25e49d8e72002-09-23 09:36:25 +00002078}
2079
2080
sewardjc859fbf2005-04-22 21:10:28 +00002081/*------------------------------------------------------------*/
2082/*--- Initialisation ---*/
2083/*------------------------------------------------------------*/
2084
2085static void init_shadow_memory ( void )
2086{
2087 Int i;
2088 SecMap* sm;
2089
2090 /* Build the 3 distinguished secondaries */
2091 tl_assert(VGM_BIT_INVALID == 1);
2092 tl_assert(VGM_BIT_VALID == 0);
2093 tl_assert(VGM_BYTE_INVALID == 0xFF);
2094 tl_assert(VGM_BYTE_VALID == 0);
2095
2096 /* Set A invalid, V invalid. */
2097 sm = &sm_distinguished[SM_DIST_NOACCESS];
2098 for (i = 0; i < 65536; i++)
2099 sm->vbyte[i] = VGM_BYTE_INVALID;
2100 for (i = 0; i < 8192; i++)
2101 sm->abits[i] = VGM_BYTE_INVALID;
2102
2103 /* Set A valid, V invalid. */
2104 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2105 for (i = 0; i < 65536; i++)
2106 sm->vbyte[i] = VGM_BYTE_INVALID;
2107 for (i = 0; i < 8192; i++)
2108 sm->abits[i] = VGM_BYTE_VALID;
2109
2110 /* Set A valid, V valid. */
2111 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2112 for (i = 0; i < 65536; i++)
2113 sm->vbyte[i] = VGM_BYTE_VALID;
2114 for (i = 0; i < 8192; i++)
2115 sm->abits[i] = VGM_BYTE_VALID;
2116
2117 /* Set up the primary map. */
2118 /* These entries gradually get overwritten as the used address
2119 space expands. */
2120 for (i = 0; i < N_PRIMARY_MAP; i++)
2121 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2122
2123 /* auxmap_size and auxmap_used need no resetting here; they are
2124 statically initialised to zero. */
2125}
2126
2127
2128/*------------------------------------------------------------*/
2129/*--- Sanity check machinery (permanently engaged) ---*/
2130/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002131
njn51d827b2005-05-09 01:02:08 +00002132static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002133{
jseward9800fd32004-01-04 23:08:04 +00002134 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002135 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002136 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002137 return True;
njn25e49d8e72002-09-23 09:36:25 +00002138}
2139
njn51d827b2005-05-09 01:02:08 +00002140static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002141{
sewardj23eb2fd2005-04-22 16:29:19 +00002142 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002143 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002144 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002145
sewardj23eb2fd2005-04-22 16:29:19 +00002146 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002147 PROF_EVENT(491, "expensive_sanity_check");
2148
sewardj23eb2fd2005-04-22 16:29:19 +00002149 /* Check that the 3 distinguished SMs are still as they should
2150 be. */
njn25e49d8e72002-09-23 09:36:25 +00002151
sewardj45d94cc2005-04-20 14:44:11 +00002152 /* Check A invalid, V invalid. */
2153 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002154 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002155 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002156 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002157 for (i = 0; i < 8192; i++)
2158 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002159 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002160
sewardj45d94cc2005-04-20 14:44:11 +00002161 /* Check A valid, V invalid. */
2162 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2163 for (i = 0; i < 65536; i++)
2164 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002165 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002166 for (i = 0; i < 8192; i++)
2167 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002168 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002169
2170 /* Check A valid, V valid. */
2171 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2172 for (i = 0; i < 65536; i++)
2173 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002174 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002175 for (i = 0; i < 8192; i++)
2176 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002177 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002178
sewardj23eb2fd2005-04-22 16:29:19 +00002179 if (bad) {
2180 VG_(printf)("memcheck expensive sanity: "
2181 "distinguished_secondaries have changed\n");
2182 return False;
2183 }
2184
2185 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002186 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002187 bad = True;
2188
2189 if (bad) {
2190 VG_(printf)("memcheck expensive sanity: "
2191 "nonsensical auxmap sizing\n");
2192 return False;
2193 }
2194
2195 /* check that the number of secmaps issued matches the number that
2196 are reachable (iow, no secmap leaks) */
2197 n_secmaps_found = 0;
2198 for (i = 0; i < N_PRIMARY_MAP; i++) {
2199 if (primary_map[i] == NULL) {
2200 bad = True;
2201 } else {
2202 if (!is_distinguished_sm(primary_map[i]))
2203 n_secmaps_found++;
2204 }
2205 }
2206
2207 for (i = 0; i < auxmap_used; i++) {
2208 if (auxmap[i].sm == NULL) {
2209 bad = True;
2210 } else {
2211 if (!is_distinguished_sm(auxmap[i].sm))
2212 n_secmaps_found++;
2213 }
2214 }
2215
2216 if (n_secmaps_found != n_secmaps_issued)
2217 bad = True;
2218
2219 if (bad) {
2220 VG_(printf)("memcheck expensive sanity: "
2221 "apparent secmap leakage\n");
2222 return False;
2223 }
2224
2225 /* check that auxmap only covers address space that the primary
2226 doesn't */
2227
2228 for (i = 0; i < auxmap_used; i++)
2229 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2230 bad = True;
2231
2232 if (bad) {
2233 VG_(printf)("memcheck expensive sanity: "
2234 "auxmap covers wrong address space\n");
2235 return False;
2236 }
2237
2238 /* Not checked (too expensive): that there is only one pointer to each secmap. */
njn25e49d8e72002-09-23 09:36:25 +00002239
2240 return True;
2241}
sewardj45d94cc2005-04-20 14:44:11 +00002242
njn25e49d8e72002-09-23 09:36:25 +00002243
njn25e49d8e72002-09-23 09:36:25 +00002244/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002245/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002246/*------------------------------------------------------------*/
2247
njn51d827b2005-05-09 01:02:08 +00002248static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002249{
sewardjf3418c02005-11-08 14:10:24 +00002250 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002251}
2252
njn51d827b2005-05-09 01:02:08 +00002253static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002254{
njn3e884182003-04-15 13:03:23 +00002255 MAC_(print_common_usage)();
njn3e884182003-04-15 13:03:23 +00002256}
2257
njn51d827b2005-05-09 01:02:08 +00002258static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002259{
2260 MAC_(print_common_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00002261}
2262
sewardjf3418c02005-11-08 14:10:24 +00002263
nethercote8b76fe52004-11-08 19:20:09 +00002264/*------------------------------------------------------------*/
2265/*--- Client requests ---*/
2266/*------------------------------------------------------------*/
2267
2268/* Client block management:
2269
2270 This is managed as an expanding array of client block descriptors.
2271 Indices of live descriptors are issued to the client, so it can ask
2272 to free them later. Therefore we cannot slide live entries down
2273 over dead ones. Instead we must use free/inuse flags and scan for
2274 an empty slot at allocation time. This in turn means allocation is
2275 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002276
sewardjedc75ab2005-03-15 23:30:32 +00002277 An unused block has start == size == 0
2278*/
nethercote8b76fe52004-11-08 19:20:09 +00002279
2280typedef
2281 struct {
2282 Addr start;
2283 SizeT size;
2284 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00002285 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002286 }
2287 CGenBlock;
2288
2289/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002290static UInt cgb_size = 0;
2291static UInt cgb_used = 0;
2292static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002293
2294/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002295static UInt cgb_used_MAX = 0; /* Max in use. */
2296static UInt cgb_allocs = 0; /* Number of allocs. */
2297static UInt cgb_discards = 0; /* Number of discards. */
2298static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002299
2300
2301static
njn695c16e2005-03-27 03:40:28 +00002302Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002303{
2304 UInt i, sz_new;
2305 CGenBlock* cgbs_new;
2306
njn695c16e2005-03-27 03:40:28 +00002307 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002308
njn695c16e2005-03-27 03:40:28 +00002309 for (i = 0; i < cgb_used; i++) {
2310 cgb_search++;
2311 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002312 return i;
2313 }
2314
2315 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002316 if (cgb_used < cgb_size) {
2317 cgb_used++;
2318 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002319 }
2320
2321 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002322 tl_assert(cgb_used == cgb_size);
2323 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002324
2325 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002326 for (i = 0; i < cgb_used; i++)
2327 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002328
njn695c16e2005-03-27 03:40:28 +00002329 if (cgbs != NULL)
2330 VG_(free)( cgbs );
2331 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002332
njn695c16e2005-03-27 03:40:28 +00002333 cgb_size = sz_new;
2334 cgb_used++;
2335 if (cgb_used > cgb_used_MAX)
2336 cgb_used_MAX = cgb_used;
2337 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002338}
2339
2340
2341static void show_client_block_stats ( void )
2342{
2343 VG_(message)(Vg_DebugMsg,
2344 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002345 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002346 );
2347}
2348
nethercote8b76fe52004-11-08 19:20:09 +00002349static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2350{
2351 UInt i;
2352 /* VG_(printf)("try to identify %d\n", a); */
2353
2354 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002355 for (i = 0; i < cgb_used; i++) {
2356 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002357 continue;
njn717cde52005-05-10 02:47:21 +00002358 // Use zero as the redzone for client blocks.
2359 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00002360 /* OK - maybe it's a mempool, too? */
njn12627272005-08-14 18:32:16 +00002361 MAC_Mempool* mp = VG_(HT_lookup)(MAC_(mempool_list),
2362 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00002363 if (mp != NULL) {
2364 if (mp->chunks != NULL) {
2365 MAC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00002366 VG_(HT_ResetIter)(mp->chunks);
2367 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0cb0d2005-08-15 01:52:02 +00002368 if (VG_(addr_is_in_block)(a, mc->data, mc->size,
2369 MAC_MALLOC_REDZONE_SZB)) {
2370 ai->akind = UserG;
2371 ai->blksize = mc->size;
2372 ai->rwoffset = (Int)(a) - (Int)mc->data;
2373 ai->lastchange = mc->where;
2374 return True;
2375 }
nethercote8b76fe52004-11-08 19:20:09 +00002376 }
2377 }
njn1d0cb0d2005-08-15 01:52:02 +00002378 ai->akind = Mempool;
2379 ai->blksize = cgbs[i].size;
2380 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002381 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002382 return True;
2383 }
njn1d0cb0d2005-08-15 01:52:02 +00002384 ai->akind = UserG;
2385 ai->blksize = cgbs[i].size;
2386 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002387 ai->lastchange = cgbs[i].where;
njn1d0cb0d2005-08-15 01:52:02 +00002388 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002389 return True;
2390 }
2391 }
2392 return False;
2393}
2394
njn51d827b2005-05-09 01:02:08 +00002395static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002396{
2397 Int i;
2398 Bool ok;
2399 Addr bad_addr;
2400
njnfc26ff92004-11-22 19:12:49 +00002401 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002402 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2403 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2404 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2405 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2406 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2407 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2408 return False;
2409
2410 switch (arg[0]) {
2411 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2412 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2413 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002414 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2415 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002416 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00002417 break;
nethercote8b76fe52004-11-08 19:20:09 +00002418
2419 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2420 MC_ReadResult res;
2421 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2422 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002423 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2424 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002425 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002426 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2427 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002428 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00002429 break;
nethercote8b76fe52004-11-08 19:20:09 +00002430 }
2431
2432 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002433 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00002434 *ret = 0; /* return value is meaningless */
2435 break;
nethercote8b76fe52004-11-08 19:20:09 +00002436
2437 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002438 mc_make_noaccess ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002439 *ret = -1;
2440 break;
nethercote8b76fe52004-11-08 19:20:09 +00002441
2442 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002443 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002444 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00002445 break;
nethercote8b76fe52004-11-08 19:20:09 +00002446
2447 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002448 mc_make_readable ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002449 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002450 break;
2451
sewardjedc75ab2005-03-15 23:30:32 +00002452 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00002453 if (arg[1] != 0 && arg[2] != 0) {
2454 i = alloc_client_block();
2455 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2456 cgbs[i].start = arg[1];
2457 cgbs[i].size = arg[2];
2458 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2459 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002460
sewardj8cf88b72005-07-08 01:29:33 +00002461 *ret = i;
2462 } else
2463 *ret = -1;
2464 break;
sewardjedc75ab2005-03-15 23:30:32 +00002465
nethercote8b76fe52004-11-08 19:20:09 +00002466 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002467 if (cgbs == NULL
2468 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00002469 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002470 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00002471 } else {
2472 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2473 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2474 VG_(free)(cgbs[arg[2]].desc);
2475 cgb_discards++;
2476 *ret = 0;
2477 }
2478 break;
nethercote8b76fe52004-11-08 19:20:09 +00002479
sewardj45d94cc2005-04-20 14:44:11 +00002480//zz case VG_USERREQ__GET_VBITS:
2481//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2482//zz error. */
2483//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2484//zz *ret = mc_get_or_set_vbits_for_client
2485//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2486//zz break;
2487//zz
2488//zz case VG_USERREQ__SET_VBITS:
2489//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2490//zz error. */
2491//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2492//zz *ret = mc_get_or_set_vbits_for_client
2493//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2494//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002495
2496 default:
2497 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2498 return True;
2499 } else {
2500 VG_(message)(Vg_UserMsg,
2501 "Warning: unknown memcheck client request code %llx",
2502 (ULong)arg[0]);
2503 return False;
2504 }
2505 }
2506 return True;
2507}
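
/* Client-side counterparts of the requests handled above, as an
   illustrative sketch (macro names as exported by this era's memcheck.h;
   consult that header for the authoritative set):

      char* p = malloc(64);
      VALGRIND_CHECK_WRITABLE(p, 64);   // -> VG_USERREQ__CHECK_WRITABLE
      VALGRIND_CHECK_READABLE(p, 64);   // -> VG_USERREQ__CHECK_READABLE
      VALGRIND_MAKE_READABLE(p, 64);    // -> VG_USERREQ__MAKE_READABLE
      VALGRIND_DO_LEAK_CHECK;           // -> VG_USERREQ__DO_LEAK_CHECK
*/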
njn25e49d8e72002-09-23 09:36:25 +00002508
2509/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002510/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002511/*------------------------------------------------------------*/
2512
njn51d827b2005-05-09 01:02:08 +00002513static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002514{
sewardj71bc3cb2005-05-19 00:25:45 +00002515 /* If we've been asked to emit XML, mash around various other
2516 options so as to constrain the output somewhat. */
2517 if (VG_(clo_xml)) {
2518 /* Extract as much info as possible from the leak checker. */
sewardj09890d82005-05-20 02:45:15 +00002519 /* MAC_(clo_show_reachable) = True; */
sewardj71bc3cb2005-05-19 00:25:45 +00002520 MAC_(clo_leak_check) = LC_Full;
2521 }
njn5c004e42002-11-18 11:04:50 +00002522}
2523
njn51d827b2005-05-09 01:02:08 +00002524static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002525{
sewardj23eb2fd2005-04-22 16:29:19 +00002526 Int i, n_accessible_dist;
2527 SecMap* sm;
2528
sewardjae986ca2005-10-12 12:53:20 +00002529 MAC_(common_fini)( mc_detect_memory_leaks );
2530
sewardj45d94cc2005-04-20 14:44:11 +00002531 if (VG_(clo_verbosity) > 1) {
2532 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002533 " memcheck: sanity checks: %d cheap, %d expensive",
2534 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002535 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002536 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2537 auxmap_used,
2538 auxmap_used * 64,
2539 auxmap_used / 16 );
2540 VG_(message)(Vg_DebugMsg,
2541 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002542 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002543 VG_(message)(Vg_DebugMsg,
2544 " memcheck: secondaries: %d issued (%dk, %dM)",
2545 n_secmaps_issued,
2546 n_secmaps_issued * 64,
2547 n_secmaps_issued / 16 );
2548
2549 n_accessible_dist = 0;
2550 for (i = 0; i < N_PRIMARY_MAP; i++) {
2551 sm = primary_map[i];
2552 if (is_distinguished_sm(sm)
2553 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2554 n_accessible_dist ++;
2555 }
2556 for (i = 0; i < auxmap_used; i++) {
2557 sm = auxmap[i].sm;
2558 if (is_distinguished_sm(sm)
2559 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2560 n_accessible_dist ++;
2561 }
2562
2563 VG_(message)(Vg_DebugMsg,
2564 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2565 n_accessible_dist,
2566 n_accessible_dist * 64,
2567 n_accessible_dist / 16 );
2568
sewardj45d94cc2005-04-20 14:44:11 +00002569 }
2570
njn5c004e42002-11-18 11:04:50 +00002571 if (0) {
2572 VG_(message)(Vg_DebugMsg,
2573 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002574 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002575 }
njn25e49d8e72002-09-23 09:36:25 +00002576}
2577
njn51d827b2005-05-09 01:02:08 +00002578static void mc_pre_clo_init(void)
2579{
2580 VG_(details_name) ("Memcheck");
2581 VG_(details_version) (NULL);
2582 VG_(details_description) ("a memory error detector");
2583 VG_(details_copyright_author)(
2584 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
2585 VG_(details_bug_reports_to) (VG_BUGS_TO);
2586 VG_(details_avg_translation_sizeB) ( 370 );
2587
2588 VG_(basic_tool_funcs) (mc_post_clo_init,
2589 MC_(instrument),
2590 mc_fini);
2591
2592 VG_(needs_core_errors) ();
2593 VG_(needs_tool_errors) (MAC_(eq_Error),
2594 mc_pp_Error,
2595 MAC_(update_extra),
2596 mc_recognised_suppression,
2597 MAC_(read_extra_suppression_info),
2598 MAC_(error_matches_suppression),
2599 MAC_(get_error_name),
2600 MAC_(print_extra_suppression_info));
2601 VG_(needs_libc_freeres) ();
2602 VG_(needs_command_line_options)(mc_process_cmd_line_option,
2603 mc_print_usage,
2604 mc_print_debug_usage);
2605 VG_(needs_client_requests) (mc_handle_client_request);
2606 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
2607 mc_expensive_sanity_check);
njn51d827b2005-05-09 01:02:08 +00002608
njnfc51f8d2005-06-21 03:20:17 +00002609 VG_(needs_malloc_replacement) (MAC_(malloc),
njn51d827b2005-05-09 01:02:08 +00002610 MAC_(__builtin_new),
2611 MAC_(__builtin_vec_new),
2612 MAC_(memalign),
2613 MAC_(calloc),
2614 MAC_(free),
2615 MAC_(__builtin_delete),
2616 MAC_(__builtin_vec_delete),
2617 MAC_(realloc),
2618 MAC_MALLOC_REDZONE_SZB );
2619
2620 MAC_( new_mem_heap) = & mc_new_mem_heap;
2621 MAC_( ban_mem_heap) = & mc_make_noaccess;
2622 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
2623 MAC_( die_mem_heap) = & mc_make_noaccess;
2624 MAC_(check_noaccess) = & mc_check_noaccess;
2625
2626 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
2627 VG_(track_new_mem_stack_signal)( & mc_make_writable );
2628 VG_(track_new_mem_brk) ( & mc_make_writable );
2629 VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );
2630
2631 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
njn81623712005-10-07 04:48:37 +00002632
2633 // Nb: we don't do anything with mprotect. This means that V bits are
2634 // preserved if a program, for example, marks some memory as inaccessible
2635 // and then later marks it as accessible again.
2636 //
2637 // If an access violation occurs (eg. writing to read-only memory) we let
2638 // it fault and print an informative termination message. This doesn't
2639 // happen if the program catches the signal, though, which is bad. If we
2640 // had two A bits (for readability and writability) that were completely
2641 // distinct from V bits, then we could handle all this properly.
2642 VG_(track_change_mem_mprotect) ( NULL );
njn51d827b2005-05-09 01:02:08 +00002643
2644 VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
2645 VG_(track_die_mem_brk) ( & mc_make_noaccess );
2646 VG_(track_die_mem_munmap) ( & mc_make_noaccess );
2647
2648 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2649 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2650 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2651 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2652 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2653 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
2654
2655 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2656 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2657 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2658 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2659 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2660 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
2661
2662 VG_(track_ban_mem_stack) ( & mc_make_noaccess );
2663
2664 VG_(track_pre_mem_read) ( & mc_check_is_readable );
2665 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2666 VG_(track_pre_mem_write) ( & mc_check_is_writable );
2667 VG_(track_post_mem_write) ( & mc_post_mem_write );
2668
2669 VG_(track_pre_reg_read) ( & mc_pre_reg_read );
2670
2671 VG_(track_post_reg_write) ( & mc_post_reg_write );
2672 VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );
2673
njn51d827b2005-05-09 01:02:08 +00002674 /* Additional block description for VG_(describe_addr)() */
2675 MAC_(describe_addr_supp) = client_perm_maybe_describe;
2676
2677 init_shadow_memory();
2678 MAC_(common_pre_clo_init)();
2679
2680 tl_assert( mc_expensive_sanity_check() );
2681}
2682
sewardj45f4e7c2005-09-27 19:20:21 +00002683VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00002684
njn25e49d8e72002-09-23 09:36:25 +00002685/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002686/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002687/*--------------------------------------------------------------------*/