
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the           ---*/
/*--- accessibility (A) and validity (V) status of each byte.      ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO 22 Apr 05

   test whether it would be faster, for LOADV4, to check
   only for 8-byte validity on the fast path
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_errormgr.h"      // For mac_shared.h
#include "pub_tool_execontext.h"    // For mac_shared.h
#include "pub_tool_hashtable.h"     // For mac_shared.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


#ifdef HAVE_BUILTIN_EXPECT
#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
#else
#define EXPECTED_TAKEN(cond)     (cond)
#define EXPECTED_NOT_TAKEN(cond) (cond)
#endif

/* Define to debug the mem audit system.  Set to:
      0  no debugging, fast cases are used
      1  some sanity checking, fast cases are used
      2  max sanity checking, only slow cases are used
*/
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                      ---*/
/*------------------------------------------------------------*/

/* TODO: fix this comment */
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates 'not addressable and
//zz    not valid' for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 3 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, or addressable+valid.
//zz */

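/* Illustrative sketch (not part of the tool): how an address is carved up
   under the scheme described above.  The top bits pick a primary-map slot
   (or, above MAX_PRIMARY_ADDRESS, an auxiliary-map entry) and the low 16
   bits pick a byte within the 64k chunk that the chosen secondary map
   shadows.  It merely restates what get_secmap_readable and friends below
   do; MAX_PRIMARY_ADDRESS and the maps themselves are defined further
   down in this file. */
#if 0
static void example_address_split ( Addr a )
{
   UWord chunk  = a >> 16;       /* primary-map index / 64k chunk number */
   UWord offset = a & 0xFFFF;    /* byte offset within that chunk        */
   if (a <= MAX_PRIMARY_ADDRESS)
      VG_(printf)("primary_map[0x%llx], offset 0x%llx\n",
                  (ULong)chunk, (ULong)offset);
   else
      VG_(printf)("auxmap entry for base 0x%llx, offset 0x%llx\n",
                  (ULong)(a & ~(Addr)0xFFFF), (ULong)offset);
}
#endif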
/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Stats maps --------------- */

static Int   n_secmaps_issued   = 0;
static ULong n_auxmap_searches  = 0;
static ULong n_auxmap_cmps      = 0;
static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;


/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

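/* Illustrative sketch (not part of the tool): one SecMap shadows 64k of
   client memory with one A (addressability) bit and one V (validity) byte
   per client byte, hence abits[65536/8 == 8192] and vbyte[65536].  The
   indexing below assumes bit 0 of each abits[] element shadows the
   lowest-numbered of its 8 client bytes; that ordering is an assumption
   made for illustration only -- the real code always goes through
   read_bit_array/write_bit_array. */
#if 0
static void example_secmap_indexing ( SecMap* sm, UWord off /* 0 .. 65535 */ )
{
   UWord abit  = (sm->abits[off >> 3] >> (off & 7)) & 1;   /* assumed layout */
   UWord vbyte = sm->vbyte[off];
   VG_(printf)("off 0x%llx: A bit %llu, V byte 0x%llx\n",
               (ULong)off, (ULong)abit, (ULong)vbyte);
}
#endif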
/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
   if (new_sm == NULL)
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
                                   sizeof(SecMap) );
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;
   return new_sm;
}


/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 20000 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, return NULL.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   return NULL;
}


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* am = maybe_find_in_auxmap(a);
   if (am)
      return am;

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}

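/* Illustrative usage sketch (not part of the tool; the addresses are made
   up and must lie above MAX_PRIMARY_ADDRESS).  Because both lookup
   functions above may swap entries towards the front of the array, a
   caller must not cache an AuxMapEnt* across calls; it re-derives the
   pointer each time instead. */
#if 0
static void example_auxmap_usage ( Addr a1, Addr a2 )
{
   SecMap*    sm1;
   SecMap*    sm2;
   AuxMapEnt* am;

   am  = find_or_alloc_in_auxmap(a1);
   sm1 = am->sm;                       /* use 'am' immediately ...          */
   am  = find_or_alloc_in_auxmap(a2);  /* ... any earlier 'am' is now stale */
   sm2 = am->sm;
   (void)sm1; (void)sm2;
}
#endif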

/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}



/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}


/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th least significant byte
   in a wordszB-sized word, given the specified endianness.  (byteno 0
   is the least significant byte.) */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}

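/* Worked example (not part of the tool): with wordszB == 4, byteno counts
   from the least significant byte.  Byte 3 (the most significant) lives at
   offset 0 on a big-endian host and at offset 3 on a little-endian one. */
#if 0
static void example_byte_offset_w ( void )
{
   tl_assert( byte_offset_w(4, True,  3) == 0 );   /* BE: MSB at lowest addr  */
   tl_assert( byte_offset_w(4, True,  0) == 3 );   /* BE: LSB at highest addr */
   tl_assert( byte_offset_w(4, False, 3) == 3 );   /* LE: MSB at highest addr */
   tl_assert( byte_offset_w(4, False, 0) == 0 );   /* LE: LSB at lowest addr  */
}
#endif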

/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}


/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok, partial_load_exemption_applies;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   /* This is a hack which avoids producing errors for code which
      insists on stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressable place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MAC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
           && VG_IS_WORD_ALIGNED(a)
           && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}

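/* Worked example (not part of the tool; the address is made up).  Suppose
   a 4-byte, 4-aligned little-endian load on a 32-bit host touches three
   addressable bytes and one unaddressable one, as an optimised strlen
   overshooting a 3-byte buffer would.  Then n_addrs_bad == 1; the bad
   byte's V bits are returned as Defined either way, and the addressing
   error is reported unless the partial-load exemption applies. */
#if 0
static void example_partial_load ( void )
{
   Addr  buf = 0x5000;    /* hypothetical 4-aligned buffer, last byte bad */
   ULong vw  = mc_LOADVn_slow(buf, 4, False/*bigendian*/);
   /* With --partial-loads-ok=yes (MAC_(clo_partial_loads_ok)), the load
      is word-sized, word-aligned and has n_addrs_bad < VG_WORDSIZE, so
      no error is reported; otherwise record_address_error fires. */
   (void)vw;
}
#endif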

static
void mc_STOREVn_slow ( Addr a, SizeT szB, ULong vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}


//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
//zz
//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   abits8;
//zz    PROF_EVENT(24);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    abits8 = sm->abits[sm_off >> 3];
//zz    abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    abits8 &= 0x0F;
//zz    return abits8;
//zz }
//zz
//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(25);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    return ((UInt*)(sm->vbyte))[sm_off >> 2];
//zz }
//zz
//zz
//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    PROF_EVENT(23);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
//zz }


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.              ---*/
/*------------------------------------------------------------*/

/* Given address 'a', find the place where the pointer to a's
   secondary map lives.  If a falls into the primary map, the returned
   value points to one of the entries in primary_map[].  Otherwise,
   the auxiliary primary map is searched for 'a', or an entry is
   created for it; either way, the returned value points to the
   relevant AuxMapEnt's .sm field.

   The point of this is to enable set_address_range_perms to assign
   secondary maps in a uniform way, without worrying about whether a
   given secondary map is pointed to from the main or auxiliary
   primary map.
*/

static SecMap** find_secmap_binder_for_addr ( Addr aA )
{
   if (aA > MAX_PRIMARY_ADDRESS) {
      AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
      return &am->sm;
   } else {
      UWord a      = (UWord)aA;
      UWord sec_no = (UWord)(a >> 16);
#     if VG_DEBUG_MEMORY >= 1
      tl_assert(sec_no < N_PRIMARY_MAP);
#     endif
      return &primary_map[sec_no];
   }
}


static void set_address_range_perms ( Addr aA, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   UWord    a, vbits8, abits8, vbits32, v_off, a_off;
   SecMap*  sm;
   SecMap** binder;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the permissions make sense. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %lu, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   a = (UWord)aA;

#  if VG_DEBUG_MEMORY >= 2

   /*------------------ debug-only case ------------------ */
   { SizeT i;

     UWord example_vbyte = BIT_TO_BYTE(example_v_bit);

     tl_assert(sizeof(SizeT) == sizeof(Addr));

     if (0 && len >= 4096)
        VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                    (ULong)a, len, example_a_bit, example_v_bit);

     if (len == 0)
        return;

     for (i = 0; i < len; i++) {
        set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
     }
   }

#  else

   /*------------------ standard handling ------------------ */

   /* Decide on the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   if (example_a_bit == VGM_BIT_INVALID) {
      example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
   } else {
      if (example_v_bit == VGM_BIT_VALID) {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
      } else {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
      }
   }

   /* Make various wider versions of the A/V values to use. */
   vbits8  = BIT_TO_BYTE(example_v_bit);
   abits8  = BIT_TO_BYTE(example_a_bit);
   vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;

   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(151, "set_address_range_perms-loop1-pre");
      if (VG_IS_8_ALIGNED(a)) break;
      set_abit_and_vbyte( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0);

   /* Now go in steps of 8 bytes. */
   binder = find_secmap_binder_for_addr(a);

   while (True) {

      if (len < 8) break;

      PROF_EVENT(152, "set_address_range_perms-loop8");

      if ((a & SECONDARY_MASK) == 0) {
         /* we just traversed a primary map boundary, so update the
            binder. */
         binder = find_secmap_binder_for_addr(a);
         PROF_EVENT(153, "set_address_range_perms-update-binder");

         /* Space-optimisation.  If we are setting the entire
            secondary map, just point this entry at one of our
            distinguished secondaries.  However, only do that if it
            already points at a distinguished secondary, since doing
            otherwise would leak the existing secondary.  We could do
            better and free up any pre-existing non-distinguished
            secondary at this point, since we are guaranteed that each
            non-dist secondary only has one pointer to it, and we have
            that pointer right here. */
         if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
            PROF_EVENT(154, "set_address_range_perms-entire-secmap");
            *binder = example_dsm;
            len -= SECONDARY_SIZE;
            a += SECONDARY_SIZE;
            continue;
         }
      }

      /* If the primary is already pointing to a distinguished map
         with the same properties as we're trying to set, then leave
         it that way. */
      if (*binder == example_dsm) {
         a += 8;
         len -= 8;
         continue;
      }

      /* Make sure it's OK to write the secondary. */
      if (is_distinguished_sm(*binder))
         *binder = copy_for_writing(*binder);

      sm = *binder;
      v_off = a & 0xFFFF;
      a_off = v_off >> 3;
      sm->abits[a_off] = (UChar)abits8;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;

      a += 8;
      len -= 8;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(155, "set_address_range_perms-loop1-post");
      set_abit_and_vbyte ( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

#  endif
}


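/* Illustrative note (not part of the tool; the address is made up and
   assumed 64k-aligned).  A consequence of the space optimisation above:
   painting a whole 64k chunk with a uniform state costs only a pointer
   update in the (aux)primary map, provided the entry still points at a
   distinguished secondary, whereas the first non-uniform update of such
   a chunk makes copy_for_writing materialise a real SecMap of
   8192 + 65536 == 73728 bytes. */
#if 0
static void example_space_optimisation ( void )
{
   Addr chunk = 0x30000;   /* hypothetical 64k-aligned, 64k-sized chunk */

   /* Whole-chunk, uniform: may just repoint at sm_distinguished[..]. */
   set_address_range_perms(chunk, 65536, VGM_BIT_INVALID, VGM_BIT_INVALID);

   /* Partial update: forces a writable copy of the secondary map. */
   set_address_range_perms(chunk + 8, 8, VGM_BIT_VALID, VGM_BIT_INVALID);
}
#endif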
/* --- Set permissions for arbitrary address ranges --- */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(40, "mc_make_noaccess");
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(41, "mc_make_writable");
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(42, "mc_make_readable");
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}


/* --- Block-copy permissions (needed for implementing realloc() and
       sys_mremap). --- */

static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i, j;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");
   PROF_EVENT(50, "mc_copy_address_range_state");

   if (len == 0)
      return;

   if (src < dst) {
      for (i = 0, j = len-1; i < len; i++, j--) {
         PROF_EVENT(51, "mc_copy_address_range_state(loop)");
         get_abit_and_vbyte( &abit, &vbyte, src+j );
         set_abit_and_vbyte( dst+j, abit, vbyte );
      }
   }

   if (src > dst) {
      for (i = 0; i < len; i++) {
         PROF_EVENT(51, "mc_copy_address_range_state(loop)");
         get_abit_and_vbyte( &abit, &vbyte, src+i );
         set_abit_and_vbyte( dst+i, abit, vbyte );
      }
   }
}


/* --- Fast case permission setters, for dealing with stacks. --- */

static __inline__
void make_aligned_word32_writable ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off, mask;
   SecMap* sm;

   PROF_EVENT(300, "make_aligned_word32_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(301, "make_aligned_word32_writable-slow1");
      mc_make_writable(aA, 4);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits valid
      (0s). */
   sm->abits[a_off] &= ~mask;
#  endif
}

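/* Worked example (not part of the tool): each abits[] byte covers 8 client
   bytes, i.e. two aligned 32-bit words.  For a 4-aligned address, (a & 4)
   is 0 for the low word and 4 for the high word, so 0x0F << (a & 4) picks
   out exactly the four A bits of the word being touched; clearing them
   (&= ~mask) makes the word addressable, setting them (|= mask) makes it
   inaccessible. */
#if 0
static void example_word32_mask ( void )
{
   tl_assert( (0x0F << (0x1000 & 4)) == 0x0F );   /* low  word -> low nibble  */
   tl_assert( (0x0F << (0x1004 & 4)) == 0xF0 );   /* high word -> high nibble */
}
#endif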

static __inline__
void make_aligned_word32_noaccess ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off, mask;
   SecMap* sm;

   PROF_EVENT(310, "make_aligned_word32_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 4);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      mc_make_noaccess(aA, 4);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;

   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid
      (1s). */
   sm->abits[a_off] |= mask;
#  endif
}


/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_word64_writable ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off;
   SecMap* sm;

   PROF_EVENT(320, "make_aligned_word64_writable");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_writable(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(321, "make_aligned_word64_writable-slow1");
      mc_make_writable(aA, 8);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the new area as uninitialised. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the relevant area accessible. */
   sm->abits[a_off] = VGM_BYTE_VALID;
#  endif
}


static __inline__
void make_aligned_word64_noaccess ( Addr aA )
{
   UWord   a, sec_no, v_off, a_off;
   SecMap* sm;

   PROF_EVENT(330, "make_aligned_word64_noaccess");

#  if VG_DEBUG_MEMORY >= 2
   mc_make_noaccess(aA, 8);
#  else

   if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
      PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
      mc_make_noaccess(aA, 8);
      return;
   }

   a      = (UWord)aA;
   sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(sec_no < N_PRIMARY_MAP);
#  endif

   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
      primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);

   sm    = primary_map[sec_no];
   v_off = a & 0xFFFF;
   a_off = v_off >> 3;

   /* Paint the abandoned data as uninitialised.  Probably not
      necessary, but still .. */
   ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;

   /* Make the abandoned area inaccessible. */
   sm->abits[a_off] = VGM_BYTE_INVALID;
#  endif
}


/* The stack-pointer update handling functions */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );


void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
{
   tl_assert(sizeof(UWord) == sizeof(SizeT));
   if (0)
      VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );

#  if 0
   /* Really slow version */
   mc_make_writable(base, len);
#  endif

#  if 0
   /* Slow(ish) version, which is fairly easily seen to be correct.
   */
   if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
      make_aligned_word64_writable(base +   0);
      make_aligned_word64_writable(base +   8);
      make_aligned_word64_writable(base +  16);
      make_aligned_word64_writable(base +  24);

      make_aligned_word64_writable(base +  32);
      make_aligned_word64_writable(base +  40);
      make_aligned_word64_writable(base +  48);
      make_aligned_word64_writable(base +  56);

      make_aligned_word64_writable(base +  64);
      make_aligned_word64_writable(base +  72);
      make_aligned_word64_writable(base +  80);
      make_aligned_word64_writable(base +  88);

      make_aligned_word64_writable(base +  96);
      make_aligned_word64_writable(base + 104);
      make_aligned_word64_writable(base + 112);
      make_aligned_word64_writable(base + 120);
   } else {
      mc_make_writable(base, len);
   }
#  endif

   /* Idea is: go fast when
         * 8-aligned and length is 128
         * the sm is available in the main primary map
         * the address range falls entirely within a single
           secondary map
         * the SM is modifiable
      If all those conditions hold, just update the V bits
      by writing directly on the v-bit array.  We don't care
      about A bits; if the address range is marked invalid,
      any attempt to access it will elicit an addressing error,
      and that's good enough.
   */
   if (EXPECTED_TAKEN( len == 128
                       && VG_IS_8_ALIGNED(base)
      )) {
      /* Now we know the address range is suitably sized and
         aligned. */
      UWord a_lo   = (UWord)base;
      UWord a_hi   = (UWord)(base + 127);
      UWord sec_lo = a_lo >> 16;
      UWord sec_hi = a_hi >> 16;

      if (EXPECTED_TAKEN( sec_lo == sec_hi
                          && sec_lo < N_PRIMARY_MAP
         )) {
         /* Now we know that the entire address range falls within a
            single secondary map, and that that secondary 'lives' in
            the main primary map. */
         SecMap* sm = primary_map[sec_lo];

         if (EXPECTED_TAKEN( !is_distinguished_sm(sm) )) {
            /* And finally, now we know that the secondary in question
               is modifiable. */
            UWord  v_off = a_lo & 0xFFFF;
            ULong* p     = (ULong*)(&sm->vbyte[v_off]);
            p[ 0] = VGM_WORD64_INVALID;
            p[ 1] = VGM_WORD64_INVALID;
            p[ 2] = VGM_WORD64_INVALID;
            p[ 3] = VGM_WORD64_INVALID;
            p[ 4] = VGM_WORD64_INVALID;
            p[ 5] = VGM_WORD64_INVALID;
            p[ 6] = VGM_WORD64_INVALID;
            p[ 7] = VGM_WORD64_INVALID;
            p[ 8] = VGM_WORD64_INVALID;
            p[ 9] = VGM_WORD64_INVALID;
            p[10] = VGM_WORD64_INVALID;
            p[11] = VGM_WORD64_INVALID;
            p[12] = VGM_WORD64_INVALID;
            p[13] = VGM_WORD64_INVALID;
            p[14] = VGM_WORD64_INVALID;
            p[15] = VGM_WORD64_INVALID;
            return;
         }
      }
   }

   /* else fall into slow case */
   mc_make_writable(base, len);
}


/*------------------------------------------------------------*/
/*--- Checking memory                                       ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressable.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(60, "mc_check_noaccess");
   for (i = 0; i < len; i++) {
      PROF_EVENT(61, "mc_check_noaccess(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(62, "mc_check_writable");
   for (i = 0; i < len; i++) {
      PROF_EVENT(63, "mc_check_writable(loop)");
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   UWord vbyte;

   PROF_EVENT(64, "mc_check_readable");
   DEBUG("mc_check_readable\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(65, "mc_check_readable(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // Report addressability errors in preference to definedness errors
      // by checking the A bits first.
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UWord abit;
   UWord vbyte;
   PROF_EVENT(66, "mc_check_readable_asciiz");
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
      get_abit_and_vbyte(&abit, &vbyte, a);
      // As in mc_check_readable(), check A bits first
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0)
         return MC_Ok;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                 ---*/
/*------------------------------------------------------------*/

static
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Bool ok;
   Addr bad_addr;

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                   base,base+size-1); */
   ok = mc_check_writable ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    /*isUnaddr*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
         break;

      default:
         VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }
}

static
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Addr bad_addr;
   MC_ReadResult res;

   res = mc_check_readable ( base, size, &bad_addr );

   if (0)
      VG_(printf)("mc_check_is_readable(0x%x, %d, %s) -> %s\n",
                  (UInt)base, (Int)size, s, res==MC_Ok ? "yes" : "no" );

   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );

      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    isUnaddr, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, isUnaddr, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
}

static
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   MC_ReadResult res;
   Addr bad_addr = 0;   // shut GCC up
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   tl_assert(part == Vg_CoreSysCall);
   res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );
      MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
   }
}

static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}

static
void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
{
   if (is_inited) {
      mc_make_readable(a, len);
   } else {
      mc_make_writable(a, len);
   }
}

static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   mc_make_readable(a, len);
}

static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}

sewardj45d94cc2005-04-20 14:44:11 +00001292
njn25e49d8e72002-09-23 09:36:25 +00001293/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001294/*--- Register event handlers ---*/
1295/*------------------------------------------------------------*/
1296
sewardj45d94cc2005-04-20 14:44:11 +00001297/* When some chunk of guest state is written, mark the corresponding
1298 shadow area as valid. This is used to initialise arbitrarily large
sewardj2c27f702005-05-03 18:19:05 +00001299 chunks of guest state, hence the (somewhat arbitrary) 1024 limit.
sewardj45d94cc2005-04-20 14:44:11 +00001300*/
1301static void mc_post_reg_write ( CorePart part, ThreadId tid,
1302 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001303{
cerion21082042005-12-06 19:07:08 +00001304# define MAX_REG_WRITE_SIZE 1120
1305 UChar area[MAX_REG_WRITE_SIZE];
1306 tl_assert(size <= MAX_REG_WRITE_SIZE);
njncf45fd42004-11-24 16:30:22 +00001307 VG_(memset)(area, VGM_BYTE_VALID, size);
1308 VG_(set_shadow_regs_area)( tid, offset, size, area );
cerion21082042005-12-06 19:07:08 +00001309# undef MAX_REG_WRITE_SIZE
njnd3040452003-05-19 15:04:06 +00001310}
1311
sewardj45d94cc2005-04-20 14:44:11 +00001312static
1313void mc_post_reg_write_clientcall ( ThreadId tid,
1314 OffT offset, SizeT size,
1315 Addr f)
njnd3040452003-05-19 15:04:06 +00001316{
njncf45fd42004-11-24 16:30:22 +00001317 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00001318}
1319
/* Look at the definedness of the guest's shadow state for
   [offset, offset+size).  If any part of that is undefined, record
   a parameter error.
*/
static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
                              OffT offset, SizeT size)
{
   Int  i;
   Bool bad;

   UChar area[16];
   tl_assert(size <= 16);

   VG_(get_shadow_regs_area)( tid, offset, size, area );

   bad = False;
   for (i = 0; i < size; i++) {
      if (area[i] != VGM_BYTE_VALID) {
         bad = True;
         break;
      }
   }

   if (bad)
      MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
}

njn25e49d8e72002-09-23 09:36:25 +00001347
sewardj6cf40ff2005-04-20 22:31:26 +00001348/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00001349/*--- Printing errors ---*/
1350/*------------------------------------------------------------*/
1351
njn51d827b2005-05-09 01:02:08 +00001352static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00001353{
1354 MAC_Error* err_extra = VG_(get_error_extra)(err);
1355
sewardj71bc3cb2005-05-19 00:25:45 +00001356 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
1357 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
1358
njn9e63cb62005-05-08 18:34:59 +00001359 switch (VG_(get_error_kind)(err)) {
1360 case CoreMemErr: {
1361 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00001362 if (VG_(clo_xml))
1363 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
1364 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
1365 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
1366 xpre, VG_(get_error_string)(err), s, xpost);
1367
njn9e63cb62005-05-08 18:34:59 +00001368 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1369 break;
1370
1371 }
1372
1373 case ValueErr:
1374 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00001375 if (VG_(clo_xml))
1376 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
1377 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
1378 " on uninitialised value(s)%s",
1379 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00001380 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00001381 if (VG_(clo_xml))
1382 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
1383 VG_(message)(Vg_UserMsg,
1384 "%sUse of uninitialised value of size %d%s",
1385 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00001386 }
1387 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1388 break;
1389
1390 case ParamErr: {
1391 Bool isReg = ( Register == err_extra->addrinfo.akind );
1392 Char* s1 = ( isReg ? "contains" : "points to" );
1393 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
1394 if (isReg) tl_assert(!err_extra->isUnaddr);
1395
sewardj71bc3cb2005-05-19 00:25:45 +00001396 if (VG_(clo_xml))
1397 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
1398 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
1399 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00001400
1401 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1402 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1403 break;
1404 }
1405 case UserErr: {
1406 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
1407
sewardj71bc3cb2005-05-19 00:25:45 +00001408 if (VG_(clo_xml))
1409 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00001410 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00001411 "%s%s byte(s) found during client check request%s",
1412 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00001413
1414 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
1415 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
1416 break;
1417 }
1418 default:
1419 MAC_(pp_shared_Error)(err);
1420 break;
1421 }
1422}
1423
1424/*------------------------------------------------------------*/
1425/*--- Recording errors ---*/
1426/*------------------------------------------------------------*/
1427
njn02bc4b82005-05-15 17:28:26 +00001428/* Record an error arising from the use of an uninitialised value of
njn9e63cb62005-05-08 18:34:59 +00001429   the given size. */
 1430/* This one is called both from generated code and from non-generated code. */
njn96364822005-05-08 19:04:53 +00001431static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00001432{
1433 MAC_Error err_extra;
1434
1435 MAC_(clear_MAC_Error)( &err_extra );
1436 err_extra.size = size;
1437 err_extra.isUnaddr = False;
1438 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
1439}
1440
 1441/* This one is called from non-generated code. */
1442
njn96364822005-05-08 19:04:53 +00001443static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
1444 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00001445{
1446 MAC_Error err_extra;
1447
1448 tl_assert(VG_INVALID_THREADID != tid);
1449 MAC_(clear_MAC_Error)( &err_extra );
1450 err_extra.addrinfo.akind = Undescribed;
1451 err_extra.isUnaddr = isUnaddr;
1452 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
1453}
1454
1455/*------------------------------------------------------------*/
1456/*--- Suppressions ---*/
1457/*------------------------------------------------------------*/
1458
njn51d827b2005-05-09 01:02:08 +00001459static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00001460{
1461 SuppKind skind;
1462
1463 if (MAC_(shared_recognised_suppression)(name, su))
1464 return True;
1465
1466 /* Extra suppressions not used by Addrcheck */
1467 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
1468 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
1469 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1470 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1471 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1472 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1473 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1474 else
1475 return False;
1476
1477 VG_(set_supp_kind)(su, skind);
1478 return True;
1479}
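
/* For reference, the extra kinds accepted above correspond to suppression
   entries of roughly this shape in a suppressions file (illustrative only;
   the frame names are made up):

      {
         my-uninit-cond-suppression
         Memcheck:Cond
         fun:some_function
         fun:main
      }

   "Cond" and "Value<N>" are handled here; the remaining kinds (Addr<N>,
   Param, Leak, ...) are recognised by MAC_(shared_recognised_suppression). */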
1480
1481/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001482/*--- Functions called directly from generated code: ---*/
1483/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001484/*------------------------------------------------------------*/
1485
1486/* Types: LOADV4, LOADV2, LOADV1 are:
1487 UWord fn ( Addr a )
 1488 so they return 32 bits on 32-bit machines and 64 bits on
1489 64-bit machines. Addr has the same size as a host word.
1490
1491 LOADV8 is always ULong fn ( Addr a )
1492
1493 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1494 are a UWord, and for STOREV8 they are a ULong.
1495*/
1496
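/* A rough sketch of how generated code is expected to use these helpers,
   here for a 4-byte little-endian load.  Illustrative only (hence the
   '#if 0'); 'addr', 'data' and 'vbits' are made-up names, not part of the
   real instrumenter. */
#if 0
   UWord vbits = MC_(helperc_LOADV4le)( addr );  /* shadow (V-bit) load */
   UInt  data  = *(UInt*)addr;                   /* the original load   */
   /* On a 64-bit host only the low 32 bits of 'vbits' carry information;
      the helpers set the unused upper bits to 1 ("undefined"). */
#endif
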
sewardj95448072004-11-22 20:19:51 +00001497/* ------------------------ Size = 8 ------------------------ */
1498
sewardj8cf88b72005-07-08 01:29:33 +00001499#define MAKE_LOADV8(nAME,iS_BIGENDIAN) \
1500 \
1501 VG_REGPARM(1) \
1502 ULong nAME ( Addr aA ) \
1503 { \
sewardjae986ca2005-10-12 12:53:20 +00001504 UWord mask, a, sec_no, v_off, a_off, abits; \
1505 SecMap* sm; \
1506 \
sewardj8cf88b72005-07-08 01:29:33 +00001507 PROF_EVENT(200, #nAME); \
1508 \
1509 if (VG_DEBUG_MEMORY >= 2) \
1510 return mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
1511 \
sewardjae986ca2005-10-12 12:53:20 +00001512 mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1513 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001514 \
1515 /* If any part of 'a' indicated by the mask is 1, either */ \
1516 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1517 /* covered by the primary map. Either way we defer to the */ \
1518 /* slow-path case. */ \
1519 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1520 PROF_EVENT(201, #nAME"-slow1"); \
sewardj78947932006-01-05 14:09:46 +00001521 return (ULong)mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
sewardj8cf88b72005-07-08 01:29:33 +00001522 } \
1523 \
sewardjae986ca2005-10-12 12:53:20 +00001524 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001525 \
1526 if (VG_DEBUG_MEMORY >= 1) \
1527 tl_assert(sec_no < N_PRIMARY_MAP); \
1528 \
sewardjae986ca2005-10-12 12:53:20 +00001529 sm = primary_map[sec_no]; \
1530 v_off = a & 0xFFFF; \
1531 a_off = v_off >> 3; \
1532 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001533 \
1534 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
1535 /* Handle common case quickly: a is suitably aligned, */ \
1536 /* is mapped, and is addressible. */ \
1537 return ((ULong*)(sm->vbyte))[ v_off >> 3 ]; \
1538 } else { \
1539 /* Slow but general case. */ \
1540 PROF_EVENT(202, #nAME"-slow2"); \
1541 return mc_LOADVn_slow( a, 8, iS_BIGENDIAN ); \
1542 } \
sewardjf9d81612005-04-23 23:25:49 +00001543 }
1544
sewardj8cf88b72005-07-08 01:29:33 +00001545MAKE_LOADV8( MC_(helperc_LOADV8be), True /*bigendian*/ );
1546MAKE_LOADV8( MC_(helperc_LOADV8le), False/*littleendian*/ );
sewardjf9d81612005-04-23 23:25:49 +00001547
sewardjf9d81612005-04-23 23:25:49 +00001548
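/* A condensed sketch of the fast-path lookup the LOADV8/STOREV8 macros
   perform, written out as a plain function.  Illustrative only (hence the
   '#if 0'); the real code is generated by MAKE_LOADV8/MAKE_STOREV8 above. */
#if 0
static UWord example_abits_for_8aligned ( Addr a )
{
   UWord   sec_no = (UWord)(a >> 16);     /* which 64KB secondary          */
   SecMap* sm     = primary_map[sec_no];  /* assumes a is within range     */
   UWord   v_off  = a & 0xFFFF;           /* byte offset within the SecMap */
   UWord   a_off  = v_off >> 3;           /* one abits byte covers 8 bytes */
   return (UWord)sm->abits[a_off];        /* A bits for a's 8-byte group   */
}
#endif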
sewardj8cf88b72005-07-08 01:29:33 +00001549#define MAKE_STOREV8(nAME,iS_BIGENDIAN) \
1550 \
1551 VG_REGPARM(1) \
1552 void nAME ( Addr aA, ULong vbytes ) \
1553 { \
sewardjae986ca2005-10-12 12:53:20 +00001554 UWord mask, a, sec_no, v_off, a_off, abits; \
1555 SecMap* sm; \
1556 \
sewardj8cf88b72005-07-08 01:29:33 +00001557 PROF_EVENT(210, #nAME); \
1558 \
1559 if (VG_DEBUG_MEMORY >= 2) \
1560 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1561 \
sewardjae986ca2005-10-12 12:53:20 +00001562 mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
1563 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001564 \
1565 /* If any part of 'a' indicated by the mask is 1, either */ \
1566 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1567 /* covered by the primary map. Either way we defer to the */ \
1568 /* slow-path case. */ \
1569 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1570 PROF_EVENT(211, #nAME"-slow1"); \
1571 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1572 return; \
1573 } \
1574 \
sewardjae986ca2005-10-12 12:53:20 +00001575 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001576 \
1577 if (VG_DEBUG_MEMORY >= 1) \
1578 tl_assert(sec_no < N_PRIMARY_MAP); \
1579 \
sewardjae986ca2005-10-12 12:53:20 +00001580 sm = primary_map[sec_no]; \
1581 v_off = a & 0xFFFF; \
1582 a_off = v_off >> 3; \
1583 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001584 \
1585 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1586 && abits == VGM_BYTE_VALID)) { \
1587 /* Handle common case quickly: a is suitably aligned, */ \
1588 /* is mapped, and is addressible. */ \
1589 ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes; \
1590 } else { \
1591 /* Slow but general case. */ \
1592 PROF_EVENT(212, #nAME"-slow2"); \
1593 mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
1594 } \
sewardjf9d81612005-04-23 23:25:49 +00001595 }
1596
sewardj8cf88b72005-07-08 01:29:33 +00001597MAKE_STOREV8( MC_(helperc_STOREV8be), True /*bigendian*/ );
1598MAKE_STOREV8( MC_(helperc_STOREV8le), False/*littleendian*/ );
sewardj95448072004-11-22 20:19:51 +00001599
sewardj95448072004-11-22 20:19:51 +00001600
1601/* ------------------------ Size = 4 ------------------------ */
1602
sewardj8cf88b72005-07-08 01:29:33 +00001603#define MAKE_LOADV4(nAME,iS_BIGENDIAN) \
1604 \
1605 VG_REGPARM(1) \
1606 UWord nAME ( Addr aA ) \
1607 { \
sewardjae986ca2005-10-12 12:53:20 +00001608 UWord mask, a, sec_no, v_off, a_off, abits; \
1609 SecMap* sm; \
1610 \
sewardj8cf88b72005-07-08 01:29:33 +00001611 PROF_EVENT(220, #nAME); \
1612 \
1613 if (VG_DEBUG_MEMORY >= 2) \
1614 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1615 \
sewardjae986ca2005-10-12 12:53:20 +00001616 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1617 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001618 \
1619 /* If any part of 'a' indicated by the mask is 1, either */ \
1620 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1621 /* covered by the primary map. Either way we defer to the */ \
1622 /* slow-path case. */ \
1623 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1624 PROF_EVENT(221, #nAME"-slow1"); \
1625 return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
1626 } \
1627 \
sewardjae986ca2005-10-12 12:53:20 +00001628 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001629 \
1630 if (VG_DEBUG_MEMORY >= 1) \
1631 tl_assert(sec_no < N_PRIMARY_MAP); \
1632 \
sewardjae986ca2005-10-12 12:53:20 +00001633 sm = primary_map[sec_no]; \
1634 v_off = a & 0xFFFF; \
1635 a_off = v_off >> 3; \
1636 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001637 abits >>= (a & 4); \
1638 abits &= 15; \
1639 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) { \
1640 /* Handle common case quickly: a is suitably aligned, */ \
1641 /* is mapped, and is addressible. */ \
1642 /* On a 32-bit platform, simply hoick the required 32 */ \
1643 /* bits out of the vbyte array. On a 64-bit platform, */ \
1644 /* also set the upper 32 bits to 1 ("undefined"), just */ \
1645 /* in case. This almost certainly isn't necessary, */ \
1646 /* but be paranoid. */ \
1647 UWord ret = (UWord)0xFFFFFFFF00000000ULL; \
1648 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] ); \
1649 return ret; \
1650 } else { \
1651 /* Slow but general case. */ \
1652 PROF_EVENT(222, #nAME"-slow2"); \
1653 return (UWord)mc_LOADVn_slow( a, 4, iS_BIGENDIAN ); \
1654 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001655 }
1656
sewardj8cf88b72005-07-08 01:29:33 +00001657MAKE_LOADV4( MC_(helperc_LOADV4be), True /*bigendian*/ );
1658MAKE_LOADV4( MC_(helperc_LOADV4le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001659
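/* The single 'a & mask' test in the fast paths above folds two checks into
   one: for an N-byte access the mask has 1s in the low log2(N) bits (so a
   misaligned address trips it) and in every bit above the range covered by
   primary_map (so an out-of-range address trips it too).  A sketch, for
   illustration only (hence the '#if 0'): */
#if 0
static Bool example_needs_slow_path ( Addr a, SizeT szB /* 2, 4 or 8 */ )
{
   UWord mask = ~((0x10000-szB) | ((N_PRIMARY_MAP-1) << 16));
   return (a & mask) != 0;   /* True => defer to the slow, general case */
}
#endif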
sewardjc1a2cda2005-04-21 17:34:00 +00001660
sewardj8cf88b72005-07-08 01:29:33 +00001661#define MAKE_STOREV4(nAME,iS_BIGENDIAN) \
1662 \
1663 VG_REGPARM(2) \
1664 void nAME ( Addr aA, UWord vbytes ) \
1665 { \
sewardjae986ca2005-10-12 12:53:20 +00001666 UWord mask, a, sec_no, v_off, a_off, abits; \
1667 SecMap* sm; \
1668 \
sewardj8cf88b72005-07-08 01:29:33 +00001669 PROF_EVENT(230, #nAME); \
1670 \
1671 if (VG_DEBUG_MEMORY >= 2) \
1672 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1673 \
sewardjae986ca2005-10-12 12:53:20 +00001674 mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
1675 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001676 \
1677 /* If any part of 'a' indicated by the mask is 1, either */ \
1678 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1679 /* covered by the primary map. Either way we defer to the */ \
1680 /* slow-path case. */ \
1681 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1682 PROF_EVENT(231, #nAME"-slow1"); \
1683 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1684 return; \
1685 } \
1686 \
sewardjae986ca2005-10-12 12:53:20 +00001687 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001688 \
1689 if (VG_DEBUG_MEMORY >= 1) \
1690 tl_assert(sec_no < N_PRIMARY_MAP); \
1691 \
sewardjae986ca2005-10-12 12:53:20 +00001692 sm = primary_map[sec_no]; \
1693 v_off = a & 0xFFFF; \
1694 a_off = v_off >> 3; \
1695 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001696 abits >>= (a & 4); \
1697 abits &= 15; \
1698 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1699 && abits == VGM_NIBBLE_VALID)) { \
1700 /* Handle common case quickly: a is suitably aligned, */ \
1701 /* is mapped, and is addressible. */ \
1702 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes; \
1703 } else { \
1704 /* Slow but general case. */ \
1705 PROF_EVENT(232, #nAME"-slow2"); \
1706 mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
1707 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001708 }
1709
sewardj8cf88b72005-07-08 01:29:33 +00001710MAKE_STOREV4( MC_(helperc_STOREV4be), True /*bigendian*/ );
1711MAKE_STOREV4( MC_(helperc_STOREV4le), False/*littleendian*/ );
njn25e49d8e72002-09-23 09:36:25 +00001712
njn25e49d8e72002-09-23 09:36:25 +00001713
sewardj95448072004-11-22 20:19:51 +00001714/* ------------------------ Size = 2 ------------------------ */
1715
sewardj8cf88b72005-07-08 01:29:33 +00001716#define MAKE_LOADV2(nAME,iS_BIGENDIAN) \
1717 \
1718 VG_REGPARM(1) \
1719 UWord nAME ( Addr aA ) \
1720 { \
sewardjae986ca2005-10-12 12:53:20 +00001721 UWord mask, a, sec_no, v_off, a_off, abits; \
1722 SecMap* sm; \
1723 \
sewardj8cf88b72005-07-08 01:29:33 +00001724 PROF_EVENT(240, #nAME); \
1725 \
1726 if (VG_DEBUG_MEMORY >= 2) \
1727 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1728 \
sewardjae986ca2005-10-12 12:53:20 +00001729 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1730 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001731 \
1732 /* If any part of 'a' indicated by the mask is 1, either */ \
1733 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1734 /* covered by the primary map. Either way we defer to the */ \
1735 /* slow-path case. */ \
1736 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1737 PROF_EVENT(241, #nAME"-slow1"); \
1738 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1739 } \
1740 \
sewardjae986ca2005-10-12 12:53:20 +00001741 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001742 \
1743 if (VG_DEBUG_MEMORY >= 1) \
1744 tl_assert(sec_no < N_PRIMARY_MAP); \
1745 \
sewardjae986ca2005-10-12 12:53:20 +00001746 sm = primary_map[sec_no]; \
1747 v_off = a & 0xFFFF; \
1748 a_off = v_off >> 3; \
1749 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001750 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
 1751 /* Handle common case quickly: a is mapped, and the */ \
 1752 /* entire 8-byte group it lives in is addressible. */ \
1753 /* Set the upper 16/48 bits of the result to 1 */ \
1754 /* ("undefined"), just in case. This almost certainly */ \
1755 /* isn't necessary, but be paranoid. */ \
1756 return (~(UWord)0xFFFF) \
1757 | \
1758 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] ); \
1759 } else { \
1760 /* Slow but general case. */ \
1761 PROF_EVENT(242, #nAME"-slow2"); \
1762 return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
1763 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001764 }
1765
sewardj8cf88b72005-07-08 01:29:33 +00001766MAKE_LOADV2( MC_(helperc_LOADV2be), True /*bigendian*/ );
1767MAKE_LOADV2( MC_(helperc_LOADV2le), False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001768
sewardjc1a2cda2005-04-21 17:34:00 +00001769
sewardj8cf88b72005-07-08 01:29:33 +00001770#define MAKE_STOREV2(nAME,iS_BIGENDIAN) \
1771 \
1772 VG_REGPARM(2) \
1773 void nAME ( Addr aA, UWord vbytes ) \
1774 { \
sewardjae986ca2005-10-12 12:53:20 +00001775 UWord mask, a, sec_no, v_off, a_off, abits; \
1776 SecMap* sm; \
1777 \
sewardj8cf88b72005-07-08 01:29:33 +00001778 PROF_EVENT(250, #nAME); \
1779 \
1780 if (VG_DEBUG_MEMORY >= 2) \
1781 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1782 \
sewardjae986ca2005-10-12 12:53:20 +00001783 mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
1784 a = (UWord)aA; \
sewardj8cf88b72005-07-08 01:29:33 +00001785 \
1786 /* If any part of 'a' indicated by the mask is 1, either */ \
1787 /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
1788 /* covered by the primary map. Either way we defer to the */ \
1789 /* slow-path case. */ \
1790 if (EXPECTED_NOT_TAKEN(a & mask)) { \
1791 PROF_EVENT(251, #nAME"-slow1"); \
1792 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1793 return; \
1794 } \
1795 \
sewardjae986ca2005-10-12 12:53:20 +00001796 sec_no = (UWord)(a >> 16); \
sewardj8cf88b72005-07-08 01:29:33 +00001797 \
1798 if (VG_DEBUG_MEMORY >= 1) \
1799 tl_assert(sec_no < N_PRIMARY_MAP); \
1800 \
sewardjae986ca2005-10-12 12:53:20 +00001801 sm = primary_map[sec_no]; \
1802 v_off = a & 0xFFFF; \
1803 a_off = v_off >> 3; \
1804 abits = (UWord)(sm->abits[a_off]); \
sewardj8cf88b72005-07-08 01:29:33 +00001805 if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
1806 && abits == VGM_BYTE_VALID)) { \
1807 /* Handle common case quickly. */ \
1808 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes; \
1809 } else { \
1810 /* Slow but general case. */ \
1811 PROF_EVENT(252, #nAME"-slow2"); \
1812 mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
1813 } \
sewardjc1a2cda2005-04-21 17:34:00 +00001814 }
1815
njn25e49d8e72002-09-23 09:36:25 +00001816
sewardj8cf88b72005-07-08 01:29:33 +00001817MAKE_STOREV2( MC_(helperc_STOREV2be), True /*bigendian*/ );
1818MAKE_STOREV2( MC_(helperc_STOREV2le), False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001819
njn25e49d8e72002-09-23 09:36:25 +00001820
sewardj95448072004-11-22 20:19:51 +00001821/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00001822/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00001823
njnaf839f52005-06-23 03:27:57 +00001824VG_REGPARM(1)
sewardj8cf88b72005-07-08 01:29:33 +00001825UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001826{
sewardjae986ca2005-10-12 12:53:20 +00001827 UWord mask, a, sec_no, v_off, a_off, abits;
1828 SecMap* sm;
1829
sewardj8cf88b72005-07-08 01:29:33 +00001830 PROF_EVENT(260, "helperc_LOADV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001831
1832# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001833 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001834# else
1835
sewardjae986ca2005-10-12 12:53:20 +00001836 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1837 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001838
1839 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1840 exceeds the range covered by the primary map. In which case we
1841 defer to the slow-path case. */
1842 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001843 PROF_EVENT(261, "helperc_LOADV1-slow1");
1844 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001845 }
1846
sewardjae986ca2005-10-12 12:53:20 +00001847 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001848
1849# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001850 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001851# endif
1852
sewardjae986ca2005-10-12 12:53:20 +00001853 sm = primary_map[sec_no];
1854 v_off = a & 0xFFFF;
1855 a_off = v_off >> 3;
1856 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001857 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
 1858 /* Handle common case quickly: a is mapped, and the entire
 1859 8-byte group it lives in is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001860 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1861 just in case. This almost certainly isn't necessary, but be
1862 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001863 return (~(UWord)0xFF)
1864 |
1865 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1866 } else {
1867 /* Slow but general case. */
sewardj8cf88b72005-07-08 01:29:33 +00001868 PROF_EVENT(262, "helperc_LOADV1-slow2");
1869 return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001870 }
1871# endif
njn25e49d8e72002-09-23 09:36:25 +00001872}
1873
sewardjc1a2cda2005-04-21 17:34:00 +00001874
njnaf839f52005-06-23 03:27:57 +00001875VG_REGPARM(2)
sewardj8cf88b72005-07-08 01:29:33 +00001876void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001877{
sewardjae986ca2005-10-12 12:53:20 +00001878 UWord mask, a, sec_no, v_off, a_off, abits;
1879 SecMap* sm;
1880
sewardj8cf88b72005-07-08 01:29:33 +00001881 PROF_EVENT(270, "helperc_STOREV1");
sewardjc1a2cda2005-04-21 17:34:00 +00001882
1883# if VG_DEBUG_MEMORY >= 2
sewardj8cf88b72005-07-08 01:29:33 +00001884 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001885# else
1886
sewardjae986ca2005-10-12 12:53:20 +00001887 mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
1888 a = (UWord)aA;
sewardjc1a2cda2005-04-21 17:34:00 +00001889 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1890 exceeds the range covered by the primary map. In which case we
1891 defer to the slow-path case. */
1892 if (EXPECTED_NOT_TAKEN(a & mask)) {
sewardj8cf88b72005-07-08 01:29:33 +00001893 PROF_EVENT(271, "helperc_STOREV1-slow1");
1894 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001895 return;
1896 }
1897
sewardjae986ca2005-10-12 12:53:20 +00001898 sec_no = (UWord)(a >> 16);
sewardjc1a2cda2005-04-21 17:34:00 +00001899
1900# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001901 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001902# endif
1903
sewardjae986ca2005-10-12 12:53:20 +00001904 sm = primary_map[sec_no];
1905 v_off = a & 0xFFFF;
1906 a_off = v_off >> 3;
1907 abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001908 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1909 && abits == VGM_BYTE_VALID)) {
 1910 /* Handle common case quickly: a is mapped, the entire 8-byte
 1911 group it lives in is addressible. */
1912 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1913 } else {
sewardj8cf88b72005-07-08 01:29:33 +00001914 PROF_EVENT(272, "helperc_STOREV1-slow2");
1915 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001916 }
1917
1918# endif
njn25e49d8e72002-09-23 09:36:25 +00001919}
1920
1921
sewardjc859fbf2005-04-22 21:10:28 +00001922/*------------------------------------------------------------*/
1923/*--- Functions called directly from generated code: ---*/
1924/*--- Value-check failure handlers. ---*/
1925/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001926
njn5c004e42002-11-18 11:04:50 +00001927void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001928{
njn9e63cb62005-05-08 18:34:59 +00001929 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001930}
1931
njn5c004e42002-11-18 11:04:50 +00001932void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001933{
njn9e63cb62005-05-08 18:34:59 +00001934 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001935}
1936
njn5c004e42002-11-18 11:04:50 +00001937void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001938{
njn9e63cb62005-05-08 18:34:59 +00001939 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001940}
1941
sewardj11bcc4e2005-04-23 22:38:38 +00001942void MC_(helperc_value_check8_fail) ( void )
1943{
njn9e63cb62005-05-08 18:34:59 +00001944 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00001945}
1946
njnaf839f52005-06-23 03:27:57 +00001947VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001948{
njn9e63cb62005-05-08 18:34:59 +00001949 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001950}
1951
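/* For orientation: these handlers fire when the instrumented program uses
   an undefined value in a way that affects its behaviour (a conditional
   branch, an address calculation, a syscall argument, ...).  A minimal
   client fragment that is typically reported through the size-0 case above
   as "Conditional jump or move depends on uninitialised value(s)"
   (illustrative only):

      char c;                  (never initialised)
      if (c == '?') { ... }    (branch depends on the undefined byte)
*/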
njn25e49d8e72002-09-23 09:36:25 +00001952
sewardj45d94cc2005-04-20 14:44:11 +00001953//zz /*------------------------------------------------------------*/
1954//zz /*--- Metadata get/set functions, for client requests. ---*/
1955//zz /*------------------------------------------------------------*/
1956//zz
1957//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1958//zz error, 3 == addressing error. */
1959//zz static Int mc_get_or_set_vbits_for_client (
1960//zz ThreadId tid,
1961//zz Addr dataV,
1962//zz Addr vbitsV,
1963//zz SizeT size,
1964//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1965//zz )
1966//zz {
1967//zz Bool addressibleD = True;
1968//zz Bool addressibleV = True;
1969//zz UInt* data = (UInt*)dataV;
1970//zz UInt* vbits = (UInt*)vbitsV;
1971//zz SizeT szW = size / 4; /* sigh */
1972//zz SizeT i;
1973//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1974//zz UInt* vbitsP = NULL; /* ditto */
1975//zz
1976//zz /* Check alignment of args. */
1977//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1978//zz return 2;
1979//zz if ((size & 3) != 0)
1980//zz return 2;
1981//zz
1982//zz /* Check that arrays are addressible. */
1983//zz for (i = 0; i < szW; i++) {
1984//zz dataP = &data[i];
1985//zz vbitsP = &vbits[i];
1986//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1987//zz addressibleD = False;
1988//zz break;
1989//zz }
1990//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1991//zz addressibleV = False;
1992//zz break;
1993//zz }
1994//zz }
1995//zz if (!addressibleD) {
1996//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1997//zz setting ? True : False );
1998//zz return 3;
1999//zz }
2000//zz if (!addressibleV) {
2001//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
2002//zz setting ? False : True );
2003//zz return 3;
2004//zz }
2005//zz
2006//zz /* Do the copy */
2007//zz if (setting) {
2008//zz /* setting */
2009//zz for (i = 0; i < szW; i++) {
2010//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn9e63cb62005-05-08 18:34:59 +00002011//zz mc_record_value_error(tid, 4);
sewardj45d94cc2005-04-20 14:44:11 +00002012//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
2013//zz }
2014//zz } else {
2015//zz /* getting */
2016//zz for (i = 0; i < szW; i++) {
2017//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
2018//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
2019//zz }
2020//zz }
2021//zz
2022//zz return 1;
2023//zz }
sewardj05fe85e2005-04-27 22:46:36 +00002024
2025
2026/*------------------------------------------------------------*/
2027/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
2028/*------------------------------------------------------------*/
2029
2030/* For the memory leak detector, say whether an entire 64k chunk of
2031 address space is possibly in use, or not. If in doubt return
2032 True.
2033*/
2034static
2035Bool mc_is_within_valid_secondary ( Addr a )
2036{
2037 SecMap* sm = maybe_get_secmap_for ( a );
2038 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
2039 /* Definitely not in use. */
2040 return False;
2041 } else {
2042 return True;
2043 }
2044}
2045
2046
2047/* For the memory leak detector, say whether or not a given word
2048 address is to be regarded as valid. */
2049static
2050Bool mc_is_valid_aligned_word ( Addr a )
2051{
2052 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
2053 if (sizeof(UWord) == 4) {
2054 tl_assert(VG_IS_4_ALIGNED(a));
2055 } else {
2056 tl_assert(VG_IS_8_ALIGNED(a));
2057 }
2058 if (mc_check_readable( a, sizeof(UWord), NULL ) == MC_Ok) {
2059 return True;
2060 } else {
2061 return False;
2062 }
2063}
sewardja4495682002-10-21 07:29:59 +00002064
2065
nethercote996901a2004-08-03 13:29:09 +00002066/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00002067 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00002068 tool. */
njnb8dca862005-03-14 02:42:44 +00002069static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00002070{
sewardj05fe85e2005-04-27 22:46:36 +00002071 MAC_(do_detect_memory_leaks) (
2072 tid,
2073 mode,
2074 mc_is_within_valid_secondary,
2075 mc_is_valid_aligned_word
2076 );
njn25e49d8e72002-09-23 09:36:25 +00002077}
2078
2079
sewardjc859fbf2005-04-22 21:10:28 +00002080/*------------------------------------------------------------*/
2081/*--- Initialisation ---*/
2082/*------------------------------------------------------------*/
2083
2084static void init_shadow_memory ( void )
2085{
2086 Int i;
2087 SecMap* sm;
2088
2089 /* Build the 3 distinguished secondaries */
2090 tl_assert(VGM_BIT_INVALID == 1);
2091 tl_assert(VGM_BIT_VALID == 0);
2092 tl_assert(VGM_BYTE_INVALID == 0xFF);
2093 tl_assert(VGM_BYTE_VALID == 0);
2094
2095 /* Set A invalid, V invalid. */
2096 sm = &sm_distinguished[SM_DIST_NOACCESS];
2097 for (i = 0; i < 65536; i++)
2098 sm->vbyte[i] = VGM_BYTE_INVALID;
2099 for (i = 0; i < 8192; i++)
2100 sm->abits[i] = VGM_BYTE_INVALID;
2101
2102 /* Set A valid, V invalid. */
2103 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2104 for (i = 0; i < 65536; i++)
2105 sm->vbyte[i] = VGM_BYTE_INVALID;
2106 for (i = 0; i < 8192; i++)
2107 sm->abits[i] = VGM_BYTE_VALID;
2108
2109 /* Set A valid, V valid. */
2110 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2111 for (i = 0; i < 65536; i++)
2112 sm->vbyte[i] = VGM_BYTE_VALID;
2113 for (i = 0; i < 8192; i++)
2114 sm->abits[i] = VGM_BYTE_VALID;
2115
2116 /* Set up the primary map. */
2117 /* These entries gradually get overwritten as the used address
2118 space expands. */
2119 for (i = 0; i < N_PRIMARY_MAP; i++)
2120 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
2121
 2122 /* auxmap_size and auxmap_used are statically initialised to zero,
 2123 so there is nothing to reset here. */
2124}
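
/* Summary of the three distinguished secondaries built above:

      SM_DIST_NOACCESS          A invalid, V invalid  (not addressible)
      SM_DIST_ACCESS_UNDEFINED  A valid,   V invalid  (addressible, undefined)
      SM_DIST_ACCESS_DEFINED    A valid,   V valid    (addressible, defined)

   Every primary_map entry starts out pointing at SM_DIST_NOACCESS;
   ordinary, writable secondaries replace them as the client's address
   space comes into use. */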
2125
2126
2127/*------------------------------------------------------------*/
2128/*--- Sanity check machinery (permanently engaged) ---*/
2129/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002130
njn51d827b2005-05-09 01:02:08 +00002131static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002132{
jseward9800fd32004-01-04 23:08:04 +00002133 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00002134 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00002135 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00002136 return True;
njn25e49d8e72002-09-23 09:36:25 +00002137}
2138
njn51d827b2005-05-09 01:02:08 +00002139static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00002140{
sewardj23eb2fd2005-04-22 16:29:19 +00002141 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00002142 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00002143 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00002144
sewardj23eb2fd2005-04-22 16:29:19 +00002145 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00002146 PROF_EVENT(491, "expensive_sanity_check");
2147
sewardj23eb2fd2005-04-22 16:29:19 +00002148 /* Check that the 3 distinguished SMs are still as they should
2149 be. */
njn25e49d8e72002-09-23 09:36:25 +00002150
sewardj45d94cc2005-04-20 14:44:11 +00002151 /* Check A invalid, V invalid. */
2152 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00002153 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00002154 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002155 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002156 for (i = 0; i < 8192; i++)
2157 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002158 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00002159
sewardj45d94cc2005-04-20 14:44:11 +00002160 /* Check A valid, V invalid. */
2161 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
2162 for (i = 0; i < 65536; i++)
2163 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002164 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002165 for (i = 0; i < 8192; i++)
2166 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002167 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002168
2169 /* Check A valid, V valid. */
2170 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
2171 for (i = 0; i < 65536; i++)
2172 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002173 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002174 for (i = 0; i < 8192; i++)
2175 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00002176 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002177
sewardj23eb2fd2005-04-22 16:29:19 +00002178 if (bad) {
2179 VG_(printf)("memcheck expensive sanity: "
2180 "distinguished_secondaries have changed\n");
2181 return False;
2182 }
2183
2184 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00002185 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00002186 bad = True;
2187
2188 if (bad) {
2189 VG_(printf)("memcheck expensive sanity: "
2190 "nonsensical auxmap sizing\n");
2191 return False;
2192 }
2193
2194 /* check that the number of secmaps issued matches the number that
2195 are reachable (iow, no secmap leaks) */
2196 n_secmaps_found = 0;
2197 for (i = 0; i < N_PRIMARY_MAP; i++) {
2198 if (primary_map[i] == NULL) {
2199 bad = True;
2200 } else {
2201 if (!is_distinguished_sm(primary_map[i]))
2202 n_secmaps_found++;
2203 }
2204 }
2205
2206 for (i = 0; i < auxmap_used; i++) {
2207 if (auxmap[i].sm == NULL) {
2208 bad = True;
2209 } else {
2210 if (!is_distinguished_sm(auxmap[i].sm))
2211 n_secmaps_found++;
2212 }
2213 }
2214
2215 if (n_secmaps_found != n_secmaps_issued)
2216 bad = True;
2217
2218 if (bad) {
2219 VG_(printf)("memcheck expensive sanity: "
2220 "apparent secmap leakage\n");
2221 return False;
2222 }
2223
2224 /* check that auxmap only covers address space that the primary
2225 doesn't */
2226
2227 for (i = 0; i < auxmap_used; i++)
2228 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
2229 bad = True;
2230
2231 if (bad) {
2232 VG_(printf)("memcheck expensive sanity: "
2233 "auxmap covers wrong address space\n");
2234 return False;
2235 }
2236
2237 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00002238
2239 return True;
2240}
sewardj45d94cc2005-04-20 14:44:11 +00002241
njn25e49d8e72002-09-23 09:36:25 +00002242
njn25e49d8e72002-09-23 09:36:25 +00002243/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002244/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002245/*------------------------------------------------------------*/
2246
njn51d827b2005-05-09 01:02:08 +00002247static Bool mc_process_cmd_line_option(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002248{
sewardjf3418c02005-11-08 14:10:24 +00002249 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002250}
2251
njn51d827b2005-05-09 01:02:08 +00002252static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00002253{
njn3e884182003-04-15 13:03:23 +00002254 MAC_(print_common_usage)();
njn3e884182003-04-15 13:03:23 +00002255}
2256
njn51d827b2005-05-09 01:02:08 +00002257static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00002258{
2259 MAC_(print_common_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00002260}
2261
sewardjf3418c02005-11-08 14:10:24 +00002262
nethercote8b76fe52004-11-08 19:20:09 +00002263/*------------------------------------------------------------*/
2264/*--- Client requests ---*/
2265/*------------------------------------------------------------*/
2266
2267/* Client block management:
2268
2269 This is managed as an expanding array of client block descriptors.
2270 Indices of live descriptors are issued to the client, so it can ask
2271 to free them later. Therefore we cannot slide live entries down
2272 over dead ones. Instead we must use free/inuse flags and scan for
2273 an empty slot at allocation time. This in turn means allocation is
2274 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002275
sewardjedc75ab2005-03-15 23:30:32 +00002276 An unused block has start == size == 0
2277*/
nethercote8b76fe52004-11-08 19:20:09 +00002278
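/* Client-side view, for orientation: described blocks are normally created
   and released through the wrapper macros in memcheck.h rather than raw
   client requests.  A sketch (macro names as provided by memcheck.h of this
   era -- treat the exact spellings as an assumption to check there):

      int id = VALGRIND_CREATE_BLOCK(buf, len, "my buffer");
      ...
      VALGRIND_DISCARD(id);

   CREATE_BLOCK returns the descriptor index handed out by
   alloc_client_block() below; DISCARD passes it back so the slot can be
   reused. */
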
2279typedef
2280 struct {
2281 Addr start;
2282 SizeT size;
2283 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00002284 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002285 }
2286 CGenBlock;
2287
2288/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002289static UInt cgb_size = 0;
2290static UInt cgb_used = 0;
2291static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002292
2293/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002294static UInt cgb_used_MAX = 0; /* Max in use. */
2295static UInt cgb_allocs = 0; /* Number of allocs. */
2296static UInt cgb_discards = 0; /* Number of discards. */
2297static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002298
2299
2300static
njn695c16e2005-03-27 03:40:28 +00002301Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002302{
2303 UInt i, sz_new;
2304 CGenBlock* cgbs_new;
2305
njn695c16e2005-03-27 03:40:28 +00002306 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002307
njn695c16e2005-03-27 03:40:28 +00002308 for (i = 0; i < cgb_used; i++) {
2309 cgb_search++;
2310 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002311 return i;
2312 }
2313
2314 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002315 if (cgb_used < cgb_size) {
2316 cgb_used++;
2317 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002318 }
2319
2320 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002321 tl_assert(cgb_used == cgb_size);
2322 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002323
2324 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002325 for (i = 0; i < cgb_used; i++)
2326 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002327
njn695c16e2005-03-27 03:40:28 +00002328 if (cgbs != NULL)
2329 VG_(free)( cgbs );
2330 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002331
njn695c16e2005-03-27 03:40:28 +00002332 cgb_size = sz_new;
2333 cgb_used++;
2334 if (cgb_used > cgb_used_MAX)
2335 cgb_used_MAX = cgb_used;
2336 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002337}
2338
2339
2340static void show_client_block_stats ( void )
2341{
2342 VG_(message)(Vg_DebugMsg,
2343 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002344 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002345 );
2346}
2347
nethercote8b76fe52004-11-08 19:20:09 +00002348static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2349{
2350 UInt i;
2351 /* VG_(printf)("try to identify %d\n", a); */
2352
2353 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00002354 for (i = 0; i < cgb_used; i++) {
2355 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002356 continue;
njn717cde52005-05-10 02:47:21 +00002357 // Use zero as the redzone for client blocks.
2358 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00002359 /* OK - maybe it's a mempool, too? */
njn12627272005-08-14 18:32:16 +00002360 MAC_Mempool* mp = VG_(HT_lookup)(MAC_(mempool_list),
2361 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00002362 if (mp != NULL) {
2363 if (mp->chunks != NULL) {
2364 MAC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00002365 VG_(HT_ResetIter)(mp->chunks);
2366 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0cb0d2005-08-15 01:52:02 +00002367 if (VG_(addr_is_in_block)(a, mc->data, mc->size,
2368 MAC_MALLOC_REDZONE_SZB)) {
2369 ai->akind = UserG;
2370 ai->blksize = mc->size;
2371 ai->rwoffset = (Int)(a) - (Int)mc->data;
2372 ai->lastchange = mc->where;
2373 return True;
2374 }
nethercote8b76fe52004-11-08 19:20:09 +00002375 }
2376 }
njn1d0cb0d2005-08-15 01:52:02 +00002377 ai->akind = Mempool;
2378 ai->blksize = cgbs[i].size;
2379 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002380 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002381 return True;
2382 }
njn1d0cb0d2005-08-15 01:52:02 +00002383 ai->akind = UserG;
2384 ai->blksize = cgbs[i].size;
2385 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
njn695c16e2005-03-27 03:40:28 +00002386 ai->lastchange = cgbs[i].where;
njn1d0cb0d2005-08-15 01:52:02 +00002387 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002388 return True;
2389 }
2390 }
2391 return False;
2392}
2393
njn51d827b2005-05-09 01:02:08 +00002394static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002395{
2396 Int i;
2397 Bool ok;
2398 Addr bad_addr;
2399
njnfc26ff92004-11-22 19:12:49 +00002400 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002401 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2402 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2403 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2404 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2405 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2406 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2407 return False;
2408
2409 switch (arg[0]) {
2410 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2411 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2412 if (!ok)
njn9e63cb62005-05-08 18:34:59 +00002413 mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
2414 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002415 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00002416 break;
nethercote8b76fe52004-11-08 19:20:09 +00002417
2418 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2419 MC_ReadResult res;
2420 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2421 if (MC_AddrErr == res)
njn9e63cb62005-05-08 18:34:59 +00002422 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2423 /*isUnaddr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00002424 else if (MC_ValueErr == res)
njn9e63cb62005-05-08 18:34:59 +00002425 mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
2426 /*isUnaddr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00002427 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00002428 break;
nethercote8b76fe52004-11-08 19:20:09 +00002429 }
2430
2431 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002432 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00002433 *ret = 0; /* return value is meaningless */
2434 break;
nethercote8b76fe52004-11-08 19:20:09 +00002435
2436 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002437 mc_make_noaccess ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002438 *ret = -1;
2439 break;
nethercote8b76fe52004-11-08 19:20:09 +00002440
2441 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002442 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002443 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00002444 break;
nethercote8b76fe52004-11-08 19:20:09 +00002445
2446 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002447 mc_make_readable ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00002448 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002449 break;
2450
sewardjedc75ab2005-03-15 23:30:32 +00002451 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00002452 if (arg[1] != 0 && arg[2] != 0) {
2453 i = alloc_client_block();
2454 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2455 cgbs[i].start = arg[1];
2456 cgbs[i].size = arg[2];
2457 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2458 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002459
sewardj8cf88b72005-07-08 01:29:33 +00002460 *ret = i;
2461 } else
2462 *ret = -1;
2463 break;
sewardjedc75ab2005-03-15 23:30:32 +00002464
nethercote8b76fe52004-11-08 19:20:09 +00002465 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002466 if (cgbs == NULL
2467 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00002468 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002469 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00002470 } else {
2471 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2472 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2473 VG_(free)(cgbs[arg[2]].desc);
2474 cgb_discards++;
2475 *ret = 0;
2476 }
2477 break;
nethercote8b76fe52004-11-08 19:20:09 +00002478
sewardj45d94cc2005-04-20 14:44:11 +00002479//zz case VG_USERREQ__GET_VBITS:
2480//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2481//zz error. */
2482//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2483//zz *ret = mc_get_or_set_vbits_for_client
2484//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2485//zz break;
2486//zz
2487//zz case VG_USERREQ__SET_VBITS:
2488//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2489//zz error. */
2490//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2491//zz *ret = mc_get_or_set_vbits_for_client
2492//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2493//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002494
2495 default:
2496 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2497 return True;
2498 } else {
2499 VG_(message)(Vg_UserMsg,
2500 "Warning: unknown memcheck client request code %llx",
2501 (ULong)arg[0]);
2502 return False;
2503 }
2504 }
2505 return True;
2506}
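
/* For orientation, the requests handled above are normally issued through
   the wrapper macros in memcheck.h rather than by raw client-request calls.
   A sketch of typical client usage (macro names as provided by memcheck.h
   of this era -- treat the exact spellings as an assumption to check
   there):

      VALGRIND_MAKE_NOACCESS(p, 64);      -> VG_USERREQ__MAKE_NOACCESS
      VALGRIND_MAKE_READABLE(p, 64);      -> VG_USERREQ__MAKE_READABLE
      VALGRIND_CHECK_READABLE(p, 64);     -> VG_USERREQ__CHECK_READABLE
      VALGRIND_DO_LEAK_CHECK;             -> VG_USERREQ__DO_LEAK_CHECK

   The CHECK_* requests return 0 on success, or the first offending address,
   exactly as computed in the handler above. */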
njn25e49d8e72002-09-23 09:36:25 +00002507
2508/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00002509/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00002510/*------------------------------------------------------------*/
2511
njn51d827b2005-05-09 01:02:08 +00002512static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00002513{
sewardj71bc3cb2005-05-19 00:25:45 +00002514 /* If we've been asked to emit XML, mash around various other
2515 options so as to constrain the output somewhat. */
2516 if (VG_(clo_xml)) {
2517 /* Extract as much info as possible from the leak checker. */
sewardj09890d82005-05-20 02:45:15 +00002518 /* MAC_(clo_show_reachable) = True; */
sewardj71bc3cb2005-05-19 00:25:45 +00002519 MAC_(clo_leak_check) = LC_Full;
2520 }
njn5c004e42002-11-18 11:04:50 +00002521}
2522
njn51d827b2005-05-09 01:02:08 +00002523static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002524{
sewardj23eb2fd2005-04-22 16:29:19 +00002525 Int i, n_accessible_dist;
2526 SecMap* sm;
2527
sewardjae986ca2005-10-12 12:53:20 +00002528 MAC_(common_fini)( mc_detect_memory_leaks );
2529
sewardj45d94cc2005-04-20 14:44:11 +00002530 if (VG_(clo_verbosity) > 1) {
2531 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002532 " memcheck: sanity checks: %d cheap, %d expensive",
2533 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00002534 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002535 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2536 auxmap_used,
2537 auxmap_used * 64,
2538 auxmap_used / 16 );
2539 VG_(message)(Vg_DebugMsg,
2540 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002541 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002542 VG_(message)(Vg_DebugMsg,
2543 " memcheck: secondaries: %d issued (%dk, %dM)",
2544 n_secmaps_issued,
2545 n_secmaps_issued * 64,
2546 n_secmaps_issued / 16 );
2547
2548 n_accessible_dist = 0;
2549 for (i = 0; i < N_PRIMARY_MAP; i++) {
2550 sm = primary_map[i];
2551 if (is_distinguished_sm(sm)
2552 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2553 n_accessible_dist ++;
2554 }
2555 for (i = 0; i < auxmap_used; i++) {
2556 sm = auxmap[i].sm;
2557 if (is_distinguished_sm(sm)
2558 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2559 n_accessible_dist ++;
2560 }
2561
2562 VG_(message)(Vg_DebugMsg,
2563 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2564 n_accessible_dist,
2565 n_accessible_dist * 64,
2566 n_accessible_dist / 16 );
2567
sewardj45d94cc2005-04-20 14:44:11 +00002568 }
2569
njn5c004e42002-11-18 11:04:50 +00002570 if (0) {
2571 VG_(message)(Vg_DebugMsg,
2572 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002573 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002574 }
njn25e49d8e72002-09-23 09:36:25 +00002575}
2576
njn51d827b2005-05-09 01:02:08 +00002577static void mc_pre_clo_init(void)
2578{
2579 VG_(details_name) ("Memcheck");
2580 VG_(details_version) (NULL);
2581 VG_(details_description) ("a memory error detector");
2582 VG_(details_copyright_author)(
2583 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
2584 VG_(details_bug_reports_to) (VG_BUGS_TO);
2585 VG_(details_avg_translation_sizeB) ( 370 );
2586
2587 VG_(basic_tool_funcs) (mc_post_clo_init,
2588 MC_(instrument),
2589 mc_fini);
2590
2591 VG_(needs_core_errors) ();
2592 VG_(needs_tool_errors) (MAC_(eq_Error),
2593 mc_pp_Error,
2594 MAC_(update_extra),
2595 mc_recognised_suppression,
2596 MAC_(read_extra_suppression_info),
2597 MAC_(error_matches_suppression),
2598 MAC_(get_error_name),
2599 MAC_(print_extra_suppression_info));
2600 VG_(needs_libc_freeres) ();
2601 VG_(needs_command_line_options)(mc_process_cmd_line_option,
2602 mc_print_usage,
2603 mc_print_debug_usage);
2604 VG_(needs_client_requests) (mc_handle_client_request);
2605 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
2606 mc_expensive_sanity_check);
njn51d827b2005-05-09 01:02:08 +00002607
njnfc51f8d2005-06-21 03:20:17 +00002608 VG_(needs_malloc_replacement) (MAC_(malloc),
njn51d827b2005-05-09 01:02:08 +00002609 MAC_(__builtin_new),
2610 MAC_(__builtin_vec_new),
2611 MAC_(memalign),
2612 MAC_(calloc),
2613 MAC_(free),
2614 MAC_(__builtin_delete),
2615 MAC_(__builtin_vec_delete),
2616 MAC_(realloc),
2617 MAC_MALLOC_REDZONE_SZB );
2618
2619 MAC_( new_mem_heap) = & mc_new_mem_heap;
2620 MAC_( ban_mem_heap) = & mc_make_noaccess;
2621 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
2622 MAC_( die_mem_heap) = & mc_make_noaccess;
2623 MAC_(check_noaccess) = & mc_check_noaccess;
2624
2625 VG_(track_new_mem_startup) ( & mc_new_mem_startup );
2626 VG_(track_new_mem_stack_signal)( & mc_make_writable );
2627 VG_(track_new_mem_brk) ( & mc_make_writable );
2628 VG_(track_new_mem_mmap) ( & mc_new_mem_mmap );
2629
2630 VG_(track_copy_mem_remap) ( & mc_copy_address_range_state );
njn81623712005-10-07 04:48:37 +00002631
2632 // Nb: we don't do anything with mprotect. This means that V bits are
2633 // preserved if a program, for example, marks some memory as inaccessible
2634 // and then later marks it as accessible again.
2635 //
2636 // If an access violation occurs (eg. writing to read-only memory) we let
2637 // it fault and print an informative termination message. This doesn't
2638 // happen if the program catches the signal, though, which is bad. If we
2639 // had two A bits (for readability and writability) that were completely
2640 // distinct from V bits, then we could handle all this properly.
2641 VG_(track_change_mem_mprotect) ( NULL );
njn51d827b2005-05-09 01:02:08 +00002642
2643 VG_(track_die_mem_stack_signal)( & mc_make_noaccess );
2644 VG_(track_die_mem_brk) ( & mc_make_noaccess );
2645 VG_(track_die_mem_munmap) ( & mc_make_noaccess );
2646
2647 VG_(track_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2648 VG_(track_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2649 VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2650 VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2651 VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2652 VG_(track_new_mem_stack) ( & MAC_(new_mem_stack) );
2653
2654 VG_(track_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2655 VG_(track_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2656 VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2657 VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2658 VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2659 VG_(track_die_mem_stack) ( & MAC_(die_mem_stack) );
2660
2661 VG_(track_ban_mem_stack) ( & mc_make_noaccess );
2662
2663 VG_(track_pre_mem_read) ( & mc_check_is_readable );
2664 VG_(track_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2665 VG_(track_pre_mem_write) ( & mc_check_is_writable );
2666 VG_(track_post_mem_write) ( & mc_post_mem_write );
2667
2668 VG_(track_pre_reg_read) ( & mc_pre_reg_read );
2669
2670 VG_(track_post_reg_write) ( & mc_post_reg_write );
2671 VG_(track_post_reg_write_clientcall_return)( & mc_post_reg_write_clientcall );
2672
njn51d827b2005-05-09 01:02:08 +00002673 /* Additional block description for VG_(describe_addr)() */
2674 MAC_(describe_addr_supp) = client_perm_maybe_describe;
2675
2676 init_shadow_memory();
2677 MAC_(common_pre_clo_init)();
2678
2679 tl_assert( mc_expensive_sanity_check() );
2680}
2681
sewardj45f4e7c2005-09-27 19:20:21 +00002682VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00002683
njn25e49d8e72002-09-23 09:36:25 +00002684/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002685/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002686/*--------------------------------------------------------------------*/