blob: dd129a3e2895619921910a04aaaaa50066c32e02 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
sewardje4b0bf02006-06-05 23:21:15 +000012 Copyright (C) 2000-2006 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njnc7561b92005-06-19 01:24:32 +000033#include "pub_tool_basics.h"
njn4802b382005-06-11 04:58:29 +000034#include "pub_tool_aspacemgr.h"
njn1d0825f2006-03-27 11:37:07 +000035#include "pub_tool_hashtable.h" // For mc_include.h
njn97405b22005-06-02 03:39:33 +000036#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000037#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000038#include "pub_tool_libcprint.h"
njnf536bbb2005-06-13 04:21:38 +000039#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000040#include "pub_tool_mallocfree.h"
41#include "pub_tool_options.h"
njn1d0825f2006-03-27 11:37:07 +000042#include "pub_tool_oset.h"
njnc7561b92005-06-19 01:24:32 +000043#include "pub_tool_replacemalloc.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_threadstate.h"
sewardj05a46732006-10-17 01:28:10 +000046#include "pub_tool_oset.h"
njnc7561b92005-06-19 01:24:32 +000047
48#include "mc_include.h"
49#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000050
tomd55121e2005-12-19 12:40:13 +000051#ifdef HAVE_BUILTIN_EXPECT
sewardjc1a2cda2005-04-21 17:34:00 +000052#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
53#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
tomd55121e2005-12-19 12:40:13 +000054#else
55#define EXPECTED_TAKEN(cond) (cond)
56#define EXPECTED_NOT_TAKEN(cond) (cond)
57#endif
sewardjc1a2cda2005-04-21 17:34:00 +000058
njn1d0825f2006-03-27 11:37:07 +000059/* Set to 1 to do a little more sanity checking */
sewardj23eb2fd2005-04-22 16:29:19 +000060#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000061
njn25e49d8e72002-09-23 09:36:25 +000062#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
63
njn25e49d8e72002-09-23 09:36:25 +000064
njn25e49d8e72002-09-23 09:36:25 +000065/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +000066/*--- Fast-case knobs ---*/
67/*------------------------------------------------------------*/
68
69// Comment these out to disable the fast cases (don't just set them to zero).
70
71#define PERF_FAST_LOADV 1
72#define PERF_FAST_STOREV 1
73
74#define PERF_FAST_SARP 1
75
76#define PERF_FAST_STACK 1
77#define PERF_FAST_STACK2 1
78
79/*------------------------------------------------------------*/
80/*--- V bits and A bits ---*/
81/*------------------------------------------------------------*/
82
83/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
84 thinks the corresponding value bit is defined. And every memory byte
85 has an A bit, which tracks whether Memcheck thinks the program can access
86 it safely. So every N-bit register is shadowed with N V bits, and every
87 memory byte is shadowed with 8 V bits and one A bit.
88
89 In the implementation, we use two forms of compression (compressed V bits
90 and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
91 for memory.
92
93 Memcheck also tracks extra information about each heap block that is
94 allocated, for detecting memory leaks and other purposes.
95*/
96
97/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000098/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000099/*------------------------------------------------------------*/
100
njn1d0825f2006-03-27 11:37:07 +0000101/* All reads and writes are checked against a memory map (a.k.a. shadow
102 memory), which records the state of all memory in the process.
103
104 On 32-bit machines the memory map is organised as follows.
105 The top 16 bits of an address are used to index into a top-level
106 map table, containing 65536 entries. Each entry is a pointer to a
107 map, which records the accessibility and validity
108 permissions for the 65536 bytes indexed by the lower 16 bits of the
109 address. Each byte is represented by two bits (details are below). So
110 each second-level map contains 16384 bytes. This two-level arrangement
111 conveniently divides the 4G address space into 64k lumps, each size 64k
112 bytes.
113
114 All entries in the primary (top-level) map must point to a valid
115 secondary (second-level) map. Since many of the 64kB chunks will
njndbf7ca72006-03-31 11:57:59 +0000116 have the same status for every bit -- ie. noaccess (for unused
117 address space) or entirely addressable and defined (for code segments) --
118 there are three distinguished secondary maps, which indicate 'noaccess',
119 'undefined' and 'defined'. For these uniform 64kB chunks, the primary
120 map entry points to the relevant distinguished map. In practice,
121 typically more than half of the addressable memory is represented with
122 the 'undefined' or 'defined' distinguished secondary map, so it gives a
123 good saving. It also lets us set the V+A bits of large address regions
124 quickly in set_address_range_perms().
njn1d0825f2006-03-27 11:37:07 +0000125
126 On 64-bit machines it's more complicated. If we followed the same basic
127 scheme we'd have a four-level table which would require too many memory
128 accesses. So instead the top-level map table has 2^19 entries (indexed
129 using bits 16..34 of the address); this covers the bottom 32GB. Any
130 accesses above 32GB are handled with a slow, sparse auxiliary table.
131 Valgrind's address space manager tries very hard to keep things below
132 this 32GB barrier so that performance doesn't suffer too much.
133
134 Note that this file has a lot of different functions for reading and
135 writing shadow memory. Only a couple are strictly necessary (eg.
136 get_vabits2 and set_vabits2), most are just specialised for specific
137 common cases to improve performance.
138
139 Aside: the V+A bits are less precise than they could be -- we have no way
140 of marking memory as read-only. It would be great if we could add an
141 extra state VA_BITSn_READONLY. But then we'd have 5 different states,
142 which requires 2.3 bits to hold, and there's no way to do that elegantly
143 -- we'd have to double up to 4 bits of metadata per byte, which doesn't
144 seem worth it.
145*/
sewardjc859fbf2005-04-22 21:10:28 +0000146
sewardj45d94cc2005-04-20 14:44:11 +0000147/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000148
sewardj23eb2fd2005-04-22 16:29:19 +0000149/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000150
sewardje4ccc012005-05-02 12:53:38 +0000151#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000152
153/* cover the entire address space */
154# define N_PRIMARY_BITS 16
155
156#else
157
sewardj34483bc2005-09-28 11:50:20 +0000158/* Just handle the first 32G fast and the rest via auxiliary
sewardj21f7ff42005-04-28 10:32:02 +0000159 primaries. */
sewardj34483bc2005-09-28 11:50:20 +0000160# define N_PRIMARY_BITS 19
sewardj21f7ff42005-04-28 10:32:02 +0000161
162#endif
163
sewardj45d94cc2005-04-20 14:44:11 +0000164
sewardjc1a2cda2005-04-21 17:34:00 +0000165/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000166#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000167
168/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000169#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
170
171
sewardj45d94cc2005-04-20 14:44:11 +0000172/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000173
njn1d0825f2006-03-27 11:37:07 +0000174// Each byte of memory conceptually has an A bit, which indicates its
175// addressability, and 8 V bits, which indicates its definedness.
176//
177// But because very few bytes are partially defined, we can use a nice
178// compression scheme to reduce the size of shadow memory. Each byte of
179// memory has 2 bits which indicates its state (ie. V+A bits):
180//
njndbf7ca72006-03-31 11:57:59 +0000181// 00: noaccess (unaddressable but treated as fully defined)
182// 01: undefined (addressable and fully undefined)
183// 10: defined (addressable and fully defined)
184// 11: partdefined (addressable and partially defined)
njn1d0825f2006-03-27 11:37:07 +0000185//
njndbf7ca72006-03-31 11:57:59 +0000186// In the "partdefined" case, we use a secondary table to store the V bits.
187// Each entry in the secondary-V-bits table maps a byte address to its 8 V
188// bits.
njn1d0825f2006-03-27 11:37:07 +0000189//
190// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
191// four bytes (32 bits) of memory are in each chunk. Hence the name
192// "vabits8". This lets us get the V+A bits for four bytes at a time
193// easily (without having to do any shifting and/or masking), and that is a
194// very common operation. (Note that although each vabits8 chunk
195// is 8 bits in size, it represents 32 bits of memory.)
196//
197// The representation is "inverse" little-endian... each 4 bytes of
198// memory is represented by a 1 byte value, where:
199//
200// - the status of byte (a+0) is held in bits [1..0]
201// - the status of byte (a+1) is held in bits [3..2]
202// - the status of byte (a+2) is held in bits [5..4]
203// - the status of byte (a+3) is held in bits [7..6]
204//
205// It's "inverse" because endianness normally describes a mapping from
206// value bits to memory addresses; in this case the mapping is inverted.
207// Ie. instead of particular value bits being held in certain addresses, in
208// this case certain addresses are represented by particular value bits.
209// See insert_vabits2_into_vabits8() for an example.
210//
211// But note that we don't compress the V bits stored in registers; they
212// need to be explicit to make the shadow operations possible.  Therefore
213// when moving values between registers and memory we need to convert
214// between the expanded in-register format and the compressed in-memory
215// format. This isn't so difficult, it just requires careful attention in a
216// few places.
217
218// These represent eight bits of memory.
219#define VA_BITS2_NOACCESS 0x0 // 00b
njndbf7ca72006-03-31 11:57:59 +0000220#define VA_BITS2_UNDEFINED 0x1 // 01b
221#define VA_BITS2_DEFINED 0x2 // 10b
222#define VA_BITS2_PARTDEFINED 0x3 // 11b
njn1d0825f2006-03-27 11:37:07 +0000223
224// These represent 16 bits of memory.
225#define VA_BITS4_NOACCESS 0x0 // 00_00b
njndbf7ca72006-03-31 11:57:59 +0000226#define VA_BITS4_UNDEFINED 0x5 // 01_01b
227#define VA_BITS4_DEFINED 0xa // 10_10b
njn1d0825f2006-03-27 11:37:07 +0000228
229// These represent 32 bits of memory.
230#define VA_BITS8_NOACCESS 0x00 // 00_00_00_00b
njndbf7ca72006-03-31 11:57:59 +0000231#define VA_BITS8_UNDEFINED 0x55 // 01_01_01_01b
232#define VA_BITS8_DEFINED 0xaa // 10_10_10_10b
njn1d0825f2006-03-27 11:37:07 +0000233
234// These represent 64 bits of memory.
235#define VA_BITS16_NOACCESS 0x0000 // 00_00_00_00b x 2
njndbf7ca72006-03-31 11:57:59 +0000236#define VA_BITS16_UNDEFINED 0x5555 // 01_01_01_01b x 2
237#define VA_BITS16_DEFINED 0xaaaa // 10_10_10_10b x 2
njn1d0825f2006-03-27 11:37:07 +0000238
239
240#define SM_CHUNKS 16384
241#define SM_OFF(aaa) (((aaa) & 0xffff) >> 2)
242#define SM_OFF_16(aaa) (((aaa) & 0xffff) >> 3)
243
244// Paranoia: it's critical for performance that the requested inlining
245// occurs. So try extra hard.
246#define INLINE inline __attribute__((always_inline))
247
248static INLINE Addr start_of_this_sm ( Addr a ) {
249 return (a & (~SM_MASK));
250}
251static INLINE Bool is_start_of_sm ( Addr a ) {
252 return (start_of_this_sm(a) == a);
253}
254
njn25e49d8e72002-09-23 09:36:25 +0000255typedef
256 struct {
njn1d0825f2006-03-27 11:37:07 +0000257 UChar vabits8[SM_CHUNKS];
njn25e49d8e72002-09-23 09:36:25 +0000258 }
259 SecMap;
260
njn1d0825f2006-03-27 11:37:07 +0000261// 3 distinguished secondary maps, one for no-access, one for
262// accessible but undefined, and one for accessible and defined.
263// Distinguished secondaries may never be modified.
264#define SM_DIST_NOACCESS 0
njndbf7ca72006-03-31 11:57:59 +0000265#define SM_DIST_UNDEFINED 1
266#define SM_DIST_DEFINED 2
njnb8dca862005-03-14 02:42:44 +0000267
sewardj45d94cc2005-04-20 14:44:11 +0000268static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000269
njn1d0825f2006-03-27 11:37:07 +0000270static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
sewardj45d94cc2005-04-20 14:44:11 +0000271 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
272}
njnb8dca862005-03-14 02:42:44 +0000273
njn1d0825f2006-03-27 11:37:07 +0000274// Forward declaration
275static void update_SM_counts(SecMap* oldSM, SecMap* newSM);
276
sewardj45d94cc2005-04-20 14:44:11 +0000277/* dist_sm points to one of our three distinguished secondaries. Make
278 a copy of it so that we can write to it.
279*/
280static SecMap* copy_for_writing ( SecMap* dist_sm )
281{
282 SecMap* new_sm;
283 tl_assert(dist_sm == &sm_distinguished[0]
njn1d0825f2006-03-27 11:37:07 +0000284 || dist_sm == &sm_distinguished[1]
285 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000286
sewardj45f4e7c2005-09-27 19:20:21 +0000287 new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
288 if (new_sm == NULL)
289 VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
290 sizeof(SecMap) );
sewardj45d94cc2005-04-20 14:44:11 +0000291 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
njn1d0825f2006-03-27 11:37:07 +0000292 update_SM_counts(dist_sm, new_sm);
sewardj45d94cc2005-04-20 14:44:11 +0000293 return new_sm;
294}
njnb8dca862005-03-14 02:42:44 +0000295
njn1d0825f2006-03-27 11:37:07 +0000296/* --------------- Stats --------------- */
297
njndbf7ca72006-03-31 11:57:59 +0000298static Int n_issued_SMs = 0;
299static Int n_deissued_SMs = 0;
300static Int n_noaccess_SMs = N_PRIMARY_MAP; // start with many noaccess DSMs
301static Int n_undefined_SMs = 0;
302static Int n_defined_SMs = 0;
303static Int n_non_DSM_SMs = 0;
304static Int max_noaccess_SMs = 0;
305static Int max_undefined_SMs = 0;
306static Int max_defined_SMs = 0;
307static Int max_non_DSM_SMs = 0;
njn1d0825f2006-03-27 11:37:07 +0000308
sewardj05a46732006-10-17 01:28:10 +0000309/* # searches initiated in auxmap_L1, and # base cmps required */
310static ULong n_auxmap_L1_searches = 0;
311static ULong n_auxmap_L1_cmps = 0;
312/* # of searches that missed in auxmap_L1 and therefore had to
313 be handed to auxmap_L2. And the number of nodes inserted. */
314static ULong n_auxmap_L2_searches = 0;
315static ULong n_auxmap_L2_nodes = 0;
316
njn1d0825f2006-03-27 11:37:07 +0000317static Int n_sanity_cheap = 0;
318static Int n_sanity_expensive = 0;
319
320static Int n_secVBit_nodes = 0;
321static Int max_secVBit_nodes = 0;
322
323static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
324{
njndbf7ca72006-03-31 11:57:59 +0000325 if (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
326 else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
327 else if (oldSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs --;
328 else { n_non_DSM_SMs --;
329 n_deissued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000330
njndbf7ca72006-03-31 11:57:59 +0000331 if (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
332 else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
333 else if (newSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs ++;
334 else { n_non_DSM_SMs ++;
335 n_issued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000336
njndbf7ca72006-03-31 11:57:59 +0000337 if (n_noaccess_SMs > max_noaccess_SMs ) max_noaccess_SMs = n_noaccess_SMs;
338 if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
339 if (n_defined_SMs > max_defined_SMs ) max_defined_SMs = n_defined_SMs;
340 if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
njn1d0825f2006-03-27 11:37:07 +0000341}
sewardj45d94cc2005-04-20 14:44:11 +0000342
343/* --------------- Primary maps --------------- */
344
345/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000346 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000347 handled using the auxiliary primary map.
348*/
sewardj23eb2fd2005-04-22 16:29:19 +0000349static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000350
351
352/* An entry in the auxiliary primary map. base must be a 64k-aligned
353 value, and sm points at the relevant secondary map. As with the
354 main primary map, the secondary may be either a real secondary, or
sewardj05a46732006-10-17 01:28:10 +0000355 one of the three distinguished secondaries. DO NOT CHANGE THIS
356 LAYOUT: the first word has to be the key for OSet fast lookups.
sewardj45d94cc2005-04-20 14:44:11 +0000357*/
358typedef
359 struct {
sewardj23eb2fd2005-04-22 16:29:19 +0000360 Addr base;
sewardj45d94cc2005-04-20 14:44:11 +0000361 SecMap* sm;
362 }
363 AuxMapEnt;
364
sewardj05a46732006-10-17 01:28:10 +0000365/* Tunable parameter: How big is the L1 queue? */
366#define N_AUXMAP_L1 24
sewardj45d94cc2005-04-20 14:44:11 +0000367
sewardj05a46732006-10-17 01:28:10 +0000368/* Tunable parameter: How far along the L1 queue to insert
369 entries resulting from L2 lookups? */
370#define AUXMAP_L1_INSERT_IX 12
sewardj45d94cc2005-04-20 14:44:11 +0000371
sewardj05a46732006-10-17 01:28:10 +0000372static struct {
373 Addr base;
374 AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
375 }
376 auxmap_L1[N_AUXMAP_L1];
377
378static OSet* auxmap_L2 = NULL;
379
380static void init_auxmap_L1_L2 ( void )
sewardj45d94cc2005-04-20 14:44:11 +0000381{
sewardj05a46732006-10-17 01:28:10 +0000382 Int i;
383 for (i = 0; i < N_AUXMAP_L1; i++) {
384 auxmap_L1[i].base = 0;
385 auxmap_L1[i].ent = NULL;
sewardj45d94cc2005-04-20 14:44:11 +0000386 }
387
sewardj05a46732006-10-17 01:28:10 +0000388 tl_assert(0 == offsetof(AuxMapEnt,base));
389 tl_assert(sizeof(Addr) == sizeof(void*));
390 auxmap_L2 = VG_(OSet_Create)( /*keyOff*/ offsetof(AuxMapEnt,base),
391 /*fastCmp*/ NULL,
392 VG_(malloc), VG_(free) );
sewardj05fe85e2005-04-27 22:46:36 +0000393}
394
sewardj05a46732006-10-17 01:28:10 +0000395/* Check representation invariants; if OK return NULL; else a
396 descriptive bit of text. Also return the number of
397 non-distinguished secondary maps referred to from the auxiliary
398 primary maps. */
sewardj05fe85e2005-04-27 22:46:36 +0000399
sewardj05a46732006-10-17 01:28:10 +0000400static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
sewardj05fe85e2005-04-27 22:46:36 +0000401{
sewardj05a46732006-10-17 01:28:10 +0000402 Word i, j;
403 /* On a 32-bit platform, the L2 and L1 tables should
404 both remain empty forever.
sewardj05fe85e2005-04-27 22:46:36 +0000405
sewardj05a46732006-10-17 01:28:10 +0000406 On a 64-bit platform:
407 In the L2 table:
408 all .base & 0xFFFF == 0
409 all .base > MAX_PRIMARY_ADDRESS
410 In the L1 table:
411 all .base & 0xFFFF == 0
412 all (.base > MAX_PRIMARY_ADDRESS
413 .base & 0xFFFF == 0
414 and .ent points to an AuxMapEnt with the same .base)
415 or
416 (.base == 0 and .ent == NULL)
417 */
418 *n_secmaps_found = 0;
419 if (sizeof(void*) == 4) {
420 /* 32-bit platform */
421 if (VG_(OSet_Size)(auxmap_L2) != 0)
422 return "32-bit: auxmap_L2 is non-empty";
423 for (i = 0; i < N_AUXMAP_L1; i++)
424 if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
425 return "32-bit: auxmap_L1 is non-empty";
426 } else {
427 /* 64-bit platform */
428 UWord elems_seen = 0;
429 AuxMapEnt *elem, *res;
430 AuxMapEnt key;
431 /* L2 table */
432 VG_(OSet_ResetIter)(auxmap_L2);
433 while ( (elem = VG_(OSet_Next)(auxmap_L2)) ) {
434 elems_seen++;
435 if (0 != (elem->base & (Addr)0xFFFF))
436 return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
437 if (elem->base <= MAX_PRIMARY_ADDRESS)
438 return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
439 if (elem->sm == NULL)
440 return "64-bit: .sm in _L2 is NULL";
441 if (!is_distinguished_sm(elem->sm))
442 (*n_secmaps_found)++;
443 }
444 if (elems_seen != n_auxmap_L2_nodes)
445 return "64-bit: disagreement on number of elems in _L2";
446 /* Check L1-L2 correspondence */
447 for (i = 0; i < N_AUXMAP_L1; i++) {
448 if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
449 continue;
450 if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
451 return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
452 if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
453 return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
454 if (auxmap_L1[i].ent == NULL)
455 return "64-bit: .ent is NULL in auxmap_L1";
456 if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
457 return "64-bit: _L1 and _L2 bases are inconsistent";
458 /* Look it up in auxmap_L2. */
459 key.base = auxmap_L1[i].base;
460 key.sm = 0;
461 res = VG_(OSet_Lookup)(auxmap_L2, &key);
462 if (res == NULL)
463 return "64-bit: _L1 .base not found in _L2";
464 if (res != auxmap_L1[i].ent)
465 return "64-bit: _L1 .ent disagrees with _L2 entry";
466 }
467 /* Check L1 contains no duplicates */
468 for (i = 0; i < N_AUXMAP_L1; i++) {
469 if (auxmap_L1[i].base == 0)
470 continue;
471 for (j = i+1; j < N_AUXMAP_L1; j++) {
472 if (auxmap_L1[j].base == 0)
473 continue;
474 if (auxmap_L1[j].base == auxmap_L1[i].base)
475 return "64-bit: duplicate _L1 .base entries";
476 }
477 }
478 }
479 return NULL; /* ok */
480}
481
482static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
483{
484 Word i;
485 tl_assert(ent);
486 tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
487 for (i = N_AUXMAP_L1-1; i > rank; i--)
488 auxmap_L1[i] = auxmap_L1[i-1];
489 auxmap_L1[rank].base = ent->base;
490 auxmap_L1[rank].ent = ent;
491}
492
493static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
494{
495 AuxMapEnt key;
496 AuxMapEnt* res;
497 Word i;
498
499 tl_assert(a > MAX_PRIMARY_ADDRESS);
500 a &= ~(Addr)0xFFFF;
501
502 /* First search the front-cache, which is a self-organising
503 list containing the most popular entries. */
504
505 if (EXPECTED_TAKEN(auxmap_L1[0].base == a))
506 return auxmap_L1[0].ent;
507 if (EXPECTED_TAKEN(auxmap_L1[1].base == a)) {
508 Addr t_base = auxmap_L1[0].base;
509 AuxMapEnt* t_ent = auxmap_L1[0].ent;
510 auxmap_L1[0].base = auxmap_L1[1].base;
511 auxmap_L1[0].ent = auxmap_L1[1].ent;
512 auxmap_L1[1].base = t_base;
513 auxmap_L1[1].ent = t_ent;
514 return auxmap_L1[0].ent;
sewardj45d94cc2005-04-20 14:44:11 +0000515 }
516
sewardj05a46732006-10-17 01:28:10 +0000517 n_auxmap_L1_searches++;
sewardj45d94cc2005-04-20 14:44:11 +0000518
sewardj05a46732006-10-17 01:28:10 +0000519 for (i = 0; i < N_AUXMAP_L1; i++) {
520 if (auxmap_L1[i].base == a) {
521 break;
522 }
523 }
524 tl_assert(i >= 0 && i <= N_AUXMAP_L1);
sewardj45d94cc2005-04-20 14:44:11 +0000525
sewardj05a46732006-10-17 01:28:10 +0000526 n_auxmap_L1_cmps += (ULong)(i+1);
sewardj45d94cc2005-04-20 14:44:11 +0000527
sewardj05a46732006-10-17 01:28:10 +0000528 if (i < N_AUXMAP_L1) {
529 if (i > 0) {
530 Addr t_base = auxmap_L1[i-1].base;
531 AuxMapEnt* t_ent = auxmap_L1[i-1].ent;
532 auxmap_L1[i-1].base = auxmap_L1[i-0].base;
533 auxmap_L1[i-1].ent = auxmap_L1[i-0].ent;
534 auxmap_L1[i-0].base = t_base;
535 auxmap_L1[i-0].ent = t_ent;
536 i--;
537 }
538 return auxmap_L1[i].ent;
539 }
540
541 n_auxmap_L2_searches++;
542
543 /* First see if we already have it. */
544 key.base = a;
545 key.sm = 0;
546
547 res = VG_(OSet_Lookup)(auxmap_L2, &key);
548 if (res)
549 insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
550 return res;
551}
552
553static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
554{
555 AuxMapEnt *nyu, *res;
556
557 /* First see if we already have it. */
558 res = maybe_find_in_auxmap( a );
559 if (EXPECTED_TAKEN(res))
560 return res;
561
562 /* Ok, there's no entry in the secondary map, so we'll have
563 to allocate one. */
564 a &= ~(Addr)0xFFFF;
565
566 nyu = (AuxMapEnt*) VG_(OSet_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
567 tl_assert(nyu);
568 nyu->base = a;
569 nyu->sm = &sm_distinguished[SM_DIST_NOACCESS];
570 VG_(OSet_Insert)( auxmap_L2, nyu );
571 insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
572 n_auxmap_L2_nodes++;
573 return nyu;
sewardj45d94cc2005-04-20 14:44:11 +0000574}
575
sewardj45d94cc2005-04-20 14:44:11 +0000576/* --------------- SecMap fundamentals --------------- */
577
njn1d0825f2006-03-27 11:37:07 +0000578// In all these, 'low' means it's definitely in the main primary map,
579// 'high' means it's definitely in the auxiliary table.
580
581static INLINE SecMap** get_secmap_low_ptr ( Addr a )
582{
583 UWord pm_off = a >> 16;
584# if VG_DEBUG_MEMORY >= 1
585 tl_assert(pm_off < N_PRIMARY_MAP);
586# endif
587 return &primary_map[ pm_off ];
588}
589
590static INLINE SecMap** get_secmap_high_ptr ( Addr a )
591{
592 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
593 return &am->sm;
594}
595
596static SecMap** get_secmap_ptr ( Addr a )
597{
598 return ( a <= MAX_PRIMARY_ADDRESS
599 ? get_secmap_low_ptr(a)
600 : get_secmap_high_ptr(a));
601}
602
njna7c7ebd2006-03-28 12:51:02 +0000603static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000604{
605 return *get_secmap_low_ptr(a);
606}
607
njna7c7ebd2006-03-28 12:51:02 +0000608static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000609{
610 return *get_secmap_high_ptr(a);
611}
612
njna7c7ebd2006-03-28 12:51:02 +0000613static INLINE SecMap* get_secmap_for_writing_low(Addr a)
njn1d0825f2006-03-27 11:37:07 +0000614{
615 SecMap** p = get_secmap_low_ptr(a);
616 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
617 *p = copy_for_writing(*p);
618 return *p;
619}
620
njna7c7ebd2006-03-28 12:51:02 +0000621static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000622{
623 SecMap** p = get_secmap_high_ptr(a);
624 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
625 *p = copy_for_writing(*p);
626 return *p;
627}
628
sewardj45d94cc2005-04-20 14:44:11 +0000629/* Produce the secmap for 'a', either from the primary map or by
630 ensuring there is an entry for it in the aux primary map. The
631 secmap may be a distinguished one as the caller will only want to
632 be able to read it.
633*/
sewardj05a46732006-10-17 01:28:10 +0000634static INLINE SecMap* get_secmap_for_reading ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000635{
njn1d0825f2006-03-27 11:37:07 +0000636 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000637 ? get_secmap_for_reading_low (a)
638 : get_secmap_for_reading_high(a) );
sewardj45d94cc2005-04-20 14:44:11 +0000639}
640
641/* Produce the secmap for 'a', either from the primary map or by
642 ensuring there is an entry for it in the aux primary map. The
643 secmap may not be a distinguished one, since the caller will want
644 to be able to write it. If it is a distinguished secondary, make a
645 writable copy of it, install it, and return the copy instead. (COW
646 semantics).
647*/
njna7c7ebd2006-03-28 12:51:02 +0000648static SecMap* get_secmap_for_writing ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000649{
njn1d0825f2006-03-27 11:37:07 +0000650 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000651 ? get_secmap_for_writing_low (a)
652 : get_secmap_for_writing_high(a) );
njn1d0825f2006-03-27 11:37:07 +0000653}
654
655/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
656 allocate one if one doesn't already exist. This is used by the
657 leak checker.
658*/
659static SecMap* maybe_get_secmap_for ( Addr a )
660{
sewardj45d94cc2005-04-20 14:44:11 +0000661 if (a <= MAX_PRIMARY_ADDRESS) {
njna7c7ebd2006-03-28 12:51:02 +0000662 return get_secmap_for_reading_low(a);
sewardj45d94cc2005-04-20 14:44:11 +0000663 } else {
njn1d0825f2006-03-27 11:37:07 +0000664 AuxMapEnt* am = maybe_find_in_auxmap(a);
665 return am ? am->sm : NULL;
sewardj45d94cc2005-04-20 14:44:11 +0000666 }
667}
668
njn1d0825f2006-03-27 11:37:07 +0000669/* --------------- Fundamental functions --------------- */
670
671static INLINE
672void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
673{
674 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
675 *vabits8 &= ~(0x3 << shift); // mask out the two old bits
676 *vabits8 |= (vabits2 << shift); // mask in the two new bits
677}
678
679static INLINE
680void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
681{
682 UInt shift;
683 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
684 shift = (a & 2) << 1; // shift by 0 or 4
685 *vabits8 &= ~(0xf << shift); // mask out the four old bits
686 *vabits8 |= (vabits4 << shift); // mask in the four new bits
687}
688
689static INLINE
690UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
691{
692 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
693 vabits8 >>= shift; // shift the two bits to the bottom
694 return 0x3 & vabits8; // mask out the rest
695}
696
697static INLINE
698UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
699{
700 UInt shift;
701 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
702 shift = (a & 2) << 1; // shift by 0 or 4
703 vabits8 >>= shift; // shift the four bits to the bottom
704 return 0xf & vabits8; // mask out the rest
705}
706
707// Note that these four are only used in slow cases. The fast cases do
708// clever things like combine the auxmap check (in
709// get_secmap_{read,writ}able) with alignment checks.
710
711// *** WARNING! ***
712// Any time this function is called, if it is possible that vabits2
njndbf7ca72006-03-31 11:57:59 +0000713// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
njn1d0825f2006-03-27 11:37:07 +0000714// sec-V-bits table must also be set!
715static INLINE
716void set_vabits2 ( Addr a, UChar vabits2 )
717{
njna7c7ebd2006-03-28 12:51:02 +0000718 SecMap* sm = get_secmap_for_writing(a);
njn1d0825f2006-03-27 11:37:07 +0000719 UWord sm_off = SM_OFF(a);
720 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
721}
722
723static INLINE
724UChar get_vabits2 ( Addr a )
725{
njna7c7ebd2006-03-28 12:51:02 +0000726 SecMap* sm = get_secmap_for_reading(a);
njn1d0825f2006-03-27 11:37:07 +0000727 UWord sm_off = SM_OFF(a);
728 UChar vabits8 = sm->vabits8[sm_off];
729 return extract_vabits2_from_vabits8(a, vabits8);
730}
731
sewardjf2184912006-05-03 22:13:57 +0000732// *** WARNING! ***
733// Any time this function is called, if it is possible that any of the
734// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
735// corresponding entry(s) in the sec-V-bits table must also be set!
736static INLINE
737UChar get_vabits8_for_aligned_word32 ( Addr a )
738{
739 SecMap* sm = get_secmap_for_reading(a);
740 UWord sm_off = SM_OFF(a);
741 UChar vabits8 = sm->vabits8[sm_off];
742 return vabits8;
743}
744
745static INLINE
746void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
747{
748 SecMap* sm = get_secmap_for_writing(a);
749 UWord sm_off = SM_OFF(a);
750 sm->vabits8[sm_off] = vabits8;
751}
752
753
njn1d0825f2006-03-27 11:37:07 +0000754// Forward declarations
755static UWord get_sec_vbits8(Addr a);
756static void set_sec_vbits8(Addr a, UWord vbits8);
757
758// Returns False if there was an addressability error.
759static INLINE
760Bool set_vbits8 ( Addr a, UChar vbits8 )
761{
762 Bool ok = True;
763 UChar vabits2 = get_vabits2(a);
764 if ( VA_BITS2_NOACCESS != vabits2 ) {
765 // Addressable. Convert in-register format to in-memory format.
766 // Also remove any existing sec V bit entry for the byte if no
767 // longer necessary.
njndbf7ca72006-03-31 11:57:59 +0000768 if ( V_BITS8_DEFINED == vbits8 ) { vabits2 = VA_BITS2_DEFINED; }
769 else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
770 else { vabits2 = VA_BITS2_PARTDEFINED;
njn1d0825f2006-03-27 11:37:07 +0000771 set_sec_vbits8(a, vbits8); }
772 set_vabits2(a, vabits2);
773
774 } else {
775 // Unaddressable! Do nothing -- when writing to unaddressable
776 // memory it acts as a black hole, and the V bits can never be seen
777 // again. So we don't have to write them at all.
778 ok = False;
779 }
780 return ok;
781}
782
783// Returns False if there was an addressability error. In that case, we put
784// all defined bits into vbits8.
785static INLINE
786Bool get_vbits8 ( Addr a, UChar* vbits8 )
787{
788 Bool ok = True;
789 UChar vabits2 = get_vabits2(a);
790
791 // Convert the in-memory format to in-register format.
njndbf7ca72006-03-31 11:57:59 +0000792 if ( VA_BITS2_DEFINED == vabits2 ) { *vbits8 = V_BITS8_DEFINED; }
793 else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
794 else if ( VA_BITS2_NOACCESS == vabits2 ) {
njn1d0825f2006-03-27 11:37:07 +0000795 *vbits8 = V_BITS8_DEFINED; // Make V bits defined!
796 ok = False;
797 } else {
njndbf7ca72006-03-31 11:57:59 +0000798 tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
njn1d0825f2006-03-27 11:37:07 +0000799 *vbits8 = get_sec_vbits8(a);
800 }
801 return ok;
802}
803
804
805/* --------------- Secondary V bit table ------------ */
806
807// This table holds the full V bit pattern for partially-defined bytes
njndbf7ca72006-03-31 11:57:59 +0000808// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
809// memory.
njn1d0825f2006-03-27 11:37:07 +0000810//
811// Note: the nodes in this table can become stale. Eg. if you write a PDB,
812// then overwrite the same address with a fully defined byte, the sec-V-bit
813// node will not necessarily be removed. This is because checking for
814// whether removal is necessary would slow down the fast paths.
815//
816// To avoid the stale nodes building up too much, we periodically (once the
817// table reaches a certain size) garbage collect (GC) the table by
818// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
819// are stale and haven't been touched for a certain number of collections.
820// If more than a certain proportion of nodes survived, we increase the
821// table size so that GCs occur less often.
822//
823// (So this a bit different to a traditional GC, where you definitely want
824// to remove any dead nodes. It's more like we have a resizable cache and
825// we're trying to find the right balance how many elements to evict and how
826// big to make the cache.)
827//
828// This policy is designed to avoid bad table bloat in the worst case where
829// a program creates huge numbers of stale PDBs -- we would get this bloat
830// if we had no GC -- while handling well the case where a node becomes
831// stale but shortly afterwards is rewritten with a PDB and so becomes
832// non-stale again (which happens quite often, eg. in perf/bz2). If we just
833// remove all stale nodes as soon as possible, we just end up re-adding a
834// lot of them in later again. The "sufficiently stale" approach avoids
835// this. (If a program has many live PDBs, performance will just suck,
836// there's no way around that.)
837
// The table itself: an OSet of SecVBitNode, keyed on each node's base
// address 'a'.
static OSet* secVBitTable;

// Stats
static ULong sec_vbits_new_nodes = 0;   // nodes allocated so far
static ULong sec_vbits_updates = 0;     // writes that hit an existing node

// This must be a power of two; this is checked in mc_pre_clo_init().
// The size chosen here is a trade-off: if the nodes are bigger (ie. cover
// a larger address range) they take more space but we can get multiple
// partially-defined bytes in one if they are close to each other, reducing
// the number of total nodes.  In practice sometimes they are clustered (eg.
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
// row), but often not.  So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE 16

// We make the table bigger if more than this many nodes survive a GC.
#define MAX_SURVIVOR_PROPORTION 0.5

// Each time we make the table bigger, we increase it by this much.
#define TABLE_GROWTH_FACTOR 2

// This defines "sufficiently stale" -- any node that hasn't been touched in
// this many GCs will be removed.
#define MAX_STALE_AGE 2

// We GC the table when it gets this many nodes in it, ie. it's effectively
// the table size.  It can change.
static Int secVBitLimit = 1024;

// The number of GCs done, used to age sec-V-bit nodes for eviction.
// Because it's unsigned, wrapping doesn't matter -- the right answer will
// come out anyway.
static UInt GCs_done = 0;

typedef
   struct {
      Addr a;                                  // base address, BYTES_PER_SEC_VBIT_NODE-aligned
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];   // one full V-bit byte per byte of memory
      UInt last_touched;                       // GCs_done value when last written
   }
   SecVBitNode;
879
880static OSet* createSecVBitTable(void)
881{
882 return VG_(OSet_Create)( offsetof(SecVBitNode, a),
883 NULL, // use fast comparisons
884 VG_(malloc), VG_(free) );
885}
886
// Garbage-collect the sec-V-bits table: copy "sufficiently fresh" nodes
// into a new OSet, destroy the old one, and enlarge secVBitLimit if too
// large a proportion of nodes survived (so GCs happen less often).
static void gcSecVBitTable(void)
{
   OSet*        secVBitTable2;
   SecVBitNode* n;
   Int          i, n_nodes = 0, n_survivors = 0;

   GCs_done++;

   // Create the new table.
   secVBitTable2 = createSecVBitTable();

   // Traverse the table, moving fresh nodes into the new table.
   VG_(OSet_ResetIter)(secVBitTable);
   while ( (n = VG_(OSet_Next)(secVBitTable)) ) {
      Bool keep = False;
      if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
         // Keep node if it's been touched recently enough (regardless of
         // freshness/staleness).
         keep = True;
      } else {
         // Keep node if any of its bytes are non-stale.  Using
         // get_vabits2() for the lookup is not very efficient, but I don't
         // think it matters.
         for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
            if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
               keep = True;      // Found a non-stale byte, so keep
               break;
            }
         }
      }

      if ( keep ) {
         // Insert a copy of the node into the new table.
         SecVBitNode* n2 =
            VG_(OSet_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
         *n2 = *n;
         VG_(OSet_Insert)(secVBitTable2, n2);
      }
   }

   // Get the before and after sizes.
   n_nodes     = VG_(OSet_Size)(secVBitTable);
   n_survivors = VG_(OSet_Size)(secVBitTable2);

   // Destroy the old table, and put the new one in its place.
   VG_(OSet_Destroy)(secVBitTable, NULL);
   secVBitTable = secVBitTable2;

   if (VG_(clo_verbosity) > 1) {
      Char percbuf[6];
      VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
      VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
                   n_nodes, n_survivors, percbuf);
   }

   // Increase table size if necessary.
   if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
      secVBitLimit *= TABLE_GROWTH_FACTOR;
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
                      secVBitLimit);
   }
}
950
951static UWord get_sec_vbits8(Addr a)
952{
953 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
954 Int amod = a % BYTES_PER_SEC_VBIT_NODE;
955 SecVBitNode* n = VG_(OSet_Lookup)(secVBitTable, &aAligned);
956 UChar vbits8;
957 tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
958 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
959 // make it to the secondary V bits table.
960 vbits8 = n->vbits8[amod];
961 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
962 return vbits8;
963}
964
965static void set_sec_vbits8(Addr a, UWord vbits8)
966{
967 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
968 Int i, amod = a % BYTES_PER_SEC_VBIT_NODE;
969 SecVBitNode* n = VG_(OSet_Lookup)(secVBitTable, &aAligned);
970 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
971 // make it to the secondary V bits table.
972 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
973 if (n) {
974 n->vbits8[amod] = vbits8; // update
975 n->last_touched = GCs_done;
976 sec_vbits_updates++;
977 } else {
978 // New node: assign the specific byte, make the rest invalid (they
979 // should never be read as-is, but be cautious).
980 n = VG_(OSet_AllocNode)(secVBitTable, sizeof(SecVBitNode));
981 n->a = aAligned;
982 for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
983 n->vbits8[i] = V_BITS8_UNDEFINED;
984 }
985 n->vbits8[amod] = vbits8;
986 n->last_touched = GCs_done;
987
988 // Do a table GC if necessary. Nb: do this before inserting the new
989 // node, to avoid erroneously GC'ing the new node.
990 if (secVBitLimit == VG_(OSet_Size)(secVBitTable)) {
991 gcSecVBitTable();
992 }
993
994 // Insert the new node.
995 VG_(OSet_Insert)(secVBitTable, n);
996 sec_vbits_new_nodes++;
997
998 n_secVBit_nodes = VG_(OSet_Size)(secVBitTable);
999 if (n_secVBit_nodes > max_secVBit_nodes)
1000 max_secVBit_nodes = n_secVBit_nodes;
1001 }
1002}
sewardj45d94cc2005-04-20 14:44:11 +00001003
1004/* --------------- Endianness helpers --------------- */
1005
1006/* Returns the offset in memory of the byteno-th most significant byte
1007 in a wordszB-sized word, given the specified endianness. */
njn1d0825f2006-03-27 11:37:07 +00001008static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
sewardj45d94cc2005-04-20 14:44:11 +00001009 UWord byteno ) {
1010 return bigendian ? (wordszB-1-byteno) : byteno;
1011}
1012
sewardj05a46732006-10-17 01:28:10 +00001013
1014/* --------------- Ignored address ranges --------------- */
1015
// Maximum number of address ranges that can be ignored.
#define M_IGNORE_RANGES 4

typedef
   struct {
      Int used;                      // number of valid entries in start[]/end[]
      Addr start[M_IGNORE_RANGES];   // inclusive lower bound of each range
      Addr end[M_IGNORE_RANGES];     // exclusive upper bound of each range
   }
   IgnoreRanges;

static IgnoreRanges ignoreRanges;
1027
1028static INLINE Bool in_ignored_range ( Addr a )
1029{
1030 Int i;
1031 if (EXPECTED_TAKEN(ignoreRanges.used == 0))
1032 return False;
1033 for (i = 0; i < ignoreRanges.used; i++) {
1034 if (a >= ignoreRanges.start[i] && a < ignoreRanges.end[i])
1035 return True;
1036 }
1037 return False;
1038}
1039
1040
1041/* Parse a 32- or 64-bit hex number, including leading 0x, from string
1042 starting at *ppc, putting result in *result, and return True. Or
1043 fail, in which case *ppc and *result are undefined, and return
1044 False. */
1045
1046static Bool isHex ( UChar c )
1047{
1048 return ((c >= '0' && c <= '9')
1049 || (c >= 'a' && c <= 'f')
1050 || (c >= 'A' && c <= 'F'));
1051}
1052
1053static UInt fromHex ( UChar c )
1054{
1055 if (c >= '0' && c <= '9')
1056 return (UInt)c - (UInt)'0';
1057 if (c >= 'a' && c <= 'f')
1058 return 10 + (UInt)c - (UInt)'a';
1059 if (c >= 'A' && c <= 'F')
1060 return 10 + (UInt)c - (UInt)'A';
1061 /*NOTREACHED*/
1062 tl_assert(0);
1063 return 0;
1064}
1065
1066static Bool parse_Addr ( UChar** ppc, Addr* result )
1067{
1068 Int used, limit = 2 * sizeof(Addr);
1069 if (**ppc != '0')
1070 return False;
1071 (*ppc)++;
1072 if (**ppc != 'x')
1073 return False;
1074 (*ppc)++;
1075 *result = 0;
1076 used = 0;
1077 while (isHex(**ppc)) {
1078 UInt d = fromHex(**ppc);
1079 tl_assert(d < 16);
1080 *result = ((*result) << 4) | fromHex(**ppc);
1081 (*ppc)++;
1082 used++;
1083 if (used > limit) return False;
1084 }
1085 if (used == 0)
1086 return False;
1087 return True;
1088}
1089
1090/* Parse two such numbers separated by a dash, or fail. */
1091
1092static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
1093{
1094 Bool ok = parse_Addr(ppc, result1);
1095 if (!ok)
1096 return False;
1097 if (**ppc != '-')
1098 return False;
1099 (*ppc)++;
1100 ok = parse_Addr(ppc, result2);
1101 if (!ok)
1102 return False;
1103 return True;
1104}
1105
1106/* Parse a set of ranges separated by commas into 'ignoreRanges', or
1107 fail. */
1108
1109static Bool parse_ignore_ranges ( UChar* str0 )
1110{
1111 Addr start, end;
1112 Bool ok;
1113 UChar* str = str0;
1114 UChar** ppc = &str;
1115 ignoreRanges.used = 0;
1116 while (1) {
1117 ok = parse_range(ppc, &start, &end);
1118 if (!ok)
1119 return False;
1120 if (ignoreRanges.used >= M_IGNORE_RANGES)
1121 return False;
1122 ignoreRanges.start[ignoreRanges.used] = start;
1123 ignoreRanges.end[ignoreRanges.used] = end;
1124 ignoreRanges.used++;
1125 if (**ppc == 0)
1126 return True;
1127 if (**ppc != ',')
1128 return False;
1129 (*ppc)++;
1130 }
1131 /*NOTREACHED*/
1132 return False;
1133}
1134
1135
sewardj45d94cc2005-04-20 14:44:11 +00001136/* --------------- Load/store slow cases. --------------- */
1137
njn1d0825f2006-03-27 11:37:07 +00001138// Forward declarations
1139static void mc_record_address_error ( ThreadId tid, Addr a,
1140 Int size, Bool isWrite );
1141static void mc_record_core_mem_error ( ThreadId tid, Bool isUnaddr, Char* s );
1142static void mc_record_param_error ( ThreadId tid, Addr a, Bool isReg,
1143 Bool isUnaddr, Char* msg );
1144static void mc_record_jump_error ( ThreadId tid, Addr a );
1145
sewardj45d94cc2005-04-20 14:44:11 +00001146static
njn1d0825f2006-03-27 11:37:07 +00001147#ifndef PERF_FAST_LOADV
1148INLINE
1149#endif
njn45e81252006-03-28 12:35:08 +00001150ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +00001151{
njn1d0825f2006-03-27 11:37:07 +00001152 /* Make up a 64-bit result V word, which contains the loaded data for
sewardjf3d57dd2005-04-22 20:23:27 +00001153 valid addresses and Defined for invalid addresses. Iterate over
1154 the bytes in the word, from the most significant down to the
1155 least. */
njn1d0825f2006-03-27 11:37:07 +00001156 ULong vbits64 = V_BITS64_UNDEFINED;
njn45e81252006-03-28 12:35:08 +00001157 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +00001158 SSizeT i = szB-1; // Must be signed
sewardj45d94cc2005-04-20 14:44:11 +00001159 SizeT n_addrs_bad = 0;
1160 Addr ai;
njn1d0825f2006-03-27 11:37:07 +00001161 Bool partial_load_exemption_applies;
1162 UChar vbits8;
1163 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +00001164
sewardjc1a2cda2005-04-21 17:34:00 +00001165 PROF_EVENT(30, "mc_LOADVn_slow");
sewardj05a46732006-10-17 01:28:10 +00001166
1167 /* ------------ BEGIN semi-fast cases ------------ */
1168 /* These deal quickly-ish with the common auxiliary primary map
1169 cases on 64-bit platforms. Are merely a speedup hack; can be
1170 omitted without loss of correctness/functionality. Note that in
1171 both cases the "sizeof(void*) == 8" causes these cases to be
1172 folded out by compilers on 32-bit platforms. These are derived
1173 from LOADV64 and LOADV32.
1174 */
1175 if (EXPECTED_TAKEN(sizeof(void*) == 8
1176 && nBits == 64 && VG_IS_8_ALIGNED(a))) {
1177 SecMap* sm = get_secmap_for_reading(a);
1178 UWord sm_off16 = SM_OFF_16(a);
1179 UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
1180 if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED))
1181 return V_BITS64_DEFINED;
1182 if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED))
1183 return V_BITS64_UNDEFINED;
1184 /* else fall into the slow case */
1185 }
1186 if (EXPECTED_TAKEN(sizeof(void*) == 8
1187 && nBits == 32 && VG_IS_4_ALIGNED(a))) {
1188 SecMap* sm = get_secmap_for_reading(a);
1189 UWord sm_off = SM_OFF(a);
1190 UWord vabits8 = sm->vabits8[sm_off];
1191 if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED))
1192 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
1193 if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED))
1194 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
1195 /* else fall into slow case */
1196 }
1197 /* ------------ END semi-fast cases ------------ */
1198
njn45e81252006-03-28 12:35:08 +00001199 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +00001200
njn1d0825f2006-03-27 11:37:07 +00001201 for (i = szB-1; i >= 0; i--) {
sewardjc1a2cda2005-04-21 17:34:00 +00001202 PROF_EVENT(31, "mc_LOADVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +00001203 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +00001204 ok = get_vbits8(ai, &vbits8);
1205 if (!ok) n_addrs_bad++;
1206 vbits64 <<= 8;
1207 vbits64 |= vbits8;
sewardj45d94cc2005-04-20 14:44:11 +00001208 }
1209
sewardj0ded7a42005-11-08 02:25:37 +00001210 /* This is a hack which avoids producing errors for code which
1211 insists in stepping along byte strings in aligned word-sized
1212 chunks, and there is a partially defined word at the end. (eg,
1213 optimised strlen). Such code is basically broken at least WRT
1214 semantics of ANSI C, but sometimes users don't have the option
1215 to fix it, and so this option is provided. Note it is now
1216 defaulted to not-engaged.
1217
1218 A load from a partially-addressible place is allowed if:
1219 - the command-line flag is set
1220 - it's a word-sized, word-aligned load
1221 - at least one of the addresses in the word *is* valid
1222 */
1223 partial_load_exemption_applies
njn1d0825f2006-03-27 11:37:07 +00001224 = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
sewardj0ded7a42005-11-08 02:25:37 +00001225 && VG_IS_WORD_ALIGNED(a)
1226 && n_addrs_bad < VG_WORDSIZE;
1227
1228 if (n_addrs_bad > 0 && !partial_load_exemption_applies)
njn1d0825f2006-03-27 11:37:07 +00001229 mc_record_address_error( VG_(get_running_tid)(), a, szB, False );
sewardj45d94cc2005-04-20 14:44:11 +00001230
njn1d0825f2006-03-27 11:37:07 +00001231 return vbits64;
sewardj45d94cc2005-04-20 14:44:11 +00001232}
1233
1234
njn1d0825f2006-03-27 11:37:07 +00001235static
1236#ifndef PERF_FAST_STOREV
1237INLINE
1238#endif
njn45e81252006-03-28 12:35:08 +00001239void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +00001240{
njn45e81252006-03-28 12:35:08 +00001241 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +00001242 SizeT i, n_addrs_bad = 0;
1243 UChar vbits8;
sewardj45d94cc2005-04-20 14:44:11 +00001244 Addr ai;
njn1d0825f2006-03-27 11:37:07 +00001245 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +00001246
sewardjc1a2cda2005-04-21 17:34:00 +00001247 PROF_EVENT(35, "mc_STOREVn_slow");
sewardj05a46732006-10-17 01:28:10 +00001248
1249 /* ------------ BEGIN semi-fast cases ------------ */
1250 /* These deal quickly-ish with the common auxiliary primary map
1251 cases on 64-bit platforms. Are merely a speedup hack; can be
1252 omitted without loss of correctness/functionality. Note that in
1253 both cases the "sizeof(void*) == 8" causes these cases to be
1254 folded out by compilers on 32-bit platforms. These are derived
1255 from STOREV64 and STOREV32.
1256 */
1257 if (EXPECTED_TAKEN(sizeof(void*) == 8
1258 && nBits == 64 && VG_IS_8_ALIGNED(a))) {
1259 SecMap* sm = get_secmap_for_reading(a);
1260 UWord sm_off16 = SM_OFF_16(a);
1261 UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
1262 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
1263 (VA_BITS16_DEFINED == vabits16 ||
1264 VA_BITS16_UNDEFINED == vabits16) )) {
1265 /* Handle common case quickly: a is suitably aligned, */
1266 /* is mapped, and is addressible. */
1267 // Convert full V-bits in register to compact 2-bit form.
1268 if (EXPECTED_TAKEN(V_BITS64_DEFINED == vbytes)) {
1269 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
1270 return;
1271 } else if (V_BITS64_UNDEFINED == vbytes) {
1272 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
1273 return;
1274 }
1275 /* else fall into the slow case */
1276 }
1277 /* else fall into the slow case */
1278 }
1279 if (EXPECTED_TAKEN(sizeof(void*) == 8
1280 && nBits == 32 && VG_IS_4_ALIGNED(a))) {
1281 SecMap* sm = get_secmap_for_reading(a);
1282 UWord sm_off = SM_OFF(a);
1283 UWord vabits8 = sm->vabits8[sm_off];
1284 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
1285 (VA_BITS8_DEFINED == vabits8 ||
1286 VA_BITS8_UNDEFINED == vabits8) )) {
1287 /* Handle common case quickly: a is suitably aligned, */
1288 /* is mapped, and is addressible. */
1289 // Convert full V-bits in register to compact 2-bit form.
1290 if (EXPECTED_TAKEN(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
1291 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
1292 return;
1293 } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
1294 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
1295 return;
1296 }
1297 /* else fall into the slow case */
1298 }
1299 /* else fall into the slow case */
1300 }
1301 /* ------------ END semi-fast cases ------------ */
1302
njn45e81252006-03-28 12:35:08 +00001303 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +00001304
1305 /* Dump vbytes in memory, iterating from least to most significant
1306 byte. At the same time establish addressibility of the
1307 location. */
1308 for (i = 0; i < szB; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001309 PROF_EVENT(36, "mc_STOREVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +00001310 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +00001311 vbits8 = vbytes & 0xff;
1312 ok = set_vbits8(ai, vbits8);
1313 if (!ok) n_addrs_bad++;
sewardj45d94cc2005-04-20 14:44:11 +00001314 vbytes >>= 8;
1315 }
1316
1317 /* If an address error has happened, report it. */
1318 if (n_addrs_bad > 0)
njn1d0825f2006-03-27 11:37:07 +00001319 mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
sewardj45d94cc2005-04-20 14:44:11 +00001320}
1321
1322
njn25e49d8e72002-09-23 09:36:25 +00001323/*------------------------------------------------------------*/
1324/*--- Setting permissions over address ranges. ---*/
1325/*------------------------------------------------------------*/
1326
njn1d0825f2006-03-27 11:37:07 +00001327static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
1328 UWord dsm_num )
sewardj23eb2fd2005-04-22 16:29:19 +00001329{
njn1d0825f2006-03-27 11:37:07 +00001330 UWord sm_off, sm_off16;
1331 UWord vabits2 = vabits16 & 0x3;
1332 SizeT lenA, lenB, len_to_next_secmap;
1333 Addr aNext;
sewardjae986ca2005-10-12 12:53:20 +00001334 SecMap* sm;
njn1d0825f2006-03-27 11:37:07 +00001335 SecMap** sm_ptr;
sewardjae986ca2005-10-12 12:53:20 +00001336 SecMap* example_dsm;
1337
sewardj23eb2fd2005-04-22 16:29:19 +00001338 PROF_EVENT(150, "set_address_range_perms");
1339
njn1d0825f2006-03-27 11:37:07 +00001340 /* Check the V+A bits make sense. */
njndbf7ca72006-03-31 11:57:59 +00001341 tl_assert(VA_BITS16_NOACCESS == vabits16 ||
1342 VA_BITS16_UNDEFINED == vabits16 ||
1343 VA_BITS16_DEFINED == vabits16);
sewardj23eb2fd2005-04-22 16:29:19 +00001344
njn1d0825f2006-03-27 11:37:07 +00001345 // This code should never write PDBs; ensure this. (See comment above
1346 // set_vabits2().)
njndbf7ca72006-03-31 11:57:59 +00001347 tl_assert(VA_BITS2_PARTDEFINED != vabits2);
njn1d0825f2006-03-27 11:37:07 +00001348
1349 if (lenT == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001350 return;
1351
njn1d0825f2006-03-27 11:37:07 +00001352 if (lenT > 100 * 1000 * 1000) {
1353 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
1354 Char* s = "unknown???";
njndbf7ca72006-03-31 11:57:59 +00001355 if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
1356 if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
1357 if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
njn1d0825f2006-03-27 11:37:07 +00001358 VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
1359 "large range %lu (%s)", lenT, s);
sewardj23eb2fd2005-04-22 16:29:19 +00001360 }
1361 }
1362
njn1d0825f2006-03-27 11:37:07 +00001363#ifndef PERF_FAST_SARP
sewardj23eb2fd2005-04-22 16:29:19 +00001364 /*------------------ debug-only case ------------------ */
njn1d0825f2006-03-27 11:37:07 +00001365 {
1366 // Endianness doesn't matter here because all bytes are being set to
1367 // the same value.
1368 // Nb: We don't have to worry about updating the sec-V-bits table
1369 // after these set_vabits2() calls because this code never writes
njndbf7ca72006-03-31 11:57:59 +00001370 // VA_BITS2_PARTDEFINED values.
njn1d0825f2006-03-27 11:37:07 +00001371 SizeT i;
1372 for (i = 0; i < lenT; i++) {
1373 set_vabits2(a + i, vabits2);
1374 }
1375 return;
njn25e49d8e72002-09-23 09:36:25 +00001376 }
njn1d0825f2006-03-27 11:37:07 +00001377#endif
sewardj23eb2fd2005-04-22 16:29:19 +00001378
1379 /*------------------ standard handling ------------------ */
sewardj23eb2fd2005-04-22 16:29:19 +00001380
njn1d0825f2006-03-27 11:37:07 +00001381 /* Get the distinguished secondary that we might want
sewardj23eb2fd2005-04-22 16:29:19 +00001382 to use (part of the space-compression scheme). */
njn1d0825f2006-03-27 11:37:07 +00001383 example_dsm = &sm_distinguished[dsm_num];
1384
1385 // We have to handle ranges covering various combinations of partial and
1386 // whole sec-maps. Here is how parts 1, 2 and 3 are used in each case.
1387 // Cases marked with a '*' are common.
1388 //
1389 // TYPE PARTS USED
1390 // ---- ----------
1391 // * one partial sec-map (p) 1
1392 // - one whole sec-map (P) 2
1393 //
1394 // * two partial sec-maps (pp) 1,3
1395 // - one partial, one whole sec-map (pP) 1,2
1396 // - one whole, one partial sec-map (Pp) 2,3
1397 // - two whole sec-maps (PP) 2,2
1398 //
1399 // * one partial, one whole, one partial (pPp) 1,2,3
1400 // - one partial, two whole (pPP) 1,2,2
1401 // - two whole, one partial (PPp) 2,2,3
1402 // - three whole (PPP) 2,2,2
1403 //
1404 // * one partial, N-2 whole, one partial (pP...Pp) 1,2...2,3
1405 // - one partial, N-1 whole (pP...PP) 1,2...2,2
1406 // - N-1 whole, one partial (PP...Pp) 2,2...2,3
1407 // - N whole (PP...PP) 2,2...2,3
1408
1409 // Break up total length (lenT) into two parts: length in the first
1410 // sec-map (lenA), and the rest (lenB); lenT == lenA + lenB.
1411 aNext = start_of_this_sm(a) + SM_SIZE;
1412 len_to_next_secmap = aNext - a;
1413 if ( lenT <= len_to_next_secmap ) {
1414 // Range entirely within one sec-map. Covers almost all cases.
1415 PROF_EVENT(151, "set_address_range_perms-single-secmap");
1416 lenA = lenT;
1417 lenB = 0;
1418 } else if (is_start_of_sm(a)) {
1419 // Range spans at least one whole sec-map, and starts at the beginning
1420 // of a sec-map; skip to Part 2.
1421 PROF_EVENT(152, "set_address_range_perms-startof-secmap");
1422 lenA = 0;
1423 lenB = lenT;
1424 goto part2;
sewardj23eb2fd2005-04-22 16:29:19 +00001425 } else {
njn1d0825f2006-03-27 11:37:07 +00001426 // Range spans two or more sec-maps, first one is partial.
1427 PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
1428 lenA = len_to_next_secmap;
1429 lenB = lenT - lenA;
1430 }
1431
1432 //------------------------------------------------------------------------
1433 // Part 1: Deal with the first sec_map. Most of the time the range will be
1434 // entirely within a sec_map and this part alone will suffice. Also,
1435 // doing it this way lets us avoid repeatedly testing for the crossing of
1436 // a sec-map boundary within these loops.
1437 //------------------------------------------------------------------------
1438
1439 // If it's distinguished, make it undistinguished if necessary.
1440 sm_ptr = get_secmap_ptr(a);
1441 if (is_distinguished_sm(*sm_ptr)) {
1442 if (*sm_ptr == example_dsm) {
1443 // Sec-map already has the V+A bits that we want, so skip.
1444 PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
1445 a = aNext;
1446 lenA = 0;
sewardj23eb2fd2005-04-22 16:29:19 +00001447 } else {
njn1d0825f2006-03-27 11:37:07 +00001448 PROF_EVENT(155, "set_address_range_perms-dist-sm1");
1449 *sm_ptr = copy_for_writing(*sm_ptr);
sewardj23eb2fd2005-04-22 16:29:19 +00001450 }
1451 }
njn1d0825f2006-03-27 11:37:07 +00001452 sm = *sm_ptr;
sewardj23eb2fd2005-04-22 16:29:19 +00001453
njn1d0825f2006-03-27 11:37:07 +00001454 // 1 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001455 while (True) {
sewardj23eb2fd2005-04-22 16:29:19 +00001456 if (VG_IS_8_ALIGNED(a)) break;
njn1d0825f2006-03-27 11:37:07 +00001457 if (lenA < 1) break;
1458 PROF_EVENT(156, "set_address_range_perms-loop1a");
1459 sm_off = SM_OFF(a);
1460 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1461 a += 1;
1462 lenA -= 1;
1463 }
1464 // 8-aligned, 8 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001465 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001466 if (lenA < 8) break;
1467 PROF_EVENT(157, "set_address_range_perms-loop8a");
1468 sm_off16 = SM_OFF_16(a);
1469 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1470 a += 8;
1471 lenA -= 8;
1472 }
1473 // 1 byte steps
1474 while (True) {
1475 if (lenA < 1) break;
1476 PROF_EVENT(158, "set_address_range_perms-loop1b");
1477 sm_off = SM_OFF(a);
1478 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1479 a += 1;
1480 lenA -= 1;
sewardj23eb2fd2005-04-22 16:29:19 +00001481 }
1482
njn1d0825f2006-03-27 11:37:07 +00001483 // We've finished the first sec-map. Is that it?
1484 if (lenB == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001485 return;
1486
njn1d0825f2006-03-27 11:37:07 +00001487 //------------------------------------------------------------------------
1488 // Part 2: Fast-set entire sec-maps at a time.
1489 //------------------------------------------------------------------------
1490 part2:
1491 // 64KB-aligned, 64KB steps.
1492 // Nb: we can reach here with lenB < SM_SIZE
sewardj23eb2fd2005-04-22 16:29:19 +00001493 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001494 if (lenB < SM_SIZE) break;
1495 tl_assert(is_start_of_sm(a));
1496 PROF_EVENT(159, "set_address_range_perms-loop64K");
1497 sm_ptr = get_secmap_ptr(a);
1498 if (!is_distinguished_sm(*sm_ptr)) {
1499 PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
1500 // Free the non-distinguished sec-map that we're replacing. This
1501 // case happens moderately often, enough to be worthwhile.
1502 VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
1503 }
1504 update_SM_counts(*sm_ptr, example_dsm);
1505 // Make the sec-map entry point to the example DSM
1506 *sm_ptr = example_dsm;
1507 lenB -= SM_SIZE;
1508 a += SM_SIZE;
1509 }
sewardj23eb2fd2005-04-22 16:29:19 +00001510
njn1d0825f2006-03-27 11:37:07 +00001511 // We've finished the whole sec-maps. Is that it?
1512 if (lenB == 0)
1513 return;
1514
1515 //------------------------------------------------------------------------
1516 // Part 3: Finish off the final partial sec-map, if necessary.
1517 //------------------------------------------------------------------------
1518
1519 tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);
1520
1521 // If it's distinguished, make it undistinguished if necessary.
1522 sm_ptr = get_secmap_ptr(a);
1523 if (is_distinguished_sm(*sm_ptr)) {
1524 if (*sm_ptr == example_dsm) {
1525 // Sec-map already has the V+A bits that we want, so stop.
1526 PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
1527 return;
1528 } else {
1529 PROF_EVENT(162, "set_address_range_perms-dist-sm2");
1530 *sm_ptr = copy_for_writing(*sm_ptr);
1531 }
1532 }
1533 sm = *sm_ptr;
1534
1535 // 8-aligned, 8 byte steps
1536 while (True) {
1537 if (lenB < 8) break;
1538 PROF_EVENT(163, "set_address_range_perms-loop8b");
1539 sm_off16 = SM_OFF_16(a);
1540 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1541 a += 8;
1542 lenB -= 8;
1543 }
1544 // 1 byte steps
1545 while (True) {
1546 if (lenB < 1) return;
1547 PROF_EVENT(164, "set_address_range_perms-loop1c");
1548 sm_off = SM_OFF(a);
1549 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1550 a += 1;
1551 lenB -= 1;
1552 }
sewardj23eb2fd2005-04-22 16:29:19 +00001553}
sewardj45d94cc2005-04-20 14:44:11 +00001554
sewardjc859fbf2005-04-22 21:10:28 +00001555
1556/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +00001557
njndbf7ca72006-03-31 11:57:59 +00001558void MC_(make_mem_noaccess) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001559{
njndbf7ca72006-03-31 11:57:59 +00001560 PROF_EVENT(40, "MC_(make_mem_noaccess)");
1561 DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
njn1d0825f2006-03-27 11:37:07 +00001562 set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
njn25e49d8e72002-09-23 09:36:25 +00001563}
1564
njndbf7ca72006-03-31 11:57:59 +00001565void MC_(make_mem_undefined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001566{
njndbf7ca72006-03-31 11:57:59 +00001567 PROF_EVENT(41, "MC_(make_mem_undefined)");
1568 DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
1569 set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001570}
1571
njndbf7ca72006-03-31 11:57:59 +00001572void MC_(make_mem_defined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001573{
njndbf7ca72006-03-31 11:57:59 +00001574 PROF_EVENT(42, "MC_(make_mem_defined)");
1575 DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
1576 set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001577}
1578
sewardjfb1e9ad2006-03-10 13:41:58 +00001579/* For each byte in [a,a+len), if the byte is addressable, make it be
1580 defined, but if it isn't addressible, leave it alone. In other
njndbf7ca72006-03-31 11:57:59 +00001581 words a version of MC_(make_mem_defined) that doesn't mess with
sewardjfb1e9ad2006-03-10 13:41:58 +00001582 addressibility. Low-performance implementation. */
njndbf7ca72006-03-31 11:57:59 +00001583static void make_mem_defined_if_addressable ( Addr a, SizeT len )
sewardjfb1e9ad2006-03-10 13:41:58 +00001584{
1585 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001586 UChar vabits2;
njndbf7ca72006-03-31 11:57:59 +00001587 DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
sewardjfb1e9ad2006-03-10 13:41:58 +00001588 for (i = 0; i < len; i++) {
njn1d0825f2006-03-27 11:37:07 +00001589 vabits2 = get_vabits2( a+i );
1590 if (EXPECTED_TAKEN(VA_BITS2_NOACCESS != vabits2)) {
njndbf7ca72006-03-31 11:57:59 +00001591 set_vabits2(a+i, VA_BITS2_DEFINED);
njn1d0825f2006-03-27 11:37:07 +00001592 }
sewardjfb1e9ad2006-03-10 13:41:58 +00001593 }
1594}
1595
njn9b007f62003-04-07 14:40:25 +00001596
sewardj45f4e7c2005-09-27 19:20:21 +00001597/* --- Block-copy permissions (needed for implementing realloc() and
1598 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +00001599
njn1d0825f2006-03-27 11:37:07 +00001600void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
sewardjc859fbf2005-04-22 21:10:28 +00001601{
sewardj45f4e7c2005-09-27 19:20:21 +00001602 SizeT i, j;
sewardjf2184912006-05-03 22:13:57 +00001603 UChar vabits2, vabits8;
1604 Bool aligned, nooverlap;
sewardjc859fbf2005-04-22 21:10:28 +00001605
njn1d0825f2006-03-27 11:37:07 +00001606 DEBUG("MC_(copy_address_range_state)\n");
1607 PROF_EVENT(50, "MC_(copy_address_range_state)");
sewardj45f4e7c2005-09-27 19:20:21 +00001608
sewardjf2184912006-05-03 22:13:57 +00001609 if (len == 0 || src == dst)
sewardj45f4e7c2005-09-27 19:20:21 +00001610 return;
1611
sewardjf2184912006-05-03 22:13:57 +00001612 aligned = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
1613 nooverlap = src+len <= dst || dst+len <= src;
sewardj45f4e7c2005-09-27 19:20:21 +00001614
sewardjf2184912006-05-03 22:13:57 +00001615 if (nooverlap && aligned) {
1616
1617 /* Vectorised fast case, when no overlap and suitably aligned */
1618 /* vector loop */
1619 i = 0;
1620 while (len >= 4) {
1621 vabits8 = get_vabits8_for_aligned_word32( src+i );
1622 set_vabits8_for_aligned_word32( dst+i, vabits8 );
1623 if (EXPECTED_TAKEN(VA_BITS8_DEFINED == vabits8
1624 || VA_BITS8_UNDEFINED == vabits8
1625 || VA_BITS8_NOACCESS == vabits8)) {
1626 /* do nothing */
1627 } else {
1628 /* have to copy secondary map info */
1629 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
1630 set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
1631 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
1632 set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
1633 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
1634 set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
1635 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
1636 set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
1637 }
1638 i += 4;
1639 len -= 4;
1640 }
1641 /* fixup loop */
1642 while (len >= 1) {
njn1d0825f2006-03-27 11:37:07 +00001643 vabits2 = get_vabits2( src+i );
1644 set_vabits2( dst+i, vabits2 );
njndbf7ca72006-03-31 11:57:59 +00001645 if (VA_BITS2_PARTDEFINED == vabits2) {
njn1d0825f2006-03-27 11:37:07 +00001646 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1647 }
sewardjf2184912006-05-03 22:13:57 +00001648 i++;
1649 len--;
1650 }
1651
1652 } else {
1653
1654 /* We have to do things the slow way */
1655 if (src < dst) {
1656 for (i = 0, j = len-1; i < len; i++, j--) {
1657 PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
1658 vabits2 = get_vabits2( src+j );
1659 set_vabits2( dst+j, vabits2 );
1660 if (VA_BITS2_PARTDEFINED == vabits2) {
1661 set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
1662 }
1663 }
1664 }
1665
1666 if (src > dst) {
1667 for (i = 0; i < len; i++) {
1668 PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
1669 vabits2 = get_vabits2( src+i );
1670 set_vabits2( dst+i, vabits2 );
1671 if (VA_BITS2_PARTDEFINED == vabits2) {
1672 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1673 }
1674 }
sewardj45f4e7c2005-09-27 19:20:21 +00001675 }
sewardjc859fbf2005-04-22 21:10:28 +00001676 }
sewardjf2184912006-05-03 22:13:57 +00001677
sewardjc859fbf2005-04-22 21:10:28 +00001678}
1679
1680
1681/* --- Fast case permission setters, for dealing with stacks. --- */
1682
njn1d0825f2006-03-27 11:37:07 +00001683static INLINE
njndbf7ca72006-03-31 11:57:59 +00001684void make_aligned_word32_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001685{
njn1d0825f2006-03-27 11:37:07 +00001686 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001687 SecMap* sm;
1688
njndbf7ca72006-03-31 11:57:59 +00001689 PROF_EVENT(300, "make_aligned_word32_undefined");
sewardj5d28efc2005-04-21 22:16:29 +00001690
njn1d0825f2006-03-27 11:37:07 +00001691#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001692 MC_(make_mem_undefined)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001693#else
1694 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001695 PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
1696 MC_(make_mem_undefined)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001697 return;
1698 }
1699
njna7c7ebd2006-03-28 12:51:02 +00001700 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001701 sm_off = SM_OFF(a);
njndbf7ca72006-03-31 11:57:59 +00001702 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001703#endif
njn9b007f62003-04-07 14:40:25 +00001704}
1705
sewardj5d28efc2005-04-21 22:16:29 +00001706
njn1d0825f2006-03-27 11:37:07 +00001707static INLINE
1708void make_aligned_word32_noaccess ( Addr a )
sewardj5d28efc2005-04-21 22:16:29 +00001709{
njn1d0825f2006-03-27 11:37:07 +00001710 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001711 SecMap* sm;
1712
sewardj5d28efc2005-04-21 22:16:29 +00001713 PROF_EVENT(310, "make_aligned_word32_noaccess");
1714
njn1d0825f2006-03-27 11:37:07 +00001715#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001716 MC_(make_mem_noaccess)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001717#else
1718 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj5d28efc2005-04-21 22:16:29 +00001719 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001720 MC_(make_mem_noaccess)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001721 return;
1722 }
1723
njna7c7ebd2006-03-28 12:51:02 +00001724 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001725 sm_off = SM_OFF(a);
1726 sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
1727#endif
sewardj5d28efc2005-04-21 22:16:29 +00001728}
1729
1730
njn9b007f62003-04-07 14:40:25 +00001731/* Nb: by "aligned" here we mean 8-byte aligned */
njn1d0825f2006-03-27 11:37:07 +00001732static INLINE
njndbf7ca72006-03-31 11:57:59 +00001733void make_aligned_word64_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001734{
njn1d0825f2006-03-27 11:37:07 +00001735 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001736 SecMap* sm;
1737
njndbf7ca72006-03-31 11:57:59 +00001738 PROF_EVENT(320, "make_aligned_word64_undefined");
sewardj23eb2fd2005-04-22 16:29:19 +00001739
njn1d0825f2006-03-27 11:37:07 +00001740#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001741 MC_(make_mem_undefined)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001742#else
1743 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001744 PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
1745 MC_(make_mem_undefined)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001746 return;
1747 }
1748
njna7c7ebd2006-03-28 12:51:02 +00001749 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001750 sm_off16 = SM_OFF_16(a);
njndbf7ca72006-03-31 11:57:59 +00001751 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001752#endif
njn9b007f62003-04-07 14:40:25 +00001753}
1754
sewardj23eb2fd2005-04-22 16:29:19 +00001755
njn1d0825f2006-03-27 11:37:07 +00001756static INLINE
1757void make_aligned_word64_noaccess ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001758{
njn1d0825f2006-03-27 11:37:07 +00001759 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001760 SecMap* sm;
1761
sewardj23eb2fd2005-04-22 16:29:19 +00001762 PROF_EVENT(330, "make_aligned_word64_noaccess");
1763
njn1d0825f2006-03-27 11:37:07 +00001764#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001765 MC_(make_mem_noaccess)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001766#else
1767 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +00001768 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001769 MC_(make_mem_noaccess)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001770 return;
1771 }
1772
njna7c7ebd2006-03-28 12:51:02 +00001773 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001774 sm_off16 = SM_OFF_16(a);
1775 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
1776#endif
njn9b007f62003-04-07 14:40:25 +00001777}
1778
sewardj23eb2fd2005-04-22 16:29:19 +00001779
njn1d0825f2006-03-27 11:37:07 +00001780/*------------------------------------------------------------*/
1781/*--- Stack pointer adjustment ---*/
1782/*------------------------------------------------------------*/
1783
1784static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
1785{
1786 PROF_EVENT(110, "new_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001787 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001788 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00001789 } else {
njndbf7ca72006-03-31 11:57:59 +00001790 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
njn1d0825f2006-03-27 11:37:07 +00001791 }
1792}
1793
1794static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
1795{
1796 PROF_EVENT(120, "die_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001797 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001798 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001799 } else {
njndbf7ca72006-03-31 11:57:59 +00001800 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
njn1d0825f2006-03-27 11:37:07 +00001801 }
1802}
1803
1804static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
1805{
1806 PROF_EVENT(111, "new_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001807 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001808 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
sewardj05a46732006-10-17 01:28:10 +00001809 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001810 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1811 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001812 } else {
njndbf7ca72006-03-31 11:57:59 +00001813 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
njn1d0825f2006-03-27 11:37:07 +00001814 }
1815}
1816
1817static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
1818{
1819 PROF_EVENT(121, "die_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001820 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001821 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001822 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001823 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
1824 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001825 } else {
njndbf7ca72006-03-31 11:57:59 +00001826 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
njn1d0825f2006-03-27 11:37:07 +00001827 }
1828}
1829
1830static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
1831{
1832 PROF_EVENT(112, "new_mem_stack_12");
sewardj05a46732006-10-17 01:28:10 +00001833 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001834 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1835 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001836 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001837 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1838 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001839 } else {
njndbf7ca72006-03-31 11:57:59 +00001840 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
njn1d0825f2006-03-27 11:37:07 +00001841 }
1842}
1843
/* 12 stack bytes released: mark them no-access with an 8+4 or 4+8
   split depending on alignment.
   NOTE(review): unlike the sibling handlers, the first alignment test
   checks new_SP-12 WITHOUT the -VG_STACK_REDZONE_SZB offset (see the
   comment below).  That is equivalent only if VG_STACK_REDZONE_SZB is
   a multiple of 8 -- confirm against its definition. */
static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
{
   PROF_EVENT(122, "die_mem_stack_12");
   /* Note the -12 in the test */
   if (VG_IS_8_ALIGNED(new_SP-12)) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
   }
}
1858
1859static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
1860{
1861 PROF_EVENT(113, "new_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001862 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001863 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1864 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001865 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001866 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1867 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1868 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
njn1d0825f2006-03-27 11:37:07 +00001869 } else {
njndbf7ca72006-03-31 11:57:59 +00001870 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
njn1d0825f2006-03-27 11:37:07 +00001871 }
1872}
1873
1874static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
1875{
1876 PROF_EVENT(123, "die_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001877 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001878 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1879 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001880 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001881 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1882 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1883 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001884 } else {
njndbf7ca72006-03-31 11:57:59 +00001885 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
njn1d0825f2006-03-27 11:37:07 +00001886 }
1887}
1888
1889static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
1890{
1891 PROF_EVENT(114, "new_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001892 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001893 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1894 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1895 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1896 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
sewardj05a46732006-10-17 01:28:10 +00001897 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001898 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1899 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1900 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
1901 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
1902 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
njn1d0825f2006-03-27 11:37:07 +00001903 } else {
njndbf7ca72006-03-31 11:57:59 +00001904 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
njn1d0825f2006-03-27 11:37:07 +00001905 }
1906}
1907
1908static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
1909{
1910 PROF_EVENT(124, "die_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001911 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001912 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1913 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1914 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1915 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
sewardj05a46732006-10-17 01:28:10 +00001916 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001917 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1918 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
1919 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
1920 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1921 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001922 } else {
njndbf7ca72006-03-31 11:57:59 +00001923 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
njn1d0825f2006-03-27 11:37:07 +00001924 }
1925}
1926
1927static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
1928{
1929 PROF_EVENT(115, "new_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001930 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001931 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1932 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1933 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1934 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1935 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1936 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1937 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1938 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1939 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1940 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1941 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1942 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1943 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1944 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
njn1d0825f2006-03-27 11:37:07 +00001945 } else {
njndbf7ca72006-03-31 11:57:59 +00001946 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
njn1d0825f2006-03-27 11:37:07 +00001947 }
1948}
1949
1950static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
1951{
1952 PROF_EVENT(125, "die_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001953 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001954 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1955 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1956 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1957 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1958 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1959 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1960 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1961 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1962 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1963 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1964 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1965 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1966 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1967 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001968 } else {
njndbf7ca72006-03-31 11:57:59 +00001969 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
njn1d0825f2006-03-27 11:37:07 +00001970 }
1971}
1972
1973static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
1974{
1975 PROF_EVENT(116, "new_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00001976 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001977 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1978 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1979 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1980 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1981 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1982 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1983 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1984 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1985 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1986 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1987 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1988 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1989 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1990 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
1991 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
1992 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
njn1d0825f2006-03-27 11:37:07 +00001993 } else {
njndbf7ca72006-03-31 11:57:59 +00001994 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
njn1d0825f2006-03-27 11:37:07 +00001995 }
1996}
1997
1998static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
1999{
2000 PROF_EVENT(126, "die_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00002001 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002002 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2003 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2004 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2005 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2006 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2007 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2008 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2009 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2010 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2011 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2012 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2013 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2014 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2015 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2016 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2017 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002018 } else {
njndbf7ca72006-03-31 11:57:59 +00002019 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
njn1d0825f2006-03-27 11:37:07 +00002020 }
2021}
2022
2023static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
2024{
2025 PROF_EVENT(117, "new_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002026 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002027 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2028 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2029 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2030 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2031 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2032 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2033 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2034 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2035 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2036 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2037 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2038 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2039 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2040 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2041 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2042 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2043 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2044 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
njn1d0825f2006-03-27 11:37:07 +00002045 } else {
njndbf7ca72006-03-31 11:57:59 +00002046 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
njn1d0825f2006-03-27 11:37:07 +00002047 }
2048}
2049
2050static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
2051{
2052 PROF_EVENT(127, "die_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002053 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002054 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2055 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2056 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2057 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2058 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2059 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2060 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2061 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2062 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2063 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2064 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2065 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2066 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2067 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2068 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2069 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2070 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2071 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002072 } else {
njndbf7ca72006-03-31 11:57:59 +00002073 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
njn1d0825f2006-03-27 11:37:07 +00002074 }
2075}
2076
2077static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
2078{
2079 PROF_EVENT(118, "new_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002080 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002081 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2082 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2083 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2084 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2085 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2086 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2087 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2088 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2089 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2090 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2091 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2092 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2093 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2094 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2095 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2096 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2097 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2098 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
2099 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144);
2100 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152);
njn1d0825f2006-03-27 11:37:07 +00002101 } else {
njndbf7ca72006-03-31 11:57:59 +00002102 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
njn1d0825f2006-03-27 11:37:07 +00002103 }
2104}
2105
2106static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
2107{
2108 PROF_EVENT(128, "die_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002109 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002110 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
2111 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
2112 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2113 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2114 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2115 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2116 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2117 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2118 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2119 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2120 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2121 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2122 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2123 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2124 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2125 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2126 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2127 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2128 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2129 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002130 } else {
njndbf7ca72006-03-31 11:57:59 +00002131 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
njn1d0825f2006-03-27 11:37:07 +00002132 }
2133}
2134
static void mc_new_mem_stack ( Addr a, SizeT len )
{
   /* Generic (any-size) stack-growth handler: mark the new area,
      offset by the client stack redzone, as addressable but
      undefined.  The fixed-size variants above are fast-path
      specialisations of this. */
   PROF_EVENT(115, "new_mem_stack");
   MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
}
2140
static void mc_die_mem_stack ( Addr a, SizeT len )
{
   /* Generic (any-size) stack-shrink handler: mark the abandoned
      area, offset by the client stack redzone, as no-access. */
   PROF_EVENT(125, "die_mem_stack");
   MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
}
njn9b007f62003-04-07 14:40:25 +00002146
sewardj45d94cc2005-04-20 14:44:11 +00002147
njn1d0825f2006-03-27 11:37:07 +00002148/* The AMD64 ABI says:
2149
2150 "The 128-byte area beyond the location pointed to by %rsp is considered
2151 to be reserved and shall not be modified by signal or interrupt
2152 handlers. Therefore, functions may use this area for temporary data
2153 that is not needed across function calls. In particular, leaf functions
2154 may use this area for their entire stack frame, rather than adjusting
2155 the stack pointer in the prologue and epilogue. This area is known as
2156 red zone [sic]."
2157
2158 So after any call or return we need to mark this redzone as containing
2159 undefined values.
2160
2161 Consider this: we're in function f. f calls g. g moves rsp down
2162 modestly (say 16 bytes) and writes stuff all over the red zone, making it
2163 defined. g returns. f is buggy and reads from parts of the red zone
2164 that it didn't write on. But because g filled that area in, f is going
2165 to be picking up defined V bits and so any errors from reading bits of
2166 the red zone it didn't write, will be missed. The only solution I could
2167 think of was to make the red zone undefined when g returns to f.
2168
2169 This is in accordance with the ABI, which makes it clear the redzone
2170 is volatile across function calls.
2171
2172 The problem occurs the other way round too: f could fill the RZ up
2173 with defined values and g could mistakenly read them. So the RZ
2174 also needs to be nuked on function calls.
2175*/
sewardj826ec492005-05-12 18:05:00 +00002176void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
2177{
2178 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +00002179 if (0)
2180 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
2181
2182# if 0
2183 /* Really slow version */
njndbf7ca72006-03-31 11:57:59 +00002184 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002185# endif
2186
2187# if 0
2188 /* Slow(ish) version, which is fairly easily seen to be correct.
2189 */
2190 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
njndbf7ca72006-03-31 11:57:59 +00002191 make_aligned_word64_undefined(base + 0);
2192 make_aligned_word64_undefined(base + 8);
2193 make_aligned_word64_undefined(base + 16);
2194 make_aligned_word64_undefined(base + 24);
sewardj2a3a1a72005-05-12 23:25:43 +00002195
njndbf7ca72006-03-31 11:57:59 +00002196 make_aligned_word64_undefined(base + 32);
2197 make_aligned_word64_undefined(base + 40);
2198 make_aligned_word64_undefined(base + 48);
2199 make_aligned_word64_undefined(base + 56);
sewardj2a3a1a72005-05-12 23:25:43 +00002200
njndbf7ca72006-03-31 11:57:59 +00002201 make_aligned_word64_undefined(base + 64);
2202 make_aligned_word64_undefined(base + 72);
2203 make_aligned_word64_undefined(base + 80);
2204 make_aligned_word64_undefined(base + 88);
sewardj2a3a1a72005-05-12 23:25:43 +00002205
njndbf7ca72006-03-31 11:57:59 +00002206 make_aligned_word64_undefined(base + 96);
2207 make_aligned_word64_undefined(base + 104);
2208 make_aligned_word64_undefined(base + 112);
2209 make_aligned_word64_undefined(base + 120);
sewardj2a3a1a72005-05-12 23:25:43 +00002210 } else {
njndbf7ca72006-03-31 11:57:59 +00002211 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002212 }
2213# endif
2214
2215 /* Idea is: go fast when
2216 * 8-aligned and length is 128
2217 * the sm is available in the main primary map
njn1d0825f2006-03-27 11:37:07 +00002218 * the address range falls entirely with a single secondary map
2219 If all those conditions hold, just update the V+A bits by writing
2220 directly into the vabits array. (If the sm was distinguished, this
2221 will make a copy and then write to it.)
sewardj2a3a1a72005-05-12 23:25:43 +00002222 */
njn1d0825f2006-03-27 11:37:07 +00002223 if (EXPECTED_TAKEN( len == 128 && VG_IS_8_ALIGNED(base) )) {
2224 /* Now we know the address range is suitably sized and aligned. */
2225 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002226 UWord a_hi = (UWord)(base + 128 - 1);
njn1d0825f2006-03-27 11:37:07 +00002227 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2228 if (a_hi < MAX_PRIMARY_ADDRESS) {
2229 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002230 SecMap* sm = get_secmap_for_writing_low(a_lo);
2231 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2a3a1a72005-05-12 23:25:43 +00002232 /* Now we know that the entire address range falls within a
2233 single secondary map, and that that secondary 'lives' in
2234 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002235 if (EXPECTED_TAKEN(sm == sm_hi)) {
2236 // Finally, we know that the range is entirely within one secmap.
2237 UWord v_off = SM_OFF(a_lo);
2238 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002239 p[ 0] = VA_BITS16_UNDEFINED;
2240 p[ 1] = VA_BITS16_UNDEFINED;
2241 p[ 2] = VA_BITS16_UNDEFINED;
2242 p[ 3] = VA_BITS16_UNDEFINED;
2243 p[ 4] = VA_BITS16_UNDEFINED;
2244 p[ 5] = VA_BITS16_UNDEFINED;
2245 p[ 6] = VA_BITS16_UNDEFINED;
2246 p[ 7] = VA_BITS16_UNDEFINED;
2247 p[ 8] = VA_BITS16_UNDEFINED;
2248 p[ 9] = VA_BITS16_UNDEFINED;
2249 p[10] = VA_BITS16_UNDEFINED;
2250 p[11] = VA_BITS16_UNDEFINED;
2251 p[12] = VA_BITS16_UNDEFINED;
2252 p[13] = VA_BITS16_UNDEFINED;
2253 p[14] = VA_BITS16_UNDEFINED;
2254 p[15] = VA_BITS16_UNDEFINED;
sewardj2a3a1a72005-05-12 23:25:43 +00002255 return;
njn1d0825f2006-03-27 11:37:07 +00002256 }
sewardj2a3a1a72005-05-12 23:25:43 +00002257 }
2258 }
2259
sewardj2e1a6772006-01-18 04:16:27 +00002260 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
sewardj3f5f5562006-06-16 21:39:08 +00002261 if (EXPECTED_TAKEN( len == 288 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00002262 /* Now we know the address range is suitably sized and aligned. */
2263 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002264 UWord a_hi = (UWord)(base + 288 - 1);
njn1d0825f2006-03-27 11:37:07 +00002265 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2266 if (a_hi < MAX_PRIMARY_ADDRESS) {
2267 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002268 SecMap* sm = get_secmap_for_writing_low(a_lo);
2269 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2e1a6772006-01-18 04:16:27 +00002270 /* Now we know that the entire address range falls within a
2271 single secondary map, and that that secondary 'lives' in
2272 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002273 if (EXPECTED_TAKEN(sm == sm_hi)) {
2274 // Finally, we know that the range is entirely within one secmap.
2275 UWord v_off = SM_OFF(a_lo);
2276 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002277 p[ 0] = VA_BITS16_UNDEFINED;
2278 p[ 1] = VA_BITS16_UNDEFINED;
2279 p[ 2] = VA_BITS16_UNDEFINED;
2280 p[ 3] = VA_BITS16_UNDEFINED;
2281 p[ 4] = VA_BITS16_UNDEFINED;
2282 p[ 5] = VA_BITS16_UNDEFINED;
2283 p[ 6] = VA_BITS16_UNDEFINED;
2284 p[ 7] = VA_BITS16_UNDEFINED;
2285 p[ 8] = VA_BITS16_UNDEFINED;
2286 p[ 9] = VA_BITS16_UNDEFINED;
2287 p[10] = VA_BITS16_UNDEFINED;
2288 p[11] = VA_BITS16_UNDEFINED;
2289 p[12] = VA_BITS16_UNDEFINED;
2290 p[13] = VA_BITS16_UNDEFINED;
2291 p[14] = VA_BITS16_UNDEFINED;
2292 p[15] = VA_BITS16_UNDEFINED;
2293 p[16] = VA_BITS16_UNDEFINED;
2294 p[17] = VA_BITS16_UNDEFINED;
2295 p[18] = VA_BITS16_UNDEFINED;
2296 p[19] = VA_BITS16_UNDEFINED;
2297 p[20] = VA_BITS16_UNDEFINED;
2298 p[21] = VA_BITS16_UNDEFINED;
2299 p[22] = VA_BITS16_UNDEFINED;
2300 p[23] = VA_BITS16_UNDEFINED;
2301 p[24] = VA_BITS16_UNDEFINED;
2302 p[25] = VA_BITS16_UNDEFINED;
2303 p[26] = VA_BITS16_UNDEFINED;
2304 p[27] = VA_BITS16_UNDEFINED;
2305 p[28] = VA_BITS16_UNDEFINED;
2306 p[29] = VA_BITS16_UNDEFINED;
2307 p[30] = VA_BITS16_UNDEFINED;
2308 p[31] = VA_BITS16_UNDEFINED;
2309 p[32] = VA_BITS16_UNDEFINED;
2310 p[33] = VA_BITS16_UNDEFINED;
2311 p[34] = VA_BITS16_UNDEFINED;
2312 p[35] = VA_BITS16_UNDEFINED;
sewardj2e1a6772006-01-18 04:16:27 +00002313 return;
njn1d0825f2006-03-27 11:37:07 +00002314 }
sewardj2e1a6772006-01-18 04:16:27 +00002315 }
2316 }
2317
sewardj2a3a1a72005-05-12 23:25:43 +00002318 /* else fall into slow case */
njndbf7ca72006-03-31 11:57:59 +00002319 MC_(make_mem_undefined)(base, len);
sewardj826ec492005-05-12 18:05:00 +00002320}
2321
2322
nethercote8b76fe52004-11-08 19:20:09 +00002323/*------------------------------------------------------------*/
2324/*--- Checking memory ---*/
2325/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002326
/* Outcome of a definedness/addressability query over a memory range.
   The concrete values are deliberately not 0/1 -- presumably to catch
   accidental mixing with Bool; TODO confirm. */
typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;
2335
njn25e49d8e72002-09-23 09:36:25 +00002336/* Check permissions for address range. If inadequate permissions
2337 exist, *bad_addr is set to the offending address, so the caller can
2338 know what it is. */
2339
/* Returns True if [a .. a+len) is entirely non-addressable.  Otherwise
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  The functions below are
   similar. */
njndbf7ca72006-03-31 11:57:59 +00002344Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00002345{
nethercote451eae92004-11-02 13:06:32 +00002346 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002347 UWord vabits2;
2348
njndbf7ca72006-03-31 11:57:59 +00002349 PROF_EVENT(60, "check_mem_is_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00002350 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002351 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
njn1d0825f2006-03-27 11:37:07 +00002352 vabits2 = get_vabits2(a);
2353 if (VA_BITS2_NOACCESS != vabits2) {
2354 if (bad_addr != NULL) *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00002355 return False;
2356 }
2357 a++;
2358 }
2359 return True;
2360}
2361
njndbf7ca72006-03-31 11:57:59 +00002362static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002363{
nethercote451eae92004-11-02 13:06:32 +00002364 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002365 UWord vabits2;
2366
njndbf7ca72006-03-31 11:57:59 +00002367 PROF_EVENT(62, "is_mem_addressable");
njn25e49d8e72002-09-23 09:36:25 +00002368 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002369 PROF_EVENT(63, "is_mem_addressable(loop)");
njn1d0825f2006-03-27 11:37:07 +00002370 vabits2 = get_vabits2(a);
2371 if (VA_BITS2_NOACCESS == vabits2) {
njn25e49d8e72002-09-23 09:36:25 +00002372 if (bad_addr != NULL) *bad_addr = a;
2373 return False;
2374 }
2375 a++;
2376 }
2377 return True;
2378}
2379
njndbf7ca72006-03-31 11:57:59 +00002380static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002381{
nethercote451eae92004-11-02 13:06:32 +00002382 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002383 UWord vabits2;
njn25e49d8e72002-09-23 09:36:25 +00002384
njndbf7ca72006-03-31 11:57:59 +00002385 PROF_EVENT(64, "is_mem_defined");
2386 DEBUG("is_mem_defined\n");
njn25e49d8e72002-09-23 09:36:25 +00002387 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002388 PROF_EVENT(65, "is_mem_defined(loop)");
njn1d0825f2006-03-27 11:37:07 +00002389 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002390 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002391 // Error! Nb: Report addressability errors in preference to
2392 // definedness errors. And don't report definedeness errors unless
2393 // --undef-value-errors=yes.
2394 if (bad_addr != NULL) *bad_addr = a;
2395 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2396 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002397 }
2398 a++;
2399 }
nethercote8b76fe52004-11-08 19:20:09 +00002400 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00002401}
2402
2403
2404/* Check a zero-terminated ascii string. Tricky -- don't want to
2405 examine the actual bytes, to find the end, until we're sure it is
2406 safe to do so. */
2407
njndbf7ca72006-03-31 11:57:59 +00002408static Bool mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002409{
njn1d0825f2006-03-27 11:37:07 +00002410 UWord vabits2;
2411
njndbf7ca72006-03-31 11:57:59 +00002412 PROF_EVENT(66, "mc_is_defined_asciiz");
2413 DEBUG("mc_is_defined_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00002414 while (True) {
njndbf7ca72006-03-31 11:57:59 +00002415 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
njn1d0825f2006-03-27 11:37:07 +00002416 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002417 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002418 // Error! Nb: Report addressability errors in preference to
2419 // definedness errors. And don't report definedeness errors unless
2420 // --undef-value-errors=yes.
2421 if (bad_addr != NULL) *bad_addr = a;
2422 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2423 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002424 }
2425 /* Ok, a is safe to read. */
njn1d0825f2006-03-27 11:37:07 +00002426 if (* ((UChar*)a) == 0) {
sewardj45d94cc2005-04-20 14:44:11 +00002427 return MC_Ok;
njn1d0825f2006-03-27 11:37:07 +00002428 }
njn25e49d8e72002-09-23 09:36:25 +00002429 a++;
2430 }
2431}
2432
2433
2434/*------------------------------------------------------------*/
2435/*--- Memory event handlers ---*/
2436/*------------------------------------------------------------*/
2437
njn25e49d8e72002-09-23 09:36:25 +00002438static
njndbf7ca72006-03-31 11:57:59 +00002439void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
2440 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002441{
njn25e49d8e72002-09-23 09:36:25 +00002442 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002443 Bool ok = is_mem_addressable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002444
njn25e49d8e72002-09-23 09:36:25 +00002445 if (!ok) {
2446 switch (part) {
2447 case Vg_CoreSysCall:
njn1d0825f2006-03-27 11:37:07 +00002448 mc_record_param_error ( tid, bad_addr, /*isReg*/False,
nethercote8b76fe52004-11-08 19:20:09 +00002449 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002450 break;
2451
2452 case Vg_CorePThread:
2453 case Vg_CoreSignal:
njn1d0825f2006-03-27 11:37:07 +00002454 mc_record_core_mem_error( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002455 break;
2456
2457 default:
njndbf7ca72006-03-31 11:57:59 +00002458 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002459 }
2460 }
njn25e49d8e72002-09-23 09:36:25 +00002461}
2462
2463static
njndbf7ca72006-03-31 11:57:59 +00002464void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00002465 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002466{
njn25e49d8e72002-09-23 09:36:25 +00002467 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002468 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00002469
nethercote8b76fe52004-11-08 19:20:09 +00002470 if (MC_Ok != res) {
2471 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00002472
njn25e49d8e72002-09-23 09:36:25 +00002473 switch (part) {
2474 case Vg_CoreSysCall:
njn1d0825f2006-03-27 11:37:07 +00002475 mc_record_param_error ( tid, bad_addr, /*isReg*/False,
njndbf7ca72006-03-31 11:57:59 +00002476 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002477 break;
2478
njn1d0825f2006-03-27 11:37:07 +00002479 case Vg_CoreClientReq: // Kludge: make this a CoreMemErr
njn25e49d8e72002-09-23 09:36:25 +00002480 case Vg_CorePThread:
njn1d0825f2006-03-27 11:37:07 +00002481 mc_record_core_mem_error( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002482 break;
2483
2484 /* If we're being asked to jump to a silly address, record an error
2485 message before potentially crashing the entire system. */
2486 case Vg_CoreTranslate:
njn1d0825f2006-03-27 11:37:07 +00002487 mc_record_jump_error( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002488 break;
2489
2490 default:
njndbf7ca72006-03-31 11:57:59 +00002491 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002492 }
2493 }
njn25e49d8e72002-09-23 09:36:25 +00002494}
2495
2496static
njndbf7ca72006-03-31 11:57:59 +00002497void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00002498 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00002499{
nethercote8b76fe52004-11-08 19:20:09 +00002500 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00002501 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00002502
njnca82cc02004-11-22 17:18:48 +00002503 tl_assert(part == Vg_CoreSysCall);
njndbf7ca72006-03-31 11:57:59 +00002504 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00002505 if (MC_Ok != res) {
2506 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
njn1d0825f2006-03-27 11:37:07 +00002507 mc_record_param_error ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002508 }
njn25e49d8e72002-09-23 09:36:25 +00002509}
2510
static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it defined.  Seems to work... */
   /* (rr/ww/xx -- the segment's read/write/execute bits -- are
      deliberately unused here.) */
   // Because code is defined, initialised variables get put in the data
   // segment and are defined, and uninitialised variables get put in the
   // bss segment and are auto-zeroed (and so defined).
   //
   // It's possible that there will be padding between global variables.
   // This will also be auto-zeroed, and marked as defined by Memcheck.  If
   // a program uses it, Memcheck will not complain.  This is arguably a
   // false negative, but it's a grey area -- the behaviour is defined (the
   // padding is zeroed) but it's probably not what the user intended.  And
   // we can't avoid it.
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a, (ULong)len, rr, ww, xx);
   MC_(make_mem_defined)(a, len);
}
2529
2530static
njnb8dca862005-03-14 02:42:44 +00002531void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002532{
njndbf7ca72006-03-31 11:57:59 +00002533 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002534}
2535
static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   /* Memory just written by the core (e.g. syscall out-parameters)
      becomes addressable and defined.  'part'/'tid' are unused. */
   MC_(make_mem_defined)(a, len);
}
njn25e49d8e72002-09-23 09:36:25 +00002541
sewardj45d94cc2005-04-20 14:44:11 +00002542
njn25e49d8e72002-09-23 09:36:25 +00002543/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002544/*--- Register event handlers ---*/
2545/*------------------------------------------------------------*/
2546
sewardj45d94cc2005-04-20 14:44:11 +00002547/* When some chunk of guest state is written, mark the corresponding
2548 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00002549 chunks of guest state, hence the _SIZE value, which has to be as
2550 big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00002551*/
2552static void mc_post_reg_write ( CorePart part, ThreadId tid,
2553 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00002554{
sewardj05a46732006-10-17 01:28:10 +00002555# define MAX_REG_WRITE_SIZE 1408
cerion21082042005-12-06 19:07:08 +00002556 UChar area[MAX_REG_WRITE_SIZE];
2557 tl_assert(size <= MAX_REG_WRITE_SIZE);
njn1d0825f2006-03-27 11:37:07 +00002558 VG_(memset)(area, V_BITS8_DEFINED, size);
njncf45fd42004-11-24 16:30:22 +00002559 VG_(set_shadow_regs_area)( tid, offset, size, area );
cerion21082042005-12-06 19:07:08 +00002560# undef MAX_REG_WRITE_SIZE
njnd3040452003-05-19 15:04:06 +00002561}
2562
static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   /* A client-call result written into the guest state is treated
      exactly like any other register write (marked defined).  'f',
      the called function's address, is unused here. */
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}
2570
sewardj45d94cc2005-04-20 14:44:11 +00002571/* Look at the definedness of the guest's shadow state for
2572 [offset, offset+len). If any part of that is undefined, record
2573 a parameter error.
2574*/
2575static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
2576 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00002577{
sewardj45d94cc2005-04-20 14:44:11 +00002578 Int i;
2579 Bool bad;
2580
2581 UChar area[16];
2582 tl_assert(size <= 16);
2583
2584 VG_(get_shadow_regs_area)( tid, offset, size, area );
2585
2586 bad = False;
2587 for (i = 0; i < size; i++) {
njn1d0825f2006-03-27 11:37:07 +00002588 if (area[i] != V_BITS8_DEFINED) {
sewardj2c27f702005-05-03 18:19:05 +00002589 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002590 break;
2591 }
nethercote8b76fe52004-11-08 19:20:09 +00002592 }
2593
sewardj45d94cc2005-04-20 14:44:11 +00002594 if (bad)
njn1d0825f2006-03-27 11:37:07 +00002595 mc_record_param_error ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
nethercote8b76fe52004-11-08 19:20:09 +00002596}
njnd3040452003-05-19 15:04:06 +00002597
njn25e49d8e72002-09-23 09:36:25 +00002598
sewardj6cf40ff2005-04-20 22:31:26 +00002599/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00002600/*--- Error and suppression types ---*/
2601/*------------------------------------------------------------*/
2602
/* The classification of a faulting address. */
typedef
   enum {
      Undescribed,   // as-yet unclassified
      Stack,
      Unknown,       // classification yielded nothing useful
      Freed, Mallocd,
      UserG,         // in a user-defined block
      Mempool,       // in a mempool
      Register,      // in a register;  for Param errors only
   }
   AddrKind;

/* Records info about a faulting address. */
typedef
   struct {                   // Used by:
      AddrKind akind;         //   ALL
      SizeT blksize;          //   Freed, Mallocd
      OffT rwoffset;          //   Freed, Mallocd
      ExeContext* lastchange; //   Freed, Mallocd
      ThreadId stack_tid;     //   Stack
      const Char *desc;       //   UserG
      Bool maybe_gcc;         // True if just below %esp -- could be a gcc bug.
   }
   AddrInfo;

/* What kinds of error can be suppressed (matched against entries in
   suppression files). */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Use of invalid values of given size (MemCheck only)
      Value0Supp, Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Invalid read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;

/* What kind of error it is. */
typedef
   enum { ValueErr,
          CoreMemErr,   // Error in core op (pthread, signals) or client req
          AddrErr,
          ParamErr, UserErr,  /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr,
          OverlapErr,
          LeakErr,
          IllegalMempoolErr,
   }
   MC_ErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {                // Used by:
      AxsKind axskind;     //   AddrErr
      Int size;            //   AddrErr, ValueErr
      AddrInfo addrinfo;   //   {Addr,Free,FreeMismatch,Param,User}Err
      Bool isUnaddr;       //   {CoreMem,Param,User}Err
   }
   MC_Error;
2674
2675/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00002676/*--- Printing errors ---*/
2677/*------------------------------------------------------------*/
2678
/* Print a description of faulting address 'a' using the
   classification in 'ai', via VG_(message).  With --xml=yes each line
   is wrapped in <auxwhat> tags. */
static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai )
{
   HChar* xpre  = VG_(clo_xml) ? "  <auxwhat>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";

   switch (ai->akind) {
      case Stack:
         VG_(message)(Vg_UserMsg,
                      "%sAddress 0x%llx is on thread %d's stack%s",
                      xpre, (ULong)a, ai->stack_tid, xpost);
         break;
      case Unknown:
         if (ai->maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               "%sAddress 0x%llx is just below the stack ptr. "
               "To suppress, use: --workaround-gcc296-bugs=yes%s",
               xpre, (ULong)a, xpost
            );
         } else {
            VG_(message)(Vg_UserMsg,
               "%sAddress 0x%llx "
               "is not stack'd, malloc'd or (recently) free'd%s",
               xpre, (ULong)a, xpost);
         }
         break;
      case Freed: case Mallocd: case UserG: case Mempool: {
         SizeT delta;
         const Char* relative;
         const Char* kind;
         if (ai->akind == Mempool) {
            kind = "mempool";
         } else {
            kind = "block";
         }
         if (ai->desc != NULL)
            kind = ai->desc;

         /* Work out where the address lies relative to the block:
            before its start, after its end, or inside it. */
         if (ai->rwoffset < 0) {
            delta    = (SizeT)(- ai->rwoffset);
            relative = "before";
         } else if (ai->rwoffset >= ai->blksize) {
            delta    = ai->rwoffset - ai->blksize;
            relative = "after";
         } else {
            delta    = ai->rwoffset;
            relative = "inside";
         }
         /* Nb: "%,lu" is a Valgrind printf extension (thousands
            separators). */
         VG_(message)(Vg_UserMsg,
            "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
            xpre,
            a, delta, relative, kind,
            ai->blksize,
            ai->akind==Mallocd ? "alloc'd"
               : ai->akind==Freed ? "free'd"
                                  : "client-defined",
            xpost);
         VG_(pp_ExeContext)(ai->lastchange);
         break;
      }
      case Register:
         // print nothing
         tl_assert(0 == a);
         break;
      default:
         VG_(tool_panic)("mc_pp_AddrInfo");
   }
}
2746
njn51d827b2005-05-09 01:02:08 +00002747static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00002748{
njn1d0825f2006-03-27 11:37:07 +00002749 MC_Error* err_extra = VG_(get_error_extra)(err);
njn9e63cb62005-05-08 18:34:59 +00002750
sewardj71bc3cb2005-05-19 00:25:45 +00002751 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
2752 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
2753
njn9e63cb62005-05-08 18:34:59 +00002754 switch (VG_(get_error_kind)(err)) {
2755 case CoreMemErr: {
2756 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00002757 if (VG_(clo_xml))
2758 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
2759 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
2760 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
2761 xpre, VG_(get_error_string)(err), s, xpost);
2762
njn9e63cb62005-05-08 18:34:59 +00002763 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2764 break;
2765
2766 }
2767
2768 case ValueErr:
2769 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00002770 if (VG_(clo_xml))
2771 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
2772 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
2773 " on uninitialised value(s)%s",
2774 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00002775 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00002776 if (VG_(clo_xml))
2777 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
2778 VG_(message)(Vg_UserMsg,
2779 "%sUse of uninitialised value of size %d%s",
2780 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00002781 }
2782 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2783 break;
2784
2785 case ParamErr: {
2786 Bool isReg = ( Register == err_extra->addrinfo.akind );
2787 Char* s1 = ( isReg ? "contains" : "points to" );
2788 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
2789 if (isReg) tl_assert(!err_extra->isUnaddr);
2790
sewardj71bc3cb2005-05-19 00:25:45 +00002791 if (VG_(clo_xml))
2792 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
2793 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
2794 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00002795
2796 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn1d0825f2006-03-27 11:37:07 +00002797 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
njn9e63cb62005-05-08 18:34:59 +00002798 break;
2799 }
2800 case UserErr: {
2801 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
2802
sewardj71bc3cb2005-05-19 00:25:45 +00002803 if (VG_(clo_xml))
2804 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00002805 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00002806 "%s%s byte(s) found during client check request%s",
2807 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00002808
2809 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn1d0825f2006-03-27 11:37:07 +00002810 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
njn9e63cb62005-05-08 18:34:59 +00002811 break;
2812 }
njn1d0825f2006-03-27 11:37:07 +00002813 case FreeErr:
2814 if (VG_(clo_xml))
2815 VG_(message)(Vg_UserMsg, " <kind>InvalidFree</kind>");
2816 VG_(message)(Vg_UserMsg,
2817 "%sInvalid free() / delete / delete[]%s",
2818 xpre, xpost);
2819 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2820 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
njn9e63cb62005-05-08 18:34:59 +00002821 break;
njn1d0825f2006-03-27 11:37:07 +00002822
2823 case FreeMismatchErr:
2824 if (VG_(clo_xml))
2825 VG_(message)(Vg_UserMsg, " <kind>MismatchedFree</kind>");
2826 VG_(message)(Vg_UserMsg,
2827 "%sMismatched free() / delete / delete []%s",
2828 xpre, xpost);
2829 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2830 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
2831 break;
2832
2833 case AddrErr:
2834 switch (err_extra->axskind) {
2835 case ReadAxs:
2836 if (VG_(clo_xml))
2837 VG_(message)(Vg_UserMsg, " <kind>InvalidRead</kind>");
2838 VG_(message)(Vg_UserMsg,
2839 "%sInvalid read of size %d%s",
2840 xpre, err_extra->size, xpost );
2841 break;
2842 case WriteAxs:
2843 if (VG_(clo_xml))
2844 VG_(message)(Vg_UserMsg, " <kind>InvalidWrite</kind>");
2845 VG_(message)(Vg_UserMsg,
2846 "%sInvalid write of size %d%s",
2847 xpre, err_extra->size, xpost );
2848 break;
2849 case ExecAxs:
2850 if (VG_(clo_xml))
2851 VG_(message)(Vg_UserMsg, " <kind>InvalidJump</kind>");
2852 VG_(message)(Vg_UserMsg,
2853 "%sJump to the invalid address "
2854 "stated on the next line%s",
2855 xpre, xpost);
2856 break;
2857 default:
2858 VG_(tool_panic)("mc_pp_Error(axskind)");
2859 }
2860 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2861 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
2862 break;
2863
2864 case OverlapErr: {
2865 OverlapExtra* ov_extra = (OverlapExtra*)VG_(get_error_extra)(err);
2866 if (VG_(clo_xml))
2867 VG_(message)(Vg_UserMsg, " <kind>Overlap</kind>");
2868 if (ov_extra->len == -1)
2869 VG_(message)(Vg_UserMsg,
2870 "%sSource and destination overlap in %s(%p, %p)%s",
2871 xpre,
2872 VG_(get_error_string)(err),
2873 ov_extra->dst, ov_extra->src,
2874 xpost);
2875 else
2876 VG_(message)(Vg_UserMsg,
2877 "%sSource and destination overlap in %s(%p, %p, %d)%s",
2878 xpre,
2879 VG_(get_error_string)(err),
2880 ov_extra->dst, ov_extra->src, ov_extra->len,
2881 xpost);
2882 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2883 break;
2884 }
2885 case LeakErr: {
2886 MC_(pp_LeakError)(err_extra);
2887 break;
2888 }
2889
2890 case IllegalMempoolErr:
2891 if (VG_(clo_xml))
2892 VG_(message)(Vg_UserMsg, " <kind>InvalidMemPool</kind>");
2893 VG_(message)(Vg_UserMsg, "%sIllegal memory pool address%s",
2894 xpre, xpost);
2895 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2896 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
2897 break;
2898
2899 default:
2900 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
2901 VG_(get_error_kind)(err));
2902 VG_(tool_panic)("unknown error code in mc_pp_Error)");
njn9e63cb62005-05-08 18:34:59 +00002903 }
2904}
2905
2906/*------------------------------------------------------------*/
2907/*--- Recording errors ---*/
2908/*------------------------------------------------------------*/
2909
njn1d0825f2006-03-27 11:37:07 +00002910/* These many bytes below %ESP are considered addressible if we're
2911 doing the --workaround-gcc296-bugs hack. */
2912#define VG_GCC296_BUG_STACK_SLOP 1024
2913
2914/* Is this address within some small distance below %ESP? Used only
2915 for the --workaround-gcc296-bugs kludge. */
2916static Bool is_just_below_ESP( Addr esp, Addr aa )
2917{
2918 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
2919 return True;
2920 else
2921 return False;
2922}
2923
2924static void mc_clear_MC_Error ( MC_Error* err_extra )
2925{
2926 err_extra->axskind = ReadAxs;
2927 err_extra->size = 0;
2928 err_extra->isUnaddr = True;
2929 err_extra->addrinfo.akind = Unknown;
2930 err_extra->addrinfo.blksize = 0;
2931 err_extra->addrinfo.rwoffset = 0;
2932 err_extra->addrinfo.lastchange = NULL;
2933 err_extra->addrinfo.stack_tid = VG_INVALID_THREADID;
2934 err_extra->addrinfo.maybe_gcc = False;
2935 err_extra->addrinfo.desc = NULL;
2936}
2937
2938/* This one called from generated code and non-generated code. */
2939static void mc_record_address_error ( ThreadId tid, Addr a, Int size,
2940 Bool isWrite )
2941{
2942 MC_Error err_extra;
sewardj05a46732006-10-17 01:28:10 +00002943 Bool just_below_esp;
2944
2945 if (in_ignored_range(a))
2946 return;
2947
2948# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
2949 /* AIX zero-page handling. On AIX, reads from page zero are,
2950 bizarrely enough, legitimate. Writes to page zero aren't,
2951 though. Since memcheck can't distinguish reads from writes, the
2952 best we can do is to 'act normal' and mark the A bits in the
2953 normal way as noaccess, but then hide any reads from that page
2954 that get reported here. */
2955 if ((!isWrite) && a >= 0 && a+size <= 4096)
2956 return;
2957
2958 /* Appalling AIX hack. It suppresses reads done by glink
2959 fragments. Getting rid of this would require figuring out
2960 somehow where the referenced data areas are (and their
2961 sizes). */
2962 if ((!isWrite) && size == sizeof(Word)) {
2963 UInt i1, i2;
2964 UInt* pc = (UInt*)VG_(get_IP)(tid);
2965 if (sizeof(Word) == 4) {
2966 i1 = 0x800c0000; /* lwz r0,0(r12) */
2967 i2 = 0x804c0004; /* lwz r2,4(r12) */
2968 } else {
2969 i1 = 0xe80c0000; /* ld r0,0(r12) */
2970 i2 = 0xe84c0008; /* ld r2,8(r12) */
2971 }
2972 if (pc[0] == i1 && pc[1] == i2) return;
2973 if (pc[0] == i2 && pc[-1] == i1) return;
2974 }
2975# endif
njn1d0825f2006-03-27 11:37:07 +00002976
2977 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
2978
2979 /* If this is caused by an access immediately below %ESP, and the
2980 user asks nicely, we just ignore it. */
2981 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
2982 return;
2983
2984 mc_clear_MC_Error( &err_extra );
2985 err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
2986 err_extra.size = size;
2987 err_extra.addrinfo.akind = Undescribed;
2988 err_extra.addrinfo.maybe_gcc = just_below_esp;
2989 VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
2990}
2991
2992/* These ones are called from non-generated code */
2993
2994/* This is for memory errors in pthread functions, as opposed to pthread API
2995 errors which are found by the core. */
2996static void mc_record_core_mem_error ( ThreadId tid, Bool isUnaddr, Char* msg )
2997{
2998 MC_Error err_extra;
2999
3000 mc_clear_MC_Error( &err_extra );
3001 err_extra.isUnaddr = isUnaddr;
3002 VG_(maybe_record_error)( tid, CoreMemErr, /*addr*/0, msg, &err_extra );
3003}
3004
3005// Three kinds of param errors:
3006// - register arg contains undefined bytes
3007// - memory arg is unaddressable
3008// - memory arg contains undefined bytes
3009// 'isReg' and 'isUnaddr' dictate which of these it is.
3010static void mc_record_param_error ( ThreadId tid, Addr a, Bool isReg,
3011 Bool isUnaddr, Char* msg )
3012{
3013 MC_Error err_extra;
3014
sewardj1cf56cf2006-05-22 13:59:42 +00003015 if (!isUnaddr) tl_assert(MC_(clo_undef_value_errors));
njn1d0825f2006-03-27 11:37:07 +00003016 tl_assert(VG_INVALID_THREADID != tid);
3017 if (isUnaddr) tl_assert(!isReg); // unaddressable register is impossible
3018 mc_clear_MC_Error( &err_extra );
3019 err_extra.addrinfo.akind = ( isReg ? Register : Undescribed );
3020 err_extra.isUnaddr = isUnaddr;
3021 VG_(maybe_record_error)( tid, ParamErr, a, msg, &err_extra );
3022}
3023
3024static void mc_record_jump_error ( ThreadId tid, Addr a )
3025{
3026 MC_Error err_extra;
3027
3028 tl_assert(VG_INVALID_THREADID != tid);
3029 mc_clear_MC_Error( &err_extra );
3030 err_extra.axskind = ExecAxs;
3031 err_extra.size = 1; // size only used for suppressions
3032 err_extra.addrinfo.akind = Undescribed;
3033 VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
3034}
3035
3036void MC_(record_free_error) ( ThreadId tid, Addr a )
3037{
3038 MC_Error err_extra;
3039
3040 tl_assert(VG_INVALID_THREADID != tid);
3041 mc_clear_MC_Error( &err_extra );
3042 err_extra.addrinfo.akind = Undescribed;
3043 VG_(maybe_record_error)( tid, FreeErr, a, /*s*/NULL, &err_extra );
3044}
3045
3046void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
3047{
3048 MC_Error err_extra;
3049
3050 tl_assert(VG_INVALID_THREADID != tid);
3051 mc_clear_MC_Error( &err_extra );
3052 err_extra.addrinfo.akind = Undescribed;
3053 VG_(maybe_record_error)( tid, IllegalMempoolErr, a, /*s*/NULL, &err_extra );
3054}
3055
3056void MC_(record_freemismatch_error) ( ThreadId tid, Addr a, MC_Chunk* mc )
3057{
3058 MC_Error err_extra;
3059 AddrInfo* ai;
3060
3061 tl_assert(VG_INVALID_THREADID != tid);
3062 mc_clear_MC_Error( &err_extra );
3063 ai = &err_extra.addrinfo;
3064 ai->akind = Mallocd; // Nb: not 'Freed'
3065 ai->blksize = mc->size;
3066 ai->rwoffset = (Int)a - (Int)mc->data;
3067 ai->lastchange = mc->where;
3068 VG_(maybe_record_error)( tid, FreeMismatchErr, a, /*s*/NULL, &err_extra );
3069}
3070
3071static void mc_record_overlap_error ( ThreadId tid,
3072 Char* function, OverlapExtra* ov_extra )
3073{
3074 VG_(maybe_record_error)(
3075 tid, OverlapErr, /*addr*/0, /*s*/function, ov_extra );
3076}
3077
3078Bool MC_(record_leak_error) ( ThreadId tid, /*LeakExtra*/void* leak_extra,
3079 ExeContext* where, Bool print_record )
3080{
3081 return
3082 VG_(unique_error) ( tid, LeakErr, /*Addr*/0, /*s*/NULL,
3083 /*extra*/leak_extra, where, print_record,
3084 /*allow_GDB_attach*/False, /*count_error*/False );
3085}
3086
3087
njn02bc4b82005-05-15 17:28:26 +00003088/* Creates a copy of the 'extra' part, updates the copy with address info if
njn9e63cb62005-05-08 18:34:59 +00003089 necessary, and returns the copy. */
3090/* This one called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00003091static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00003092{
njn1d0825f2006-03-27 11:37:07 +00003093 MC_Error err_extra;
njn9e63cb62005-05-08 18:34:59 +00003094
njn1d0825f2006-03-27 11:37:07 +00003095 tl_assert(MC_(clo_undef_value_errors));
3096 mc_clear_MC_Error( &err_extra );
njn9e63cb62005-05-08 18:34:59 +00003097 err_extra.size = size;
3098 err_extra.isUnaddr = False;
3099 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
3100}
3101
3102/* This called from non-generated code */
3103
njn96364822005-05-08 19:04:53 +00003104static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
3105 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00003106{
njn1d0825f2006-03-27 11:37:07 +00003107 MC_Error err_extra;
njn9e63cb62005-05-08 18:34:59 +00003108
3109 tl_assert(VG_INVALID_THREADID != tid);
njn1d0825f2006-03-27 11:37:07 +00003110 mc_clear_MC_Error( &err_extra );
njn9e63cb62005-05-08 18:34:59 +00003111 err_extra.addrinfo.akind = Undescribed;
3112 err_extra.isUnaddr = isUnaddr;
3113 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
3114}
3115
njn1d0825f2006-03-27 11:37:07 +00003116__attribute__ ((unused))
3117static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
3118{
3119 if (ai1->akind != Undescribed
3120 && ai2->akind != Undescribed
3121 && ai1->akind != ai2->akind)
3122 return False;
3123 if (ai1->akind == Freed || ai1->akind == Mallocd) {
3124 if (ai1->blksize != ai2->blksize)
3125 return False;
3126 if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
3127 return False;
3128 }
3129 return True;
3130}
3131
/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different.  Returns True if 'e1' and 'e2' should
   be treated as the same error for de-duplication purposes.  The two
   errors are guaranteed by the caller to have the same kind. */
static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
{
   MC_Error* e1_extra = VG_(get_error_extra)(e1);
   MC_Error* e2_extra = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case CoreMemErr: {
         /* Match on the unaddr/uninit flag and the message text;
            pointer equality short-circuits the strcmp. */
         Char *e1s, *e2s;
         if (e1_extra->isUnaddr != e2_extra->isUnaddr) return False;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;
         if (0 == VG_(strcmp)(e1s, e2s)) return True;
         return False;
      }

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case ParamErr:
         if (0 != VG_(strcmp)(VG_(get_error_string)(e1),
                              VG_(get_error_string)(e2))) return False;
         // fall through
      case UserErr:
         if (e1_extra->isUnaddr != e2_extra->isUnaddr) return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors.  Not even AddrErr
            below does that.  So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      case ValueErr:
         if (e1_extra->size != e2_extra->size) return False;
         return True;

      case OverlapErr:
         return True;

      case LeakErr:
         VG_(tool_panic)("Shouldn't get LeakErr in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      case IllegalMempoolErr:
         return True;

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
3209
/* Function used when searching MC_Chunk lists: does address 'a' fall
   within chunk 'mc', counting its redzones as part of the block? */
static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
{
   // Nb: this is not quite right!  It assumes that the heap block has
   // a redzone of size MC_MALLOC_REDZONE_SZB.  That's true for malloc'd
   // blocks, but not necessarily true for custom-alloc'd blocks.  So
   // in some cases this could result in an incorrect description (eg.
   // saying "12 bytes after block A" when really it's within block B.
   // Fixing would require adding redzone size to MC_Chunks, though.
   return VG_(addr_is_in_block)( a, mc->data, mc->size,
                                 MC_MALLOC_REDZONE_SZB );
}
3222
3223// Forward declaration
3224static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
3225
3226/* Describe an address as best you can, for error messages,
3227 putting the result in ai. */
3228static void describe_addr ( Addr a, AddrInfo* ai )
3229{
3230 MC_Chunk* mc;
3231 ThreadId tid;
3232 Addr stack_min, stack_max;
3233
3234 /* Perhaps it's a user-def'd block? */
3235 if (client_perm_maybe_describe( a, ai ))
3236 return;
3237
3238 /* Perhaps it's on a thread's stack? */
3239 VG_(thread_stack_reset_iter)();
3240 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
3241 if (stack_min <= a && a <= stack_max) {
3242 ai->akind = Stack;
3243 ai->stack_tid = tid;
3244 return;
3245 }
3246 }
3247 /* Search for a recently freed block which might bracket it. */
3248 mc = MC_(get_freed_list_head)();
3249 while (mc) {
3250 if (addr_is_in_MC_Chunk(mc, a)) {
3251 ai->akind = Freed;
3252 ai->blksize = mc->size;
3253 ai->rwoffset = (Int)a - (Int)mc->data;
3254 ai->lastchange = mc->where;
3255 return;
3256 }
3257 mc = mc->next;
3258 }
3259 /* Search for a currently malloc'd block which might bracket it. */
3260 VG_(HT_ResetIter)(MC_(malloc_list));
3261 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
3262 if (addr_is_in_MC_Chunk(mc, a)) {
3263 ai->akind = Mallocd;
3264 ai->blksize = mc->size;
3265 ai->rwoffset = (Int)(a) - (Int)mc->data;
3266 ai->lastchange = mc->where;
3267 return;
3268 }
3269 }
3270 /* Clueless ... */
3271 ai->akind = Unknown;
3272 return;
3273}
3274
/* Updates the copy with address info if necessary (but not for all
   errors).  Returns the number of bytes of 'extra' the core should
   copy when caching the error (0 means nothing to copy). */
static UInt mc_update_extra( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   // These two don't have addresses associated with them, and so don't
   // need any updating.
   case CoreMemErr:
   case ValueErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(Unknown == extra->addrinfo.akind);
      return sizeof(MC_Error);
   }

   // ParamErrs sometimes involve a memory address; call describe_addr() in
   // this case.
   case ParamErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(Undescribed == extra->addrinfo.akind ||
                Register    == extra->addrinfo.akind);
      if (Undescribed == extra->addrinfo.akind)
         describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
      return sizeof(MC_Error);
   }

   // These four always involve a memory address.
   case AddrErr:
   case UserErr:
   case FreeErr:
   case IllegalMempoolErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(Undescribed == extra->addrinfo.akind);
      describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
      return sizeof(MC_Error);
   }

   // FreeMismatchErrs have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case FreeMismatchErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(extra && Mallocd == extra->addrinfo.akind);
      (void)client_perm_maybe_describe( VG_(get_error_address)(err),
                                        &(extra->addrinfo) );
      return sizeof(MC_Error);
   }

   // No memory address involved with these ones.  Nb:  for LeakErrs the
   // returned size does not matter -- LeakErrs are always shown with
   // VG_(unique_error)() so they're not copied.
   case LeakErr:     return 0;
   case OverlapErr:  return sizeof(OverlapExtra);

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
3331
njn9e63cb62005-05-08 18:34:59 +00003332/*------------------------------------------------------------*/
3333/*--- Suppressions ---*/
3334/*------------------------------------------------------------*/
3335
njn51d827b2005-05-09 01:02:08 +00003336static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00003337{
3338 SuppKind skind;
3339
njn1d0825f2006-03-27 11:37:07 +00003340 if (VG_STREQ(name, "Param")) skind = ParamSupp;
3341 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
3342 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
3343 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
3344 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
3345 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
3346 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
3347 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
3348 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
3349 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
3350 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
njn9e63cb62005-05-08 18:34:59 +00003351 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
3352 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
3353 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
3354 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
3355 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
3356 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
3357 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
3358 else
3359 return False;
3360
3361 VG_(set_supp_kind)(su, skind);
3362 return True;
3363}
3364
njn1d0825f2006-03-27 11:37:07 +00003365static
3366Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
3367{
3368 Bool eof;
3369
3370 if (VG_(get_supp_kind)(su) == ParamSupp) {
3371 eof = VG_(get_line) ( fd, buf, nBuf );
3372 if (eof) return False;
3373 VG_(set_supp_string)(su, VG_(strdup)(buf));
3374 }
3375 return True;
3376}
3377
3378static Bool mc_error_matches_suppression(Error* err, Supp* su)
3379{
3380 Int su_size;
3381 MC_Error* err_extra = VG_(get_error_extra)(err);
3382 ErrorKind ekind = VG_(get_error_kind )(err);
3383
3384 switch (VG_(get_supp_kind)(su)) {
3385 case ParamSupp:
3386 return (ekind == ParamErr
3387 && VG_STREQ(VG_(get_error_string)(err),
3388 VG_(get_supp_string)(su)));
3389
3390 case CoreMemSupp:
3391 return (ekind == CoreMemErr
3392 && VG_STREQ(VG_(get_error_string)(err),
3393 VG_(get_supp_string)(su)));
3394
3395 case Value0Supp: su_size = 0; goto value_case;
3396 case Value1Supp: su_size = 1; goto value_case;
3397 case Value2Supp: su_size = 2; goto value_case;
3398 case Value4Supp: su_size = 4; goto value_case;
3399 case Value8Supp: su_size = 8; goto value_case;
3400 case Value16Supp:su_size =16; goto value_case;
3401 value_case:
3402 return (ekind == ValueErr && err_extra->size == su_size);
3403
3404 case Addr1Supp: su_size = 1; goto addr_case;
3405 case Addr2Supp: su_size = 2; goto addr_case;
3406 case Addr4Supp: su_size = 4; goto addr_case;
3407 case Addr8Supp: su_size = 8; goto addr_case;
3408 case Addr16Supp:su_size =16; goto addr_case;
3409 addr_case:
3410 return (ekind == AddrErr && err_extra->size == su_size);
3411
3412 case FreeSupp:
3413 return (ekind == FreeErr || ekind == FreeMismatchErr);
3414
3415 case OverlapSupp:
3416 return (ekind = OverlapErr);
3417
3418 case LeakSupp:
3419 return (ekind == LeakErr);
3420
3421 case MempoolSupp:
3422 return (ekind == IllegalMempoolErr);
3423
3424 default:
3425 VG_(printf)("Error:\n"
3426 " unknown suppression type %d\n",
3427 VG_(get_supp_kind)(su));
3428 VG_(tool_panic)("unknown suppression type in "
3429 "MC_(error_matches_suppression)");
3430 }
3431}
3432
3433static Char* mc_get_error_name ( Error* err )
3434{
3435 Char* s;
3436 switch (VG_(get_error_kind)(err)) {
3437 case ParamErr: return "Param";
3438 case UserErr: return NULL; /* Can't suppress User errors */
3439 case FreeMismatchErr: return "Free";
3440 case IllegalMempoolErr: return "Mempool";
3441 case FreeErr: return "Free";
3442 case AddrErr:
3443 switch ( ((MC_Error*)VG_(get_error_extra)(err))->size ) {
3444 case 1: return "Addr1";
3445 case 2: return "Addr2";
3446 case 4: return "Addr4";
3447 case 8: return "Addr8";
3448 case 16: return "Addr16";
3449 default: VG_(tool_panic)("unexpected size for Addr");
3450 }
3451
3452 case ValueErr:
3453 switch ( ((MC_Error*)VG_(get_error_extra)(err))->size ) {
3454 case 0: return "Cond";
3455 case 1: return "Value1";
3456 case 2: return "Value2";
3457 case 4: return "Value4";
3458 case 8: return "Value8";
3459 case 16: return "Value16";
3460 default: VG_(tool_panic)("unexpected size for Value");
3461 }
3462 case CoreMemErr: return "CoreMem";
3463 case OverlapErr: return "Overlap";
3464 case LeakErr: return "Leak";
3465 default: VG_(tool_panic)("get_error_name: unexpected type");
3466 }
3467 VG_(printf)(s);
3468}
3469
3470static void mc_print_extra_suppression_info ( Error* err )
3471{
3472 if (ParamErr == VG_(get_error_kind)(err)) {
3473 VG_(printf)(" %s\n", VG_(get_error_string)(err));
3474 }
3475}
3476
njn9e63cb62005-05-08 18:34:59 +00003477/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00003478/*--- Functions called directly from generated code: ---*/
3479/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00003480/*------------------------------------------------------------*/
3481
njn1d0825f2006-03-27 11:37:07 +00003482/* Types: LOADV32, LOADV16, LOADV8 are:
sewardj6cf40ff2005-04-20 22:31:26 +00003483 UWord fn ( Addr a )
3484 so they return 32-bits on 32-bit machines and 64-bits on
3485 64-bit machines. Addr has the same size as a host word.
3486
njn1d0825f2006-03-27 11:37:07 +00003487 LOADV64 is always ULong fn ( Addr a )
sewardj6cf40ff2005-04-20 22:31:26 +00003488
njn1d0825f2006-03-27 11:37:07 +00003489 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
3490 are a UWord, and for STOREV64 they are a ULong.
sewardj6cf40ff2005-04-20 22:31:26 +00003491*/
3492
njn1d0825f2006-03-27 11:37:07 +00003493/* If any part of '_a' indicated by the mask is 1, either
njn45e81252006-03-28 12:35:08 +00003494 '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
njn1d0825f2006-03-27 11:37:07 +00003495 covered by the primary map. */
njn45e81252006-03-28 12:35:08 +00003496#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
njn1d0825f2006-03-27 11:37:07 +00003497#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
3498
3499
sewardj95448072004-11-22 20:19:51 +00003500/* ------------------------ Size = 8 ------------------------ */
3501
/* Load the V bits for the 8 bytes at 'a', returned in expanded
   register form.  The fast path requires 'a' to be 8-aligned and
   within the primary map, and the 8 bytes to be uniformly defined or
   uniformly undefined; everything else goes via mc_LOADVn_slow. */
static INLINE
ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
{
   UWord   sm_off16, vabits16;
   SecMap* sm;

   PROF_EVENT(200, "mc_LOADV64");

#ifndef PERF_FAST_LOADV
   return mc_LOADVn_slow( a, 64, isBigEndian );
#else
   /* Unaligned, or above the range covered by the primary map:
      general (slow) case. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
      PROF_EVENT(201, "mc_LOADV64-slow1");
      return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
   }

   sm       = get_secmap_for_reading_low(a);
   sm_off16 = SM_OFF_16(a);
   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];

   // Handle common case quickly: a is suitably aligned, is mapped, and
   // addressible.
   // Convert V bits from compact memory form to expanded register form.
   if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED)) {
      return V_BITS64_DEFINED;
   } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED)) {
      return V_BITS64_UNDEFINED;
   } else {
      /* Slow case: the 8 bytes are not all-defined or all-undefined. */
      PROF_EVENT(202, "mc_LOADV64-slow2");
      return mc_LOADVn_slow( a, 64, isBigEndian );
   }
#endif
}
3536
/* Big- and little-endian 64-bit load entry points called from
   generated code; both delegate to mc_LOADV64. */
VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
{
   return mc_LOADV64(a, True);
}
VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
{
   return mc_LOADV64(a, False);
}
sewardjf9d81612005-04-23 23:25:49 +00003545
sewardjf9d81612005-04-23 23:25:49 +00003546
/* Store the V bits 'vbits64' (expanded register form) for the 8
   bytes at 'a'.  The fast path requires 'a' to be 8-aligned, within
   the primary map, in a non-distinguished secmap, and the new bits
   to be uniformly defined or undefined; otherwise mc_STOREVn_slow. */
static INLINE
void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
{
   UWord   sm_off16, vabits16;
   SecMap* sm;

   PROF_EVENT(210, "mc_STOREV64");

#ifndef PERF_FAST_STOREV
   // XXX: this slow case seems to be marginally faster than the fast case!
   // Investigate further.
   mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
#else
   /* Unaligned, or above the range covered by the primary map. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
      PROF_EVENT(211, "mc_STOREV64-slow1");
      mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
      return;
   }

   sm       = get_secmap_for_reading_low(a);
   sm_off16 = SM_OFF_16(a);
   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];

   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                       (VA_BITS16_DEFINED   == vabits16 ||
                        VA_BITS16_UNDEFINED == vabits16) ))
   {
      /* Handle common case quickly: a is suitably aligned, */
      /* is mapped, and is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS64_DEFINED == vbits64) {
         ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
      } else if (V_BITS64_UNDEFINED == vbits64) {
         ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(212, "mc_STOREV64-slow2");
         mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(213, "mc_STOREV64-slow3");
      mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
   }
#endif
}
3593
/* Big- and little-endian 64-bit store entry points called from
   generated code; both delegate to mc_STOREV64. */
VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
{
   mc_STOREV64(a, vbits64, True);
}
VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
{
   mc_STOREV64(a, vbits64, False);
}
sewardj95448072004-11-22 20:19:51 +00003602
sewardj95448072004-11-22 20:19:51 +00003603
3604/* ------------------------ Size = 4 ------------------------ */
3605
/* Load the V bits for the 4 bytes at 'a', returned in expanded
   register form.  Fast path requires 4-alignment, a primary-map
   address, and a uniformly defined or uniformly undefined word;
   otherwise mc_LOADVn_slow. */
static INLINE
UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(220, "mc_LOADV32");

#ifndef PERF_FAST_LOADV
   return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
#else
   /* Unaligned, or above the range covered by the primary map. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
      PROF_EVENT(221, "mc_LOADV32-slow1");
      return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];

   // Handle common case quickly: a is suitably aligned, is mapped, and the
   // entire word32 it lives in is addressible.
   // Convert V bits from compact memory form to expanded register form.
   // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
   // Almost certainly not necessary, but be paranoid.
   if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED)) {
      return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
   } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED)) {
      return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
   } else {
      /* Slow case: the 4 bytes are not all-defined or all-undefined. */
      PROF_EVENT(222, "mc_LOADV32-slow2");
      return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
   }
#endif
}
3642
3643VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
3644{
3645 return mc_LOADV32(a, True);
3646}
3647VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
3648{
3649 return mc_LOADV32(a, False);
3650}
sewardjc1a2cda2005-04-21 17:34:00 +00003651
sewardjc1a2cda2005-04-21 17:34:00 +00003652
/* Store the 32 V bits in 'vbits32' for address 'a'.  Shared fast-path
   helper for the BE/LE entry points below; 'isBigEndian' only matters
   on the generic slow path. */
static INLINE
void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
{
   UWord sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(230, "mc_STOREV32");

#ifndef PERF_FAST_STOREV
   mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
#else
   // Unaligned accesses, and addresses above the low primary-map range,
   // take the generic slow path.
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
      PROF_EVENT(231, "mc_STOREV32-slow1");
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      return;
   }

   // Note: we only get the secondary "for reading" here.  Every write
   // below is guarded by !is_distinguished_sm(sm), so a shared
   // distinguished secondary is never modified in place; state changes
   // on distinguished secondaries go through mc_STOREVn_slow.
   sm = get_secmap_for_reading_low(a);
   sm_off = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];

//---------------------------------------------------------------------------
#if 1
   // Cleverness: sometimes we don't have to write the shadow memory at
   // all, if we can tell that what we want to write is the same as what is
   // already there.
   if (V_BITS32_DEFINED == vbits32) {
      if (vabits8 == (UInt)VA_BITS8_DEFINED) {
         return;                              // no change needed
      } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
         sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
      } else {
         // not defined/undefined, or distinguished and changing state
         PROF_EVENT(232, "mc_STOREV32-slow2");
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      }
   } else if (V_BITS32_UNDEFINED == vbits32) {
      if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
         return;                              // no change needed
      } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
         sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
      } else {
         // not defined/undefined, or distinguished and changing state
         PROF_EVENT(233, "mc_STOREV32-slow3");
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      }
   } else {
      // Partially defined word
      PROF_EVENT(234, "mc_STOREV32-slow4");
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
   }
//---------------------------------------------------------------------------
#else
   // Alternative (currently disabled) version without the
   // skip-identical-write optimisation above.
   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                       (VA_BITS8_DEFINED   == vabits8 ||
                        VA_BITS8_UNDEFINED == vabits8) ))
   {
      /* Handle common case quickly: a is suitably aligned, */
      /* is mapped, and is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS32_DEFINED == vbits32) {
         sm->vabits8[sm_off] = VA_BITS8_DEFINED;
      } else if (V_BITS32_UNDEFINED == vbits32) {
         sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(232, "mc_STOREV32-slow2");
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(233, "mc_STOREV32-slow3");
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
   }
#endif
//---------------------------------------------------------------------------
#endif
}
3731
njn4cf530b2006-04-06 13:33:48 +00003732VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003733{
njn4cf530b2006-04-06 13:33:48 +00003734 mc_STOREV32(a, vbits32, True);
njn1d0825f2006-03-27 11:37:07 +00003735}
njn4cf530b2006-04-06 13:33:48 +00003736VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003737{
njn4cf530b2006-04-06 13:33:48 +00003738 mc_STOREV32(a, vbits32, False);
njn1d0825f2006-03-27 11:37:07 +00003739}
njn25e49d8e72002-09-23 09:36:25 +00003740
njn25e49d8e72002-09-23 09:36:25 +00003741
sewardj95448072004-11-22 20:19:51 +00003742/* ------------------------ Size = 2 ------------------------ */
3743
/* Load 16 bits of V (validity) data for address 'a'.  Shared fast-path
   helper for the BE/LE entry points below; 'isBigEndian' only matters
   if we fall through to the generic slow case. */
static INLINE
UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
{
   UWord sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(240, "mc_LOADV16");

#ifndef PERF_FAST_LOADV
   return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
#else
   // Unaligned accesses, and addresses above the low primary-map range,
   // take the generic slow path.
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
      PROF_EVENT(241, "mc_LOADV16-slow1");
      return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
   }

   sm = get_secmap_for_reading_low(a);
   sm_off = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   // Handle common case quickly: a is suitably aligned, is mapped, and is
   // addressible.
   // Convert V bits from compact memory form to expanded register form
   // XXX: set the high 16/48 bits of retval to 1 for 64-bit paranoia?
   if      (vabits8 == VA_BITS8_DEFINED  ) { return V_BITS16_DEFINED;   }
   else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
   else {
      // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
      // the two sub-bytes.  (vabits8 covers the whole containing word32;
      // extract just the nibble for this halfword.)
      UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
      if      (vabits4 == VA_BITS4_DEFINED  ) { return V_BITS16_DEFINED;   }
      else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
      else {
         /* Slow case: the two bytes are not all-defined or all-undefined. */
         PROF_EVENT(242, "mc_LOADV16-slow2");
         return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
      }
   }
#endif
}
3783
3784VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
3785{
3786 return mc_LOADV16(a, True);
3787}
3788VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
3789{
3790 return mc_LOADV16(a, False);
3791}
sewardjc1a2cda2005-04-21 17:34:00 +00003792
sewardjc1a2cda2005-04-21 17:34:00 +00003793
/* Store the 16 V bits in 'vbits16' for address 'a'.  Shared fast-path
   helper for the BE/LE entry points below; 'isBigEndian' only matters
   on the generic slow path. */
static INLINE
void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
{
   UWord sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(250, "mc_STOREV16");

#ifndef PERF_FAST_STOREV
   mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
#else
   // Unaligned accesses, and addresses above the low primary-map range,
   // take the generic slow path.
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
      PROF_EVENT(251, "mc_STOREV16-slow1");
      mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
      return;
   }

   // Only written through when !is_distinguished_sm(sm), so shared
   // distinguished secondaries are never modified in place.
   sm = get_secmap_for_reading_low(a);
   sm_off = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                       (VA_BITS8_DEFINED   == vabits8 ||
                        VA_BITS8_UNDEFINED == vabits8) ))
   {
      /* Handle common case quickly: a is suitably aligned, */
      /* is mapped, and is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS16_DEFINED == vbits16) {
         insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
                                      &(sm->vabits8[sm_off]) );
      } else if (V_BITS16_UNDEFINED == vbits16) {
         insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
                                      &(sm->vabits8[sm_off]) );
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(252, "mc_STOREV16-slow2");
         mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(253, "mc_STOREV16-slow3");
      mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
   }
#endif
}
njn25e49d8e72002-09-23 09:36:25 +00003839
njn4cf530b2006-04-06 13:33:48 +00003840VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00003841{
njn4cf530b2006-04-06 13:33:48 +00003842 mc_STOREV16(a, vbits16, True);
njn1d0825f2006-03-27 11:37:07 +00003843}
njn4cf530b2006-04-06 13:33:48 +00003844VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00003845{
njn4cf530b2006-04-06 13:33:48 +00003846 mc_STOREV16(a, vbits16, False);
njn1d0825f2006-03-27 11:37:07 +00003847}
sewardj5d28efc2005-04-21 22:16:29 +00003848
njn25e49d8e72002-09-23 09:36:25 +00003849
sewardj95448072004-11-22 20:19:51 +00003850/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00003851/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00003852
/* Load 8 bits of V (validity) data for address 'a'.  Called directly
   from generated code; no endianness variants are needed for a single
   byte. */
VG_REGPARM(1)
UWord MC_(helperc_LOADV8) ( Addr a )
{
   UWord sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(260, "mc_LOADV8");

#ifndef PERF_FAST_LOADV
   return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
#else
   // Addresses above the low primary-map range take the slow path.
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
      PROF_EVENT(261, "mc_LOADV8-slow1");
      return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
   }

   sm = get_secmap_for_reading_low(a);
   sm_off = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   // Convert V bits from compact memory form to expanded register form
   // Handle common case quickly: a is mapped, and the entire
   // word32 it lives in is addressible.
   // XXX: set the high 24/56 bits of retval to 1 for 64-bit paranoia?
   if      (vabits8 == VA_BITS8_DEFINED  ) { return V_BITS8_DEFINED;   }
   else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
   else {
      // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
      // the single byte.  (vabits8 covers the whole containing word32;
      // extract just the 2 bits for this byte.)
      UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
      if      (vabits2 == VA_BITS2_DEFINED  ) { return V_BITS8_DEFINED;   }
      else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
      else {
         /* Slow case: the byte is not all-defined or all-undefined. */
         PROF_EVENT(262, "mc_LOADV8-slow2");
         return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
      }
   }
#endif
}
3892
sewardjc1a2cda2005-04-21 17:34:00 +00003893
/* Store the 8 V bits in 'vbits8' for address 'a'.  Called directly
   from generated code; no endianness variants are needed for a single
   byte. */
VG_REGPARM(2)
void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
{
   UWord sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(270, "mc_STOREV8");

#ifndef PERF_FAST_STOREV
   mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
#else
   // Addresses above the low primary-map range take the slow path.
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
      PROF_EVENT(271, "mc_STOREV8-slow1");
      mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
      return;
   }

   // Only written through when !is_distinguished_sm(sm), so shared
   // distinguished secondaries are never modified in place.
   sm = get_secmap_for_reading_low(a);
   sm_off = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   // Fast path applies when the secondary is writable and either the
   // whole containing word32 is uniformly defined/undefined, or at
   // least the target byte itself is accessible.
   if (EXPECTED_TAKEN
         ( !is_distinguished_sm(sm) &&
           ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
          || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
           )
         )
      )
   {
      /* Handle common case quickly: a is mapped, the entire word32 it
         lives in is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS8_DEFINED == vbits8) {
         insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
                                      &(sm->vabits8[sm_off]) );
      } else if (V_BITS8_UNDEFINED == vbits8) {
         insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
                                      &(sm->vabits8[sm_off]) );
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(272, "mc_STOREV8-slow2");
         mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(273, "mc_STOREV8-slow3");
      mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
   }
#endif
}
3943
3944
sewardjc859fbf2005-04-22 21:10:28 +00003945/*------------------------------------------------------------*/
3946/*--- Functions called directly from generated code: ---*/
3947/*--- Value-check failure handlers. ---*/
3948/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003949
njn5c004e42002-11-18 11:04:50 +00003950void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00003951{
njn9e63cb62005-05-08 18:34:59 +00003952 mc_record_value_error ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00003953}
3954
njn5c004e42002-11-18 11:04:50 +00003955void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00003956{
njn9e63cb62005-05-08 18:34:59 +00003957 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00003958}
3959
njn5c004e42002-11-18 11:04:50 +00003960void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00003961{
njn9e63cb62005-05-08 18:34:59 +00003962 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00003963}
3964
sewardj11bcc4e2005-04-23 22:38:38 +00003965void MC_(helperc_value_check8_fail) ( void )
3966{
njn9e63cb62005-05-08 18:34:59 +00003967 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00003968}
3969
njnaf839f52005-06-23 03:27:57 +00003970VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00003971{
njn9e63cb62005-05-08 18:34:59 +00003972 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00003973}
3974
njn25e49d8e72002-09-23 09:36:25 +00003975
sewardjc2c12c22006-03-08 13:20:09 +00003976/*------------------------------------------------------------*/
3977/*--- Metadata get/set functions, for client requests. ---*/
3978/*------------------------------------------------------------*/
3979
njn1d0825f2006-03-27 11:37:07 +00003980// Nb: this expands the V+A bits out into register-form V bits, even though
3981// they're in memory. This is for backward compatibility, and because it's
3982// probably what the user wants.
3983
/* Copy Vbits from/to address 'a'.  Returns: 1 == OK, 2 == alignment
   error [no longer used], 3 == addressing error.
     tid     -- thread to attribute any errors to
     a       -- start of the client memory range
     vbits   -- start of the client-side V-bits buffer
     szB     -- number of bytes to copy
     setting -- True <=> copy vbits buffer into shadow of 'a';
                False <=> copy shadow of 'a' into vbits buffer */
static Int mc_get_or_set_vbits_for_client (
   ThreadId tid,
   Addr a,
   Addr vbits,
   SizeT szB,
   Bool setting /* True <=> set vbits,  False <=> get vbits */
)
{
   SizeT i;
   Bool  ok;
   UChar vbits8;

   /* Check that arrays are addressible before doing any getting/setting.
      Both the data range and the vbits buffer must be accessible; an
      addressing error is recorded and 3 returned otherwise. */
   for (i = 0; i < szB; i++) {
      if (VA_BITS2_NOACCESS == get_vabits2(a + i)) {
         mc_record_address_error( tid, a + i, 1, setting ? True : False );
         return 3;
      }
      if (VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
         mc_record_address_error( tid, vbits + i, 1, setting ? False : True );
         return 3;
      }
   }

   /* Do the copy */
   if (setting) {

      // It's actually a tool ClientReq, but Vg_CoreClientReq is the closest
      // thing we have.
      check_mem_is_defined(Vg_CoreClientReq, tid, "SET_VBITS(vbits)",
                           vbits, szB);

      /* setting */
      for (i = 0; i < szB; i++) {
         ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
         tl_assert(ok);
      }
   } else {
      /* getting */
      for (i = 0; i < szB; i++) {
         ok = get_vbits8(a + i, &vbits8);
         tl_assert(ok);
// XXX: used to do this, but it's a pain
//         if (V_BITS8_DEFINED != vbits8)
//            mc_record_value_error(tid, 1);
         ((UChar*)vbits)[i] = vbits8;
      }
      // The bytes in vbits[] have now been set, so mark them as such.
      MC_(make_mem_defined)(vbits, szB);
   }

   return 1;
}
sewardj05fe85e2005-04-27 22:46:36 +00004039
4040
4041/*------------------------------------------------------------*/
4042/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
4043/*------------------------------------------------------------*/
4044
4045/* For the memory leak detector, say whether an entire 64k chunk of
4046 address space is possibly in use, or not. If in doubt return
4047 True.
4048*/
4049static
4050Bool mc_is_within_valid_secondary ( Addr a )
4051{
4052 SecMap* sm = maybe_get_secmap_for ( a );
sewardj05a46732006-10-17 01:28:10 +00004053 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
4054 || in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004055 /* Definitely not in use. */
4056 return False;
4057 } else {
4058 return True;
4059 }
4060}
4061
4062
4063/* For the memory leak detector, say whether or not a given word
4064 address is to be regarded as valid. */
4065static
4066Bool mc_is_valid_aligned_word ( Addr a )
4067{
4068 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
4069 if (sizeof(UWord) == 4) {
4070 tl_assert(VG_IS_4_ALIGNED(a));
4071 } else {
4072 tl_assert(VG_IS_8_ALIGNED(a));
4073 }
sewardj05a46732006-10-17 01:28:10 +00004074 if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok
4075 && !in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004076 return True;
4077 } else {
4078 return False;
4079 }
4080}
sewardja4495682002-10-21 07:29:59 +00004081
4082
nethercote996901a2004-08-03 13:29:09 +00004083/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00004084 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00004085 tool. */
njnb8dca862005-03-14 02:42:44 +00004086static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00004087{
njn1d0825f2006-03-27 11:37:07 +00004088 MC_(do_detect_memory_leaks) (
sewardj05fe85e2005-04-27 22:46:36 +00004089 tid,
4090 mode,
4091 mc_is_within_valid_secondary,
4092 mc_is_valid_aligned_word
4093 );
njn25e49d8e72002-09-23 09:36:25 +00004094}
4095
4096
sewardjc859fbf2005-04-22 21:10:28 +00004097/*------------------------------------------------------------*/
4098/*--- Initialisation ---*/
4099/*------------------------------------------------------------*/
4100
4101static void init_shadow_memory ( void )
4102{
4103 Int i;
4104 SecMap* sm;
4105
njn1d0825f2006-03-27 11:37:07 +00004106 tl_assert(V_BIT_UNDEFINED == 1);
4107 tl_assert(V_BIT_DEFINED == 0);
4108 tl_assert(V_BITS8_UNDEFINED == 0xFF);
4109 tl_assert(V_BITS8_DEFINED == 0);
4110
sewardjc859fbf2005-04-22 21:10:28 +00004111 /* Build the 3 distinguished secondaries */
sewardjc859fbf2005-04-22 21:10:28 +00004112 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004113 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
sewardjc859fbf2005-04-22 21:10:28 +00004114
njndbf7ca72006-03-31 11:57:59 +00004115 sm = &sm_distinguished[SM_DIST_UNDEFINED];
4116 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004117
njndbf7ca72006-03-31 11:57:59 +00004118 sm = &sm_distinguished[SM_DIST_DEFINED];
4119 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004120
4121 /* Set up the primary map. */
4122 /* These entries gradually get overwritten as the used address
4123 space expands. */
4124 for (i = 0; i < N_PRIMARY_MAP; i++)
4125 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
4126
sewardj05a46732006-10-17 01:28:10 +00004127 /* Auxiliary primary maps */
4128 init_auxmap_L1_L2();
4129
sewardjc859fbf2005-04-22 21:10:28 +00004130 /* auxmap_size = auxmap_used = 0;
4131 no ... these are statically initialised */
njn1d0825f2006-03-27 11:37:07 +00004132
4133 /* Secondary V bit table */
4134 secVBitTable = createSecVBitTable();
sewardjc859fbf2005-04-22 21:10:28 +00004135}
4136
4137
4138/*------------------------------------------------------------*/
4139/*--- Sanity check machinery (permanently engaged) ---*/
4140/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004141
njn51d827b2005-05-09 01:02:08 +00004142static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004143{
jseward9800fd32004-01-04 23:08:04 +00004144 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00004145 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00004146 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00004147 return True;
njn25e49d8e72002-09-23 09:36:25 +00004148}
4149
njn51d827b2005-05-09 01:02:08 +00004150static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004151{
sewardj05a46732006-10-17 01:28:10 +00004152 Int i;
4153 Word n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00004154 SecMap* sm;
sewardj05a46732006-10-17 01:28:10 +00004155 HChar* errmsg;
sewardj23eb2fd2005-04-22 16:29:19 +00004156 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00004157
sewardj05a46732006-10-17 01:28:10 +00004158 if (0) VG_(printf)("expensive sanity check\n");
4159 if (0) return True;
4160
sewardj23eb2fd2005-04-22 16:29:19 +00004161 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00004162 PROF_EVENT(491, "expensive_sanity_check");
4163
njn1d0825f2006-03-27 11:37:07 +00004164 /* Check that the 3 distinguished SMs are still as they should be. */
njn25e49d8e72002-09-23 09:36:25 +00004165
njndbf7ca72006-03-31 11:57:59 +00004166 /* Check noaccess DSM. */
sewardj45d94cc2005-04-20 14:44:11 +00004167 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004168 for (i = 0; i < SM_CHUNKS; i++)
4169 if (sm->vabits8[i] != VA_BITS8_NOACCESS)
sewardj23eb2fd2005-04-22 16:29:19 +00004170 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00004171
njndbf7ca72006-03-31 11:57:59 +00004172 /* Check undefined DSM. */
4173 sm = &sm_distinguished[SM_DIST_UNDEFINED];
njn1d0825f2006-03-27 11:37:07 +00004174 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004175 if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004176 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004177
njndbf7ca72006-03-31 11:57:59 +00004178 /* Check defined DSM. */
4179 sm = &sm_distinguished[SM_DIST_DEFINED];
njn1d0825f2006-03-27 11:37:07 +00004180 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004181 if (sm->vabits8[i] != VA_BITS8_DEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004182 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004183
sewardj23eb2fd2005-04-22 16:29:19 +00004184 if (bad) {
4185 VG_(printf)("memcheck expensive sanity: "
4186 "distinguished_secondaries have changed\n");
4187 return False;
4188 }
4189
njn1d0825f2006-03-27 11:37:07 +00004190 /* If we're not checking for undefined value errors, the secondary V bit
4191 * table should be empty. */
4192 if (!MC_(clo_undef_value_errors)) {
4193 if (0 != VG_(OSet_Size)(secVBitTable))
4194 return False;
4195 }
4196
sewardj05a46732006-10-17 01:28:10 +00004197 /* check the auxiliary maps, very thoroughly */
4198 n_secmaps_found = 0;
4199 errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
4200 if (errmsg) {
4201 VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
sewardj23eb2fd2005-04-22 16:29:19 +00004202 return False;
4203 }
4204
sewardj05a46732006-10-17 01:28:10 +00004205 /* n_secmaps_found is now the number referred to by the auxiliary
4206 primary map. Now add on the ones referred to by the main
4207 primary map. */
sewardj23eb2fd2005-04-22 16:29:19 +00004208 for (i = 0; i < N_PRIMARY_MAP; i++) {
sewardj05a46732006-10-17 01:28:10 +00004209 if (primary_map[i] == NULL) {
sewardj23eb2fd2005-04-22 16:29:19 +00004210 bad = True;
4211 } else {
sewardj05a46732006-10-17 01:28:10 +00004212 if (!is_distinguished_sm(primary_map[i]))
sewardj23eb2fd2005-04-22 16:29:19 +00004213 n_secmaps_found++;
4214 }
4215 }
4216
sewardj05a46732006-10-17 01:28:10 +00004217 /* check that the number of secmaps issued matches the number that
4218 are reachable (iow, no secmap leaks) */
njn1d0825f2006-03-27 11:37:07 +00004219 if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
sewardj23eb2fd2005-04-22 16:29:19 +00004220 bad = True;
4221
4222 if (bad) {
4223 VG_(printf)("memcheck expensive sanity: "
4224 "apparent secmap leakage\n");
4225 return False;
4226 }
4227
sewardj23eb2fd2005-04-22 16:29:19 +00004228 if (bad) {
4229 VG_(printf)("memcheck expensive sanity: "
4230 "auxmap covers wrong address space\n");
4231 return False;
4232 }
4233
4234 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00004235
4236 return True;
4237}
sewardj45d94cc2005-04-20 14:44:11 +00004238
njn25e49d8e72002-09-23 09:36:25 +00004239/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00004240/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00004241/*------------------------------------------------------------*/
4242
/* Memcheck command-line option values, with their defaults.  Set by
   mc_process_cmd_line_options() below; the corresponding flags are
   described in mc_print_usage(). */
Bool MC_(clo_partial_loads_ok) = False;            // --partial-loads-ok
Int MC_(clo_freelist_vol) = 5000000;               // --freelist-vol
LeakCheckMode MC_(clo_leak_check) = LC_Summary;    // --leak-check
VgRes MC_(clo_leak_resolution) = Vg_LowRes;        // --leak-resolution
Bool MC_(clo_show_reachable) = False;              // --show-reachable
Bool MC_(clo_workaround_gcc296_bugs) = False;      // --workaround-gcc296-bugs
Bool MC_(clo_undef_value_errors) = True;           // --undef-value-errors
4250
/* Parse one Memcheck-specific command-line option.  Returns True if
   'arg' was recognised and accepted, False on a malformed value;
   unrecognised options are passed on to the replacement-malloc
   option handler. */
static Bool mc_process_cmd_line_options(Char* arg)
{
   // Boolean and bounded-numeric options, handled by the VG_*_CLO
   // macro chain.
   VG_BOOL_CLO(arg, "--partial-loads-ok", MC_(clo_partial_loads_ok))
   else VG_BOOL_CLO(arg, "--show-reachable", MC_(clo_show_reachable))
   else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))

   else VG_BOOL_CLO(arg, "--undef-value-errors", MC_(clo_undef_value_errors))

   else VG_BNUM_CLO(arg, "--freelist-vol", MC_(clo_freelist_vol), 0, 1000000000)

   // --leak-check: "yes" is accepted as a synonym for "full".
   else if (VG_CLO_STREQ(arg, "--leak-check=no"))
      MC_(clo_leak_check) = LC_Off;
   else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
      MC_(clo_leak_check) = LC_Summary;
   else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
            VG_CLO_STREQ(arg, "--leak-check=full"))
      MC_(clo_leak_check) = LC_Full;

   else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
      MC_(clo_leak_resolution) = Vg_LowRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
      MC_(clo_leak_resolution) = Vg_MedRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
      MC_(clo_leak_resolution) = Vg_HighRes;

   // --ignore-ranges=0xPP-0xQQ[,...]: parse, then validate each range
   // (end must exceed start; size capped at an arbitrary 64M limit).
   else if (VG_CLO_STREQN(16,arg,"--ignore-ranges=")) {
      Int i;
      UChar* txt = (UChar*)(arg+16);
      Bool ok = parse_ignore_ranges(txt);
      if (!ok)
         return False;
      tl_assert(ignoreRanges.used >= 0);
      tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
      for (i = 0; i < ignoreRanges.used; i++) {
         Addr s = ignoreRanges.start[i];
         Addr e = ignoreRanges.end[i];
         Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
         if (e <= s) {
            VG_(message)(Vg_DebugMsg,
               "ERROR: --ignore-ranges: end <= start in range:");
            VG_(message)(Vg_DebugMsg,
               " 0x%lx-0x%lx", s, e);
            return False;
         }
         if (e - s > limit) {
            VG_(message)(Vg_DebugMsg,
               "ERROR: --ignore-ranges: suspiciously large range:");
            VG_(message)(Vg_DebugMsg,
               " 0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
            return False;
         }
      }
   }

   // Not a Memcheck option: maybe a replacement-malloc one.
   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4310
/* Print Memcheck's user-visible command line options (for --help),
   followed by those of the replacement-malloc machinery. */
static void mc_print_usage(void)
{
   VG_(printf)(
" --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
" --undef-value-errors=no|yes check for undefined value errors [yes]\n"
" --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
" --freelist-vol=<number> volume of freed blocks queue [5000000]\n"
" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
" --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS] assume given addresses are OK\n"
   );
   VG_(replacement_malloc_print_usage)();
}
4325
/* Print debugging-only options (for --help-debug); Memcheck adds none
   of its own beyond the replacement-malloc ones. */
static void mc_print_debug_usage(void)
{
   VG_(replacement_malloc_print_debug_usage)();
}
4330
sewardjf3418c02005-11-08 14:10:24 +00004331
/*------------------------------------------------------------*/
/*--- Client requests                                      ---*/
/*------------------------------------------------------------*/

/* Client block management:

   This is managed as an expanding array of client block descriptors.
   Indices of live descriptors are issued to the client, so it can ask
   to free them later.  Therefore we cannot slide live entries down
   over dead ones.  Instead we must use free/inuse flags and scan for
   an empty slot at allocation time.  This in turn means allocation is
   relatively expensive, so we hope this does not happen too often.

   An unused block has start == size == 0
*/

/* One client-described ("general") block, created by the
   VG_USERREQ__CREATE_BLOCK client request. */
typedef
   struct {
      Addr        start;  /* first address of the block                */
      SizeT       size;   /* length in bytes                           */
      ExeContext* where;  /* context of the CREATE_BLOCK request       */
      Char*       desc;   /* client-supplied description (strdup'd)    */
   }
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt       cgb_size = 0;     /* capacity of the cgbs array      */
static UInt       cgb_used = 0;     /* slots handed out so far         */
static CGenBlock* cgbs     = NULL;  /* the descriptor array itself     */

/* Stats for this subsystem. */
static UInt cgb_used_MAX = 0;   /* Max in use. */
static UInt cgb_allocs   = 0;   /* Number of allocs. */
static UInt cgb_discards = 0;   /* Number of discards. */
static UInt cgb_search   = 0;   /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00004367
4368
4369static
njn695c16e2005-03-27 03:40:28 +00004370Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00004371{
4372 UInt i, sz_new;
4373 CGenBlock* cgbs_new;
4374
njn695c16e2005-03-27 03:40:28 +00004375 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00004376
njn695c16e2005-03-27 03:40:28 +00004377 for (i = 0; i < cgb_used; i++) {
4378 cgb_search++;
4379 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004380 return i;
4381 }
4382
4383 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00004384 if (cgb_used < cgb_size) {
4385 cgb_used++;
4386 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004387 }
4388
4389 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00004390 tl_assert(cgb_used == cgb_size);
4391 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00004392
4393 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00004394 for (i = 0; i < cgb_used; i++)
4395 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00004396
njn695c16e2005-03-27 03:40:28 +00004397 if (cgbs != NULL)
4398 VG_(free)( cgbs );
4399 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00004400
njn695c16e2005-03-27 03:40:28 +00004401 cgb_size = sz_new;
4402 cgb_used++;
4403 if (cgb_used > cgb_used_MAX)
4404 cgb_used_MAX = cgb_used;
4405 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004406}
4407
4408
/* Print the client-block subsystem counters as a single DebugMsg
   line (called from mc_fini, currently behind 'if (0)'). */
static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
   );
}
4416
/* If address 'a' falls inside any client-described block, fill *ai
   with a description (kind, size, offset, allocation context) and
   return True; otherwise return False.  A client block whose start
   address is registered as a mempool is described more precisely:
   first as a chunk within that pool if 'a' hits one, else as the
   pool itself. */
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
   UInt i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;   /* discarded slot */
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         /* OK - maybe it's a mempool, too? */
         MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
                                         (UWord)cgbs[i].start);
         if (mp != NULL) {
            if (mp->chunks != NULL) {
               /* Walk the pool's chunks looking for one containing 'a'. */
               MC_Chunk* mc;
               VG_(HT_ResetIter)(mp->chunks);
               while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
                  if (addr_is_in_MC_Chunk(mc, a)) {
                     ai->akind      = UserG;
                     ai->blksize    = mc->size;
                     ai->rwoffset   = (Int)(a) - (Int)mc->data;
                     ai->lastchange = mc->where;
                     return True;
                  }
               }
            }
            /* In the pool's client block but not in any chunk. */
            ai->akind      = Mempool;
            ai->blksize    = cgbs[i].size;
            ai->rwoffset   = (Int)(a) - (Int)(cgbs[i].start);
            ai->lastchange = cgbs[i].where;
            return True;
         }
         /* A plain CREATE_BLOCK-described block. */
         ai->akind      = UserG;
         ai->blksize    = cgbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(cgbs[i].start);
         ai->lastchange = cgbs[i].where;
         ai->desc       = cgbs[i].desc;
         return True;
      }
   }
   return False;
}
4461
/* Handle a client request aimed at Memcheck.  arg[0] is the request
   code, arg[1..] the request-specific operands; the reply (if any)
   goes in *ret.  Returns True if the request was serviced, False to
   let the core try other handlers. */
static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
{
   Int   i;
   Bool  ok;
   Addr  bad_addr;

   /* Bail out quickly unless it's an 'MC'-prefixed request or one of
      the generic malloc-like / mempool requests we also service. */
   if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
    && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
    && VG_USERREQ__FREELIKE_BLOCK   != arg[0]
    && VG_USERREQ__CREATE_MEMPOOL   != arg[0]
    && VG_USERREQ__DESTROY_MEMPOOL  != arg[0]
    && VG_USERREQ__MEMPOOL_ALLOC    != arg[0]
    && VG_USERREQ__MEMPOOL_FREE     != arg[0]
    && VG_USERREQ__MEMPOOL_TRIM     != arg[0]
    && VG_USERREQ__MOVE_MEMPOOL     != arg[0]
    && VG_USERREQ__MEMPOOL_CHANGE   != arg[0]
    && VG_USERREQ__MEMPOOL_EXISTS   != arg[0])
      return False;

   switch (arg[0]) {
      /* Check [arg[1], arg[1]+arg[2]) is addressable; on failure,
         record a user error and reply with the first bad address. */
      case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
         ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
         if (!ok)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
                                   /*isUnaddr*/True );
         *ret = ok ? (UWord)NULL : bad_addr;
         break;

      /* As above, but the bytes must also be defined. */
      case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
         MC_ReadResult res;
         res = is_mem_defined ( arg[1], arg[2], &bad_addr );
         if (MC_AddrErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/True );
         else if (MC_ValueErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/False );
         *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
         break;
      }

      /* Run a leak check now; arg[1] nonzero selects summary mode. */
      case VG_USERREQ__DO_LEAK_CHECK:
         mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
         *ret = 0; /* return value is meaningless */
         break;

      /* The three requests below change the A/V state of the range
         [arg[1], arg[1]+arg[2]). */
      case VG_USERREQ__MAKE_MEM_NOACCESS:
         MC_(make_mem_noaccess) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_UNDEFINED:
         MC_(make_mem_undefined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED:
         MC_(make_mem_defined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
         make_mem_defined_if_addressable ( arg[1], arg[2] );
         *ret = -1;
         break;

      /* Record a described block at [arg[1], arg[1]+arg[2]) with
         description string arg[3]; reply is the slot index, or -1 if
         start or size is zero. */
      case VG_USERREQ__CREATE_BLOCK: /* describe a block */
         if (arg[1] != 0 && arg[2] != 0) {
            i = alloc_client_block();
            /* VG_(printf)("allocated %d %p\n", i, cgbs); */
            cgbs[i].start = arg[1];
            cgbs[i].size  = arg[2];
            cgbs[i].desc  = VG_(strdup)((Char *)arg[3]);
            cgbs[i].where = VG_(record_ExeContext) ( tid );

            *ret = i;
         } else
            *ret = -1;
         break;

      /* Forget a block created above; reply 0 on success, 1 if the
         index in arg[2] is bogus or already discarded. */
      case VG_USERREQ__DISCARD: /* discard */
         if (cgbs == NULL
             || arg[2] >= cgb_used ||
             (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
            *ret = 1;
         } else {
            tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
            cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
            VG_(free)(cgbs[arg[2]].desc);
            cgb_discards++;
            *ret = 0;
         }
         break;

      case VG_USERREQ__GET_VBITS:
         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
            error. */
         /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], False /* get them */ );
         break;

      case VG_USERREQ__SET_VBITS:
         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
            error. */
         /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], True /* set them */ );
         break;

      case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
         UWord** argp = (UWord**)arg;
         // MC_(bytes_leaked) et al were set by the last leak check (or zero
         // if no prior leak checks performed).
         *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
         *argp[2] = MC_(bytes_dubious);
         *argp[3] = MC_(bytes_reachable);
         *argp[4] = MC_(bytes_suppressed);
         // there is no argp[5]
         //*argp[5] = MC_(bytes_indirect);
         // XXX need to make *argp[1-4] defined
         *ret = 0;
         return True;
      }
      /* Client says it allocated a block itself; shadow it like a
         custom-allocator heap block. */
      case VG_USERREQ__MALLOCLIKE_BLOCK: {
         Addr p         = (Addr)arg[1];
         SizeT sizeB    =       arg[2];
         UInt rzB       =       arg[3];
         Bool is_zeroed = (Bool)arg[4];

         MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed,
                          MC_AllocCustom, MC_(malloc_list) );
         return True;
      }
      case VG_USERREQ__FREELIKE_BLOCK: {
         Addr p   = (Addr)arg[1];
         UInt rzB =       arg[2];

         MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
         return True;
      }

      /* Used by the mem*/str* replacements to report overlaps. */
      case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
         Char*         s     = (Char*)        arg[1];
         OverlapExtra* extra = (OverlapExtra*)arg[2];
         mc_record_overlap_error(tid, s, extra);
         return True;
      }

      /* Mempool management requests; all delegate to MC_(...) helpers. */
      case VG_USERREQ__CREATE_MEMPOOL: {
         Addr pool      = (Addr)arg[1];
         UInt rzB       =       arg[2];
         Bool is_zeroed = (Bool)arg[3];

         MC_(create_mempool) ( pool, rzB, is_zeroed );
         return True;
      }

      case VG_USERREQ__DESTROY_MEMPOOL: {
         Addr pool = (Addr)arg[1];

         MC_(destroy_mempool) ( pool );
         return True;
      }

      case VG_USERREQ__MEMPOOL_ALLOC: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];
         UInt size =       arg[3];

         MC_(mempool_alloc) ( tid, pool, addr, size );
         return True;
      }

      case VG_USERREQ__MEMPOOL_FREE: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];

         MC_(mempool_free) ( pool, addr );
         return True;
      }

      case VG_USERREQ__MEMPOOL_TRIM: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];
         UInt size =       arg[3];

         MC_(mempool_trim) ( pool, addr, size );
         return True;
      }

      case VG_USERREQ__MOVE_MEMPOOL: {
         Addr poolA = (Addr)arg[1];
         Addr poolB = (Addr)arg[2];

         MC_(move_mempool) ( poolA, poolB );
         return True;
      }

      case VG_USERREQ__MEMPOOL_CHANGE: {
         Addr pool  = (Addr)arg[1];
         Addr addrA = (Addr)arg[2];
         Addr addrB = (Addr)arg[3];
         UInt size  =       arg[4];

         MC_(mempool_change) ( pool, addrA, addrB, size );
         return True;
      }

      case VG_USERREQ__MEMPOOL_EXISTS: {
         Addr pool = (Addr)arg[1];

         *ret = (UWord) MC_(mempool_exists) ( pool );
         return True;
      }


      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown memcheck client request code %llx",
                      (ULong)arg[0]);
         return False;
   }
   return True;
}
njn25e49d8e72002-09-23 09:36:25 +00004687
/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

// We track a number of interesting events (using PROF_EVENT)
// if MC_PROFILE_MEMORY is defined.

#ifdef MC_PROFILE_MEMORY

UInt   MC_(event_ctr)[N_PROF_EVENTS];
HChar* MC_(event_ctr_name)[N_PROF_EVENTS];

/* Zero all event counters and clear their names at startup. */
static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      MC_(event_ctr)[i] = 0;
      MC_(event_ctr_name)[i] = NULL;
   }
}

/* At shutdown, print every event counter that fired; a blank line is
   emitted before each group of 10 event numbers (suppressed for
   consecutive all-zero groups via 'spaced'). */
static void done_prof_mem ( void )
{
   Int  i;
   Bool spaced = False;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if (!spaced && (i % 10) == 0) {
         VG_(printf)("\n");
         spaced = True;
      }
      if (MC_(event_ctr)[i] > 0) {
         spaced = False;
         VG_(printf)( "prof mem event %3d: %9d %s\n",
                      i, MC_(event_ctr)[i],
                      MC_(event_ctr_name)[i]
                         ? MC_(event_ctr_name)[i] : "unnamed");
      }
   }
}

#else

/* Profiling disabled: no-op stubs so callers need no #ifdefs. */
static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#endif
4734
4735/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00004736/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00004737/*------------------------------------------------------------*/
4738
/* Adjust options after command line processing. */
static void mc_post_clo_init ( void )
{
   /* If we've been asked to emit XML, mash around various other
      options so as to constrain the output somewhat. */
   if (VG_(clo_xml)) {
      /* Extract as much info as possible from the leak checker. */
      /* MC_(clo_show_reachable) = True; */
      MC_(clo_leak_check) = LC_Full;
   }
}
4749
njn1d0825f2006-03-27 11:37:07 +00004750static void print_SM_info(char* type, int n_SMs)
4751{
4752 VG_(message)(Vg_DebugMsg,
4753 " memcheck: SMs: %s = %d (%dk, %dM)",
4754 type,
4755 n_SMs,
4756 n_SMs * sizeof(SecMap) / 1024,
4757 n_SMs * sizeof(SecMap) / (1024 * 1024) );
4758}
4759
njn51d827b2005-05-09 01:02:08 +00004760static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00004761{
njn1d0825f2006-03-27 11:37:07 +00004762 MC_(print_malloc_stats)();
sewardj23eb2fd2005-04-22 16:29:19 +00004763
njn1d0825f2006-03-27 11:37:07 +00004764 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4765 if (MC_(clo_leak_check) == LC_Off)
4766 VG_(message)(Vg_UserMsg,
4767 "For a detailed leak analysis, rerun with: --leak-check=yes");
4768
4769 VG_(message)(Vg_UserMsg,
4770 "For counts of detected errors, rerun with: -v");
4771 }
4772 if (MC_(clo_leak_check) != LC_Off)
4773 mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));
4774
4775 done_prof_mem();
sewardjae986ca2005-10-12 12:53:20 +00004776
sewardj45d94cc2005-04-20 14:44:11 +00004777 if (VG_(clo_verbosity) > 1) {
njn1d0825f2006-03-27 11:37:07 +00004778 SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
4779
sewardj45d94cc2005-04-20 14:44:11 +00004780 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00004781 " memcheck: sanity checks: %d cheap, %d expensive",
4782 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00004783 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00004784 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
sewardj05a46732006-10-17 01:28:10 +00004785 n_auxmap_L2_nodes,
4786 n_auxmap_L2_nodes * 64,
4787 n_auxmap_L2_nodes / 16 );
sewardj23eb2fd2005-04-22 16:29:19 +00004788 VG_(message)(Vg_DebugMsg,
sewardj05a46732006-10-17 01:28:10 +00004789 " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
4790 n_auxmap_L1_searches, n_auxmap_L1_cmps,
4791 (10ULL * n_auxmap_L1_cmps)
4792 / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
4793 );
4794 VG_(message)(Vg_DebugMsg,
4795 " memcheck: auxmaps_L2: %lld searches, %lld nodes",
4796 n_auxmap_L2_searches, n_auxmap_L2_nodes
4797 );
sewardj23eb2fd2005-04-22 16:29:19 +00004798
njndbf7ca72006-03-31 11:57:59 +00004799 print_SM_info("n_issued ", n_issued_SMs);
4800 print_SM_info("n_deissued ", n_deissued_SMs);
4801 print_SM_info("max_noaccess ", max_noaccess_SMs);
4802 print_SM_info("max_undefined", max_undefined_SMs);
4803 print_SM_info("max_defined ", max_defined_SMs);
4804 print_SM_info("max_non_DSM ", max_non_DSM_SMs);
njn1d0825f2006-03-27 11:37:07 +00004805
4806 // Three DSMs, plus the non-DSM ones
4807 max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
4808 // The 3*sizeof(Word) bytes is the AVL node metadata size.
4809 // The 4*sizeof(Word) bytes is the malloc metadata size.
4810 // Hardwiring these sizes in sucks, but I don't see how else to do it.
4811 max_secVBit_szB = max_secVBit_nodes *
4812 (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
4813 max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
sewardj23eb2fd2005-04-22 16:29:19 +00004814
4815 VG_(message)(Vg_DebugMsg,
njn1d0825f2006-03-27 11:37:07 +00004816 " memcheck: max sec V bit nodes: %d (%dk, %dM)",
4817 max_secVBit_nodes, max_secVBit_szB / 1024,
4818 max_secVBit_szB / (1024 * 1024));
4819 VG_(message)(Vg_DebugMsg,
4820 " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
4821 sec_vbits_new_nodes + sec_vbits_updates,
4822 sec_vbits_new_nodes, sec_vbits_updates );
4823 VG_(message)(Vg_DebugMsg,
4824 " memcheck: max shadow mem size: %dk, %dM",
4825 max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
sewardj45d94cc2005-04-20 14:44:11 +00004826 }
4827
njn5c004e42002-11-18 11:04:50 +00004828 if (0) {
4829 VG_(message)(Vg_DebugMsg,
4830 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00004831 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00004832 }
njn25e49d8e72002-09-23 09:36:25 +00004833}
4834
/* Register Memcheck with the Valgrind core: tool details, "needs"
   (errors, client requests, malloc replacement, ...), and all
   memory/register event trackers.  Runs before command line
   processing. */
static void mc_pre_clo_init(void)
{
   VG_(details_name)            ("Memcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2006, and GNU GPL'd, by Julian Seward et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 556 );

   VG_(basic_tool_funcs)          (mc_post_clo_init,
                                   MC_(instrument),
                                   mc_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (mc_eq_Error,
                                   mc_pp_Error,
                                   mc_update_extra,
                                   mc_recognised_suppression,
                                   mc_read_extra_suppression_info,
                                   mc_error_matches_suppression,
                                   mc_get_error_name,
                                   mc_print_extra_suppression_info);
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)(mc_process_cmd_line_options,
                                   mc_print_usage,
                                   mc_print_debug_usage);
   VG_(needs_client_requests)     (mc_handle_client_request);
   VG_(needs_sanity_checks)       (mc_cheap_sanity_check,
                                   mc_expensive_sanity_check);
   VG_(needs_malloc_replacement)  (MC_(malloc),
                                   MC_(__builtin_new),
                                   MC_(__builtin_vec_new),
                                   MC_(memalign),
                                   MC_(calloc),
                                   MC_(free),
                                   MC_(__builtin_delete),
                                   MC_(__builtin_vec_delete),
                                   MC_(realloc),
                                   MC_MALLOC_REDZONE_SZB );
   VG_(needs_xml_output)          ();

   /* Trackers for memory becoming valid/accessible. */
   VG_(track_new_mem_startup)     ( mc_new_mem_startup );
   VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
   VG_(track_new_mem_brk)         ( MC_(make_mem_undefined) );
   VG_(track_new_mem_mmap)        ( mc_new_mem_mmap );

   VG_(track_copy_mem_remap)      ( MC_(copy_address_range_state) );

   // Nb: we don't do anything with mprotect. This means that V bits are
   // preserved if a program, for example, marks some memory as inaccessible
   // and then later marks it as accessible again.
   //
   // If an access violation occurs (eg. writing to read-only memory) we let
   // it fault and print an informative termination message. This doesn't
   // happen if the program catches the signal, though, which is bad. If we
   // had two A bits (for readability and writability) that were completely
   // distinct from V bits, then we could handle all this properly.
   VG_(track_change_mem_mprotect) ( NULL );

   /* Trackers for memory becoming inaccessible. */
   VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
   VG_(track_die_mem_brk)         ( MC_(make_mem_noaccess) );
   VG_(track_die_mem_munmap)      ( MC_(make_mem_noaccess) );

   /* Specialised fast-path handlers for common stack adjustment
      sizes, compiled in only when PERF_FAST_STACK is defined. */
#ifdef PERF_FAST_STACK
   VG_(track_new_mem_stack_4)     ( mc_new_mem_stack_4   );
   VG_(track_new_mem_stack_8)     ( mc_new_mem_stack_8   );
   VG_(track_new_mem_stack_12)    ( mc_new_mem_stack_12  );
   VG_(track_new_mem_stack_16)    ( mc_new_mem_stack_16  );
   VG_(track_new_mem_stack_32)    ( mc_new_mem_stack_32  );
   VG_(track_new_mem_stack_112)   ( mc_new_mem_stack_112 );
   VG_(track_new_mem_stack_128)   ( mc_new_mem_stack_128 );
   VG_(track_new_mem_stack_144)   ( mc_new_mem_stack_144 );
   VG_(track_new_mem_stack_160)   ( mc_new_mem_stack_160 );
#endif
   VG_(track_new_mem_stack)       ( mc_new_mem_stack     );

#ifdef PERF_FAST_STACK
   VG_(track_die_mem_stack_4)     ( mc_die_mem_stack_4   );
   VG_(track_die_mem_stack_8)     ( mc_die_mem_stack_8   );
   VG_(track_die_mem_stack_12)    ( mc_die_mem_stack_12  );
   VG_(track_die_mem_stack_16)    ( mc_die_mem_stack_16  );
   VG_(track_die_mem_stack_32)    ( mc_die_mem_stack_32  );
   VG_(track_die_mem_stack_112)   ( mc_die_mem_stack_112 );
   VG_(track_die_mem_stack_128)   ( mc_die_mem_stack_128 );
   VG_(track_die_mem_stack_144)   ( mc_die_mem_stack_144 );
   VG_(track_die_mem_stack_160)   ( mc_die_mem_stack_160 );
#endif
   VG_(track_die_mem_stack)       ( mc_die_mem_stack     );

   VG_(track_ban_mem_stack)       ( MC_(make_mem_noaccess) );

   /* Definedness/addressability checks around memory accesses. */
   VG_(track_pre_mem_read)        ( check_mem_is_defined );
   VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
   VG_(track_pre_mem_write)       ( check_mem_is_addressable );
   VG_(track_post_mem_write)      ( mc_post_mem_write );

   /* NOTE(review): this runs before option parsing, so the test below
      sees clo_undef_value_errors' default (True) — confirm intended. */
   if (MC_(clo_undef_value_errors))
      VG_(track_pre_reg_read)     ( mc_pre_reg_read );

   VG_(track_post_reg_write)                  ( mc_post_reg_write );
   VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );

   init_shadow_memory();
   MC_(malloc_list)  = VG_(HT_construct)( 80021 );   // prime, big
   MC_(mempool_list) = VG_(HT_construct)( 1009 );    // prime, not so big
   init_prof_mem();

   tl_assert( mc_expensive_sanity_check() );

   // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
   tl_assert(sizeof(UWord) == sizeof(Addr));
   // Call me paranoid. I don't care.
   tl_assert(sizeof(void*) == sizeof(Addr));

   // BYTES_PER_SEC_VBIT_NODE must be a power of two.
   tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
}
4953
sewardj45f4e7c2005-09-27 19:20:21 +00004954VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00004955
njn25e49d8e72002-09-23 09:36:25 +00004956/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004957/*--- end ---*/
njn25e49d8e72002-09-23 09:36:25 +00004958/*--------------------------------------------------------------------*/