
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the           ---*/
/*--- accessibility (A) and validity (V) status of each byte.      ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2007 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_oset.h"

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */

#ifdef HAVE_BUILTIN_EXPECT
#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
#else
#define EXPECTED_TAKEN(cond)     (cond)
#define EXPECTED_NOT_TAKEN(cond) (cond)
#endif

/* Set to 1 to do a little more sanity checking */
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Fast-case knobs                                      ---*/
/*------------------------------------------------------------*/

// Comment these out to disable the fast cases (don't just set them to zero).

#define PERF_FAST_LOADV    1
#define PERF_FAST_STOREV   1

#define PERF_FAST_SARP     1

#define PERF_FAST_STACK    1
#define PERF_FAST_STACK2   1

/*------------------------------------------------------------*/
/*--- V bits and A bits                                    ---*/
/*------------------------------------------------------------*/

/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
   thinks the corresponding value bit is defined.  And every memory byte
   has an A bit, which tracks whether Memcheck thinks the program can access
   it safely.  So every N-bit register is shadowed with N V bits, and every
   memory byte is shadowed with 8 V bits and one A bit.

   In the implementation, we use two forms of compression (compressed V bits
   and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
   for memory.

   Memcheck also tracks extra information about each heap block that is
   allocated, for detecting memory leaks and other purposes.
*/

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map (a.k.a. shadow
   memory), which records the state of all memory in the process.

   On 32-bit machines the memory map is organised as follows.
   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by two bits (details are below).  So
   each second-level map contains 16384 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of size
   64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since many of the 64kB chunks will
   have the same status for every bit -- ie. noaccess (for unused
   address space) or entirely addressable and defined (for code segments) --
   there are three distinguished secondary maps, which indicate 'noaccess',
   'undefined' and 'defined'.  For these uniform 64kB chunks, the primary
   map entry points to the relevant distinguished map.  In practice,
   typically more than half of the addressable memory is represented with
   the 'undefined' or 'defined' distinguished secondary map, so it gives a
   good saving.  It also lets us set the V+A bits of large address regions
   quickly in set_address_range_perms().

   On 64-bit machines it's more complicated.  If we followed the same basic
   scheme we'd have a four-level table which would require too many memory
   accesses.  So instead the top-level map table has 2^19 entries (indexed
   using bits 16..34 of the address); this covers the bottom 32GB.  Any
   accesses above 32GB are handled with a slow, sparse auxiliary table.
   Valgrind's address space manager tries very hard to keep things below
   this 32GB barrier so that performance doesn't suffer too much.

   Note that this file has a lot of different functions for reading and
   writing shadow memory.  Only a couple are strictly necessary (eg.
   get_vabits2 and set_vabits2); most are just specialised for specific
   common cases to improve performance.

   Aside: the V+A bits are less precise than they could be -- we have no way
   of marking memory as read-only.  It would be great if we could add an
   extra state VA_BITSn_READONLY.  But then we'd have 5 different states,
   which requires 2.3 bits to hold, and there's no way to do that elegantly
   -- we'd have to double up to 4 bits of metadata per byte, which doesn't
   seem worth it.
*/
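
/* A worked example of the 32-bit scheme above (arbitrary address, given
   purely for illustration): address 0x11223344 uses its top 16 bits
   (0x1122) to index the primary map, and its bottom 16 bits (0x3344) to
   select one of the 65536 bytes covered by the chosen secondary map.
   Since each byte's state fits in 2 bits, a secondary map needs
   65536/4 == 16384 bytes of storage -- which is SM_CHUNKS below. */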

/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Secondary maps --------------- */

// Each byte of memory conceptually has an A bit, which indicates its
// addressability, and 8 V bits, which indicate its definedness.
//
// But because very few bytes are partially defined, we can use a nice
// compression scheme to reduce the size of shadow memory.  Each byte of
// memory has 2 bits which indicate its state (ie. V+A bits):
//
//   00:  noaccess    (unaddressable but treated as fully defined)
//   01:  undefined   (addressable and fully undefined)
//   10:  defined     (addressable and fully defined)
//   11:  partdefined (addressable and partially defined)
//
// In the "partdefined" case, we use a secondary table to store the V bits.
// Each entry in the secondary-V-bits table maps a byte address to its 8 V
// bits.
//
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
// four bytes (32 bits) of memory are in each chunk.  Hence the name
// "vabits8".  This lets us get the V+A bits for four bytes at a time
// easily (without having to do any shifting and/or masking), and that is a
// very common operation.  (Note that although each vabits8 chunk
// is 8 bits in size, it represents 32 bits of memory.)
//
// The representation is "inverse" little-endian... each 4 bytes of
// memory is represented by a 1 byte value, where:
//
// - the status of byte (a+0) is held in bits [1..0]
// - the status of byte (a+1) is held in bits [3..2]
// - the status of byte (a+2) is held in bits [5..4]
// - the status of byte (a+3) is held in bits [7..6]
//
// It's "inverse" because endianness normally describes a mapping from
// value bits to memory addresses;  in this case the mapping is inverted.
// Ie. instead of particular value bits being held in certain addresses, in
// this case certain addresses are represented by particular value bits.
// See insert_vabits2_into_vabits8() for an example.
//
// But note that we don't compress the V bits stored in registers;  they
// need to be explicit to make the shadow operations possible.  Therefore
// when moving values between registers and memory we need to convert
// between the expanded in-register format and the compressed in-memory
// format.  This isn't so difficult, it just requires careful attention in a
// few places.

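// For illustration (an arbitrary example, not tied to any particular use
// site): if the 4-byte group starting at 4-aligned address 'a' has byte
// a+1 undefined (01b) and bytes a+0, a+2 and a+3 defined (10b), the packed
// encoding is  vabits8 == 10_10_01_10b == 0xa6,  ie. bits [3..2] hold the
// state of a+1, exactly as laid out above.
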
// These represent eight bits of memory.
#define VA_BITS2_NOACCESS     0x0      // 00b
#define VA_BITS2_UNDEFINED    0x1      // 01b
#define VA_BITS2_DEFINED      0x2      // 10b
#define VA_BITS2_PARTDEFINED  0x3      // 11b

// These represent 16 bits of memory.
#define VA_BITS4_NOACCESS     0x0      // 00_00b
#define VA_BITS4_UNDEFINED    0x5      // 01_01b
#define VA_BITS4_DEFINED      0xa      // 10_10b

// These represent 32 bits of memory.
#define VA_BITS8_NOACCESS     0x00     // 00_00_00_00b
#define VA_BITS8_UNDEFINED    0x55     // 01_01_01_01b
#define VA_BITS8_DEFINED      0xaa     // 10_10_10_10b

// These represent 64 bits of memory.
#define VA_BITS16_NOACCESS    0x0000   // 00_00_00_00b x 2
#define VA_BITS16_UNDEFINED   0x5555   // 01_01_01_01b x 2
#define VA_BITS16_DEFINED     0xaaaa   // 10_10_10_10b x 2


#define SM_CHUNKS             16384
#define SM_OFF(aaa)           (((aaa) & 0xffff) >> 2)
#define SM_OFF_16(aaa)        (((aaa) & 0xffff) >> 3)
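
// For example (arbitrary address): for an address whose low 16 bits are
// 0x3344, SM_OFF gives 0x3344 >> 2 == 0xcd1 (each vabits8 byte covers 4
// client bytes) and SM_OFF_16 gives 0x3344 >> 3 == 0x668 (each 16-bit read
// of the vabits8[] array covers 8 client bytes, as used by the word-sized
// semi-fast paths further down).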

// Paranoia:  it's critical for performance that the requested inlining
// occurs.  So try extra hard.
#define INLINE    inline __attribute__((always_inline))

static INLINE Addr start_of_this_sm ( Addr a ) {
   return (a & (~SM_MASK));
}
static INLINE Bool is_start_of_sm ( Addr a ) {
   return (start_of_this_sm(a) == a);
}

typedef
   struct {
      UChar vabits8[SM_CHUNKS];
   }
   SecMap;

// 3 distinguished secondary maps, one for no-access, one for
// accessible but undefined, and one for accessible and defined.
// Distinguished secondaries may never be modified.
#define SM_DIST_NOACCESS   0
#define SM_DIST_UNDEFINED  1
#define SM_DIST_DEFINED    2

static SecMap sm_distinguished[3];

static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

// Forward declaration
static void update_SM_counts(SecMap* oldSM, SecMap* newSM);

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
          || dist_sm == &sm_distinguished[1]
          || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
   if (new_sm == NULL)
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
                                   sizeof(SecMap) );
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   update_SM_counts(dist_sm, new_sm);
   return new_sm;
}

/* --------------- Stats --------------- */

static Int   n_issued_SMs      = 0;
static Int   n_deissued_SMs    = 0;
static Int   n_noaccess_SMs    = N_PRIMARY_MAP; // start with many noaccess DSMs
static Int   n_undefined_SMs   = 0;
static Int   n_defined_SMs     = 0;
static Int   n_non_DSM_SMs     = 0;
static Int   max_noaccess_SMs  = 0;
static Int   max_undefined_SMs = 0;
static Int   max_defined_SMs   = 0;
static Int   max_non_DSM_SMs   = 0;

/* # searches initiated in auxmap_L1, and # base cmps required */
static ULong n_auxmap_L1_searches  = 0;
static ULong n_auxmap_L1_cmps      = 0;
/* # of searches that missed in auxmap_L1 and therefore had to
   be handed to auxmap_L2.  And the number of nodes inserted. */
static ULong n_auxmap_L2_searches  = 0;
static ULong n_auxmap_L2_nodes     = 0;

static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;

static Int   n_secVBit_nodes   = 0;
static Int   max_secVBit_nodes = 0;

static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
{
   if      (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
   else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
   else if (oldSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs  --;
   else                                                  { n_non_DSM_SMs  --;
                                                           n_deissued_SMs ++; }

   if      (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
   else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
   else if (newSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs  ++;
   else                                                  { n_non_DSM_SMs  ++;
                                                           n_issued_SMs   ++; }

   if (n_noaccess_SMs  > max_noaccess_SMs ) max_noaccess_SMs  = n_noaccess_SMs;
   if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
   if (n_defined_SMs   > max_defined_SMs  ) max_defined_SMs   = n_defined_SMs;
   if (n_non_DSM_SMs   > max_non_DSM_SMs  ) max_non_DSM_SMs   = n_non_DSM_SMs;
}

/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.  DO NOT CHANGE THIS
   LAYOUT: the first word has to be the key for OSet fast lookups.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* Tunable parameter: How big is the L1 queue? */
#define N_AUXMAP_L1 24

/* Tunable parameter: How far along the L1 queue to insert
   entries resulting from L2 lookups? */
#define AUXMAP_L1_INSERT_IX 12

static struct {
          Addr       base;
          AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
       }
       auxmap_L1[N_AUXMAP_L1];

static OSet* auxmap_L2 = NULL;

static void init_auxmap_L1_L2 ( void )
{
   Int i;
   for (i = 0; i < N_AUXMAP_L1; i++) {
      auxmap_L1[i].base = 0;
      auxmap_L1[i].ent  = NULL;
   }

   tl_assert(0 == offsetof(AuxMapEnt,base));
   tl_assert(sizeof(Addr) == sizeof(void*));
   auxmap_L2 = VG_(OSetGen_Create)( /*keyOff*/  offsetof(AuxMapEnt,base),
                                    /*fastCmp*/ NULL,
                                    VG_(malloc), VG_(free) );
}

/* Check representation invariants; if OK return NULL; else a
   descriptive bit of text.  Also return the number of
   non-distinguished secondary maps referred to from the auxiliary
   primary maps. */

static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
{
   Word i, j;
   /* On a 32-bit platform, the L2 and L1 tables should
      both remain empty forever.

      On a 64-bit platform:
      In the L2 table:
         all .base & 0xFFFF == 0
         all .base > MAX_PRIMARY_ADDRESS
      In the L1 table:
         all .base & 0xFFFF == 0
         all (.base > MAX_PRIMARY_ADDRESS
              .base & 0xFFFF == 0
              and .ent points to an AuxMapEnt with the same .base)
             or
             (.base == 0 and .ent == NULL)
   */
   *n_secmaps_found = 0;
   if (sizeof(void*) == 4) {
      /* 32-bit platform */
      if (VG_(OSetGen_Size)(auxmap_L2) != 0)
         return "32-bit: auxmap_L2 is non-empty";
      for (i = 0; i < N_AUXMAP_L1; i++)
         if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
            return "32-bit: auxmap_L1 is non-empty";
   } else {
      /* 64-bit platform */
      UWord elems_seen = 0;
      AuxMapEnt *elem, *res;
      AuxMapEnt key;
      /* L2 table */
      VG_(OSetGen_ResetIter)(auxmap_L2);
      while ( (elem = VG_(OSetGen_Next)(auxmap_L2)) ) {
         elems_seen++;
         if (0 != (elem->base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
         if (elem->base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
         if (elem->sm == NULL)
            return "64-bit: .sm in _L2 is NULL";
         if (!is_distinguished_sm(elem->sm))
            (*n_secmaps_found)++;
      }
      if (elems_seen != n_auxmap_L2_nodes)
         return "64-bit: disagreement on number of elems in _L2";
      /* Check L1-L2 correspondence */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
            continue;
         if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
         if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
         if (auxmap_L1[i].ent == NULL)
            return "64-bit: .ent is NULL in auxmap_L1";
         if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
            return "64-bit: _L1 and _L2 bases are inconsistent";
         /* Look it up in auxmap_L2. */
         key.base = auxmap_L1[i].base;
         key.sm   = 0;
         res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
         if (res == NULL)
            return "64-bit: _L1 .base not found in _L2";
         if (res != auxmap_L1[i].ent)
            return "64-bit: _L1 .ent disagrees with _L2 entry";
      }
      /* Check L1 contains no duplicates */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0)
            continue;
         for (j = i+1; j < N_AUXMAP_L1; j++) {
            if (auxmap_L1[j].base == 0)
               continue;
            if (auxmap_L1[j].base == auxmap_L1[i].base)
               return "64-bit: duplicate _L1 .base entries";
         }
      }
   }
   return NULL; /* ok */
}

static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
{
   Word i;
   tl_assert(ent);
   tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
   for (i = N_AUXMAP_L1-1; i > rank; i--)
      auxmap_L1[i] = auxmap_L1[i-1];
   auxmap_L1[rank].base = ent->base;
   auxmap_L1[rank].ent  = ent;
}

static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   AuxMapEnt  key;
   AuxMapEnt* res;
   Word       i;

   tl_assert(a > MAX_PRIMARY_ADDRESS);
   a &= ~(Addr)0xFFFF;

   /* First search the front-cache, which is a self-organising
      list containing the most popular entries. */

   if (EXPECTED_TAKEN(auxmap_L1[0].base == a))
      return auxmap_L1[0].ent;
   if (EXPECTED_TAKEN(auxmap_L1[1].base == a)) {
      Addr       t_base = auxmap_L1[0].base;
      AuxMapEnt* t_ent  = auxmap_L1[0].ent;
      auxmap_L1[0].base = auxmap_L1[1].base;
      auxmap_L1[0].ent  = auxmap_L1[1].ent;
      auxmap_L1[1].base = t_base;
      auxmap_L1[1].ent  = t_ent;
      return auxmap_L1[0].ent;
   }

   n_auxmap_L1_searches++;

   for (i = 0; i < N_AUXMAP_L1; i++) {
      if (auxmap_L1[i].base == a) {
         break;
      }
   }
   tl_assert(i >= 0 && i <= N_AUXMAP_L1);

   n_auxmap_L1_cmps += (ULong)(i+1);

   if (i < N_AUXMAP_L1) {
      if (i > 0) {
         Addr       t_base = auxmap_L1[i-1].base;
         AuxMapEnt* t_ent  = auxmap_L1[i-1].ent;
         auxmap_L1[i-1].base = auxmap_L1[i-0].base;
         auxmap_L1[i-1].ent  = auxmap_L1[i-0].ent;
         auxmap_L1[i-0].base = t_base;
         auxmap_L1[i-0].ent  = t_ent;
         i--;
      }
      return auxmap_L1[i].ent;
   }

   n_auxmap_L2_searches++;

   /* First see if we already have it. */
   key.base = a;
   key.sm   = 0;

   res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
   if (res)
      insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
   return res;
}

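// Note on the L1 behaviour above (a summary of the code, for orientation):
// a hit at L1 index i swaps entries i and i-1, so frequently-used bases
// migrate towards slot 0 one place per hit; a base found only in the L2
// OSet is (re)inserted into the L1 at AUXMAP_L1_INSERT_IX, which pushes the
// entry in the last L1 slot out of the L1 (but not out of the L2).
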
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt *nyu, *res;

   /* First see if we already have it. */
   res = maybe_find_in_auxmap( a );
   if (EXPECTED_TAKEN(res))
      return res;

   /* Ok, there's no entry in the secondary map, so we'll have
      to allocate one. */
   a &= ~(Addr)0xFFFF;

   nyu = (AuxMapEnt*) VG_(OSetGen_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
   tl_assert(nyu);
   nyu->base = a;
   nyu->sm   = &sm_distinguished[SM_DIST_NOACCESS];
   VG_(OSetGen_Insert)( auxmap_L2, nyu );
   insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
   n_auxmap_L2_nodes++;
   return nyu;
}

/* --------------- SecMap fundamentals --------------- */

// In all these, 'low' means it's definitely in the main primary map,
// 'high' means it's definitely in the auxiliary table.

static INLINE SecMap** get_secmap_low_ptr ( Addr a )
{
   UWord pm_off = a >> 16;
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(pm_off < N_PRIMARY_MAP);
#  endif
   return &primary_map[ pm_off ];
}

static INLINE SecMap** get_secmap_high_ptr ( Addr a )
{
   AuxMapEnt* am = find_or_alloc_in_auxmap(a);
   return &am->sm;
}

static SecMap** get_secmap_ptr ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_low_ptr(a)
          : get_secmap_high_ptr(a));
}

static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
{
   return *get_secmap_low_ptr(a);
}

static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
{
   return *get_secmap_high_ptr(a);
}

static INLINE SecMap* get_secmap_for_writing_low(Addr a)
{
   SecMap** p = get_secmap_low_ptr(a);
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
{
   SecMap** p = get_secmap_high_ptr(a);
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static INLINE SecMap* get_secmap_for_reading ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_reading_low (a)
          : get_secmap_for_reading_high(a) );
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_for_writing ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_writing_low (a)
          : get_secmap_for_writing_high(a) );
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      return get_secmap_for_reading_low(a);
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}

/* --------------- Fundamental functions --------------- */

static INLINE
void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
{
   UInt shift =  (a & 3)   << 1;      // shift by 0, 2, 4, or 6
   *vabits8  &= ~(0x3      << shift); // mask out the two old bits
   *vabits8  |=  (vabits2  << shift); // mask  in the two new bits
}

static INLINE
void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));     // Must be 2-aligned
   shift = (a & 2) << 1;              // shift by 0 or 4
   *vabits8 &= ~(0xf     << shift);   // mask out the four old bits
   *vabits8 |=  (vabits4 << shift);   // mask  in the four new bits
}

static INLINE
UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift = (a & 3) << 1;         // shift by 0, 2, 4, or 6
   vabits8 >>= shift;                 // shift the two bits to the bottom
   return 0x3 & vabits8;              // mask out the rest
}

static INLINE
UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));     // Must be 2-aligned
   shift = (a & 2) << 1;              // shift by 0 or 4
   vabits8 >>= shift;                 // shift the four bits to the bottom
   return 0xf & vabits8;              // mask out the rest
}

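// A quick worked example (arbitrary values) for the 2-bit helpers above:
// with a == 0x1001 (so a & 3 == 1 and shift == 2) and vabits8 == 0xaa (all
// four bytes defined), insert_vabits2_into_vabits8(a, VA_BITS2_UNDEFINED,
// &vabits8) leaves vabits8 == 0xa6, and extract_vabits2_from_vabits8(a, 0xa6)
// recovers VA_BITS2_UNDEFINED (0x1).
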
// Note that these four are only used in slow cases.  The fast cases do
// clever things like combine the auxmap check (in
// get_secmap_{read,writ}able) with alignment checks.

// *** WARNING! ***
// Any time this function is called, if it is possible that vabits2
// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
// sec-V-bits table must also be set!
static INLINE
void set_vabits2 ( Addr a, UChar vabits2 )
{
   SecMap* sm       = get_secmap_for_writing(a);
   UWord   sm_off   = SM_OFF(a);
   insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
}

static INLINE
UChar get_vabits2 ( Addr a )
{
   SecMap* sm       = get_secmap_for_reading(a);
   UWord   sm_off   = SM_OFF(a);
   UChar   vabits8  = sm->vabits8[sm_off];
   return extract_vabits2_from_vabits8(a, vabits8);
}

// *** WARNING! ***
// Any time this function is called, if it is possible that any of the
// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
// corresponding entry(s) in the sec-V-bits table must also be set!
static INLINE
UChar get_vabits8_for_aligned_word32 ( Addr a )
{
   SecMap* sm       = get_secmap_for_reading(a);
   UWord   sm_off   = SM_OFF(a);
   UChar   vabits8  = sm->vabits8[sm_off];
   return vabits8;
}

static INLINE
void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
{
   SecMap* sm       = get_secmap_for_writing(a);
   UWord   sm_off   = SM_OFF(a);
   sm->vabits8[sm_off] = vabits8;
}


// Forward declarations
static UWord get_sec_vbits8(Addr a);
static void  set_sec_vbits8(Addr a, UWord vbits8);

// Returns False if there was an addressability error.
static INLINE
Bool set_vbits8 ( Addr a, UChar vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);
   if ( VA_BITS2_NOACCESS != vabits2 ) {
      // Addressable.  Convert in-register format to in-memory format.
      // Also remove any existing sec V bit entry for the byte if no
      // longer necessary.
      if      ( V_BITS8_DEFINED   == vbits8 ) { vabits2 = VA_BITS2_DEFINED;   }
      else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
      else                                    { vabits2 = VA_BITS2_PARTDEFINED;
                                                set_sec_vbits8(a, vbits8);    }
      set_vabits2(a, vabits2);

   } else {
      // Unaddressable!  Do nothing -- when writing to unaddressable
      // memory it acts as a black hole, and the V bits can never be seen
      // again.  So we don't have to write them at all.
      ok = False;
   }
   return ok;
}

// Returns False if there was an addressability error.  In that case, we put
// all defined bits into vbits8.
static INLINE
Bool get_vbits8 ( Addr a, UChar* vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);

   // Convert the in-memory format to in-register format.
   if      ( VA_BITS2_DEFINED   == vabits2 ) { *vbits8 = V_BITS8_DEFINED;   }
   else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
   else if ( VA_BITS2_NOACCESS  == vabits2 ) {
      *vbits8 = V_BITS8_DEFINED;    // Make V bits defined!
      ok = False;
   } else {
      tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
      *vbits8 = get_sec_vbits8(a);
   }
   return ok;
}


/* --------------- Secondary V bit table ------------ */

// This table holds the full V bit pattern for partially-defined bytes
// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
// memory.
//
// Note: the nodes in this table can become stale.  Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
// node will not necessarily be removed.  This is because checking for
// whether removal is necessary would slow down the fast paths.
//
// To avoid the stale nodes building up too much, we periodically (once the
// table reaches a certain size) garbage collect (GC) the table by
// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
// are stale and haven't been touched for a certain number of collections.
// If more than a certain proportion of nodes survived, we increase the
// table size so that GCs occur less often.
//
// (So this is a bit different to a traditional GC, where you definitely want
// to remove any dead nodes.  It's more like we have a resizable cache and
// we're trying to find the right balance of how many elements to evict and
// how big to make the cache.)
//
// This policy is designed to avoid bad table bloat in the worst case where
// a program creates huge numbers of stale PDBs -- we would get this bloat
// if we had no GC -- while handling well the case where a node becomes
// stale but shortly afterwards is rewritten with a PDB and so becomes
// non-stale again (which happens quite often, eg. in perf/bz2).  If we just
// remove all stale nodes as soon as possible, we just end up re-adding a
// lot of them later.  The "sufficiently stale" approach avoids this.  (If a
// program has many live PDBs, performance will just suck, there's no way
// around that.)

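// To make the numbers concrete (assuming the default settings below are left
// unchanged): with secVBitLimit == 1024, MAX_SURVIVOR_PROPORTION == 0.5 and
// TABLE_GROWTH_FACTOR == 2, a GC runs when the table reaches 1024 nodes; if
// more than 512 nodes survive, the limit doubles to 2048, so the next GC
// happens later.  MAX_STALE_AGE == 2 means a node whose bytes are all stale
// is evicted once more than two GCs have passed since it was last touched.
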
static OSet* secVBitTable;

// Stats
static ULong sec_vbits_new_nodes = 0;
static ULong sec_vbits_updates   = 0;

// This must be a power of two;  this is checked in mc_pre_clo_init().
// The size chosen here is a trade-off:  if the nodes are bigger (ie. cover
// a larger address range) they take more space but we can get multiple
// partially-defined bytes in one if they are close to each other, reducing
// the number of total nodes.  In practice sometimes they are clustered (eg.
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
// row), but often not.  So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE     16

// We make the table bigger if more than this many nodes survive a GC.
#define MAX_SURVIVOR_PROPORTION  0.5

// Each time we make the table bigger, we increase it by this much.
#define TABLE_GROWTH_FACTOR      2

// This defines "sufficiently stale" -- any node that hasn't been touched in
// this many GCs will be removed.
#define MAX_STALE_AGE            2

// We GC the table when it gets this many nodes in it, ie. it's effectively
// the table size.  It can change.
static Int  secVBitLimit = 1024;

// The number of GCs done, used to age sec-V-bit nodes for eviction.
// Because it's unsigned, wrapping doesn't matter -- the right answer will
// come out anyway.
static UInt GCs_done = 0;

typedef
   struct {
      Addr  a;
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];
      UInt  last_touched;
   }
   SecVBitNode;

static OSet* createSecVBitTable(void)
{
   return VG_(OSetGen_Create)( offsetof(SecVBitNode, a),
                               NULL, // use fast comparisons
                               VG_(malloc), VG_(free) );
}

static void gcSecVBitTable(void)
{
   OSet*        secVBitTable2;
   SecVBitNode* n;
   Int          i, n_nodes = 0, n_survivors = 0;

   GCs_done++;

   // Create the new table.
   secVBitTable2 = createSecVBitTable();

   // Traverse the table, moving fresh nodes into the new table.
   VG_(OSetGen_ResetIter)(secVBitTable);
   while ( (n = VG_(OSetGen_Next)(secVBitTable)) ) {
      Bool keep = False;
      if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
         // Keep node if it's been touched recently enough (regardless of
         // freshness/staleness).
         keep = True;
      } else {
         // Keep node if any of its bytes are non-stale.  Using
         // get_vabits2() for the lookup is not very efficient, but I don't
         // think it matters.
         for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
            if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
               keep = True;      // Found a non-stale byte, so keep
               break;
            }
         }
      }

      if ( keep ) {
         // Insert a copy of the node into the new table.
         SecVBitNode* n2 =
            VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
         *n2 = *n;
         VG_(OSetGen_Insert)(secVBitTable2, n2);
      }
   }

   // Get the before and after sizes.
   n_nodes     = VG_(OSetGen_Size)(secVBitTable);
   n_survivors = VG_(OSetGen_Size)(secVBitTable2);

   // Destroy the old table, and put the new one in its place.
   VG_(OSetGen_Destroy)(secVBitTable);
   secVBitTable = secVBitTable2;

   if (VG_(clo_verbosity) > 1) {
      Char percbuf[6];
      VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
      VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
                   n_nodes, n_survivors, percbuf);
   }

   // Increase table size if necessary.
   if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
      secVBitLimit *= TABLE_GROWTH_FACTOR;
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
                      secVBitLimit);
   }
}

static UWord get_sec_vbits8(Addr a)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          amod     = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   UChar        vbits8;
   tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   vbits8 = n->vbits8[amod];
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   return vbits8;
}

static void set_sec_vbits8(Addr a, UWord vbits8)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          i, amod  = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   if (n) {
      n->vbits8[amod] = vbits8;     // update
      n->last_touched = GCs_done;
      sec_vbits_updates++;
   } else {
      // New node:  assign the specific byte, make the rest invalid (they
      // should never be read as-is, but be cautious).
      n = VG_(OSetGen_AllocNode)(secVBitTable, sizeof(SecVBitNode));
      n->a            = aAligned;
      for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
         n->vbits8[i] = V_BITS8_UNDEFINED;
      }
      n->vbits8[amod] = vbits8;
      n->last_touched = GCs_done;

      // Do a table GC if necessary.  Nb: do this before inserting the new
      // node, to avoid erroneously GC'ing the new node.
      if (secVBitLimit == VG_(OSetGen_Size)(secVBitTable)) {
         gcSecVBitTable();
      }

      // Insert the new node.
      VG_(OSetGen_Insert)(secVBitTable, n);
      sec_vbits_new_nodes++;

      n_secVBit_nodes = VG_(OSetGen_Size)(secVBitTable);
      if (n_secVBit_nodes > max_secVBit_nodes)
         max_secVBit_nodes = n_secVBit_nodes;
   }
}

/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th least significant byte
   in a wordszB-sized word, given the specified endianness. */
static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}
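
/* For instance (illustrative only): for a 4-byte word,
   byte_offset_w(4, False, 0) == 0 (little-endian) and
   byte_offset_w(4, True, 0) == 3 (big-endian) -- in both cases the memory
   offset of the least significant byte -- while byteno == 3 selects the
   most significant byte (offset 3 on little-endian, 0 on big-endian). */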


/* --------------- Ignored address ranges --------------- */

#define M_IGNORE_RANGES 4

typedef
   struct {
      Int  used;
      Addr start[M_IGNORE_RANGES];
      Addr end[M_IGNORE_RANGES];
   }
   IgnoreRanges;

static IgnoreRanges ignoreRanges;

static INLINE Bool in_ignored_range ( Addr a )
{
   Int i;
   if (EXPECTED_TAKEN(ignoreRanges.used == 0))
      return False;
   for (i = 0; i < ignoreRanges.used; i++) {
      if (a >= ignoreRanges.start[i] && a < ignoreRanges.end[i])
         return True;
   }
   return False;
}


/* Parse a 32- or 64-bit hex number, including leading 0x, from string
   starting at *ppc, putting result in *result, and return True.  Or
   fail, in which case *ppc and *result are undefined, and return
   False. */

static Bool isHex ( UChar c )
{
   return ((c >= '0' && c <= '9')
           || (c >= 'a' && c <= 'f')
           || (c >= 'A' && c <= 'F'));
}

static UInt fromHex ( UChar c )
{
   if (c >= '0' && c <= '9')
      return (UInt)c - (UInt)'0';
   if (c >= 'a' && c <= 'f')
      return 10 + (UInt)c - (UInt)'a';
   if (c >= 'A' && c <= 'F')
      return 10 + (UInt)c - (UInt)'A';
   /*NOTREACHED*/
   tl_assert(0);
   return 0;
}

static Bool parse_Addr ( UChar** ppc, Addr* result )
{
   Int used, limit = 2 * sizeof(Addr);
   if (**ppc != '0')
      return False;
   (*ppc)++;
   if (**ppc != 'x')
      return False;
   (*ppc)++;
   *result = 0;
   used = 0;
   while (isHex(**ppc)) {
      UInt d = fromHex(**ppc);
      tl_assert(d < 16);
      *result = ((*result) << 4) | fromHex(**ppc);
      (*ppc)++;
      used++;
      if (used > limit) return False;
   }
   if (used == 0)
      return False;
   return True;
}

/* Parse two such numbers separated by a dash, or fail. */

static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
{
   Bool ok = parse_Addr(ppc, result1);
   if (!ok)
      return False;
   if (**ppc != '-')
      return False;
   (*ppc)++;
   ok = parse_Addr(ppc, result2);
   if (!ok)
      return False;
   return True;
}

/* Parse a set of ranges separated by commas into 'ignoreRanges', or
   fail. */

static Bool parse_ignore_ranges ( UChar* str0 )
{
   Addr start, end;
   Bool ok;
   UChar*  str = str0;
   UChar** ppc = &str;
   ignoreRanges.used = 0;
   while (1) {
      ok = parse_range(ppc, &start, &end);
      if (!ok)
         return False;
      if (ignoreRanges.used >= M_IGNORE_RANGES)
         return False;
      ignoreRanges.start[ignoreRanges.used] = start;
      ignoreRanges.end[ignoreRanges.used] = end;
      ignoreRanges.used++;
      if (**ppc == 0)
         return True;
      if (**ppc != ',')
         return False;
      (*ppc)++;
   }
   /*NOTREACHED*/
   return False;
}

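/* For example (illustrative only): a string such as
      "0x50000000-0x50ffffff,0xa0000000-0xa0ffffff"
   is accepted by parse_ignore_ranges() and sets up two ignored ranges;
   anything not of the form 0x<hex>-0x<hex>[,0x<hex>-0x<hex>...], or
   containing more than M_IGNORE_RANGES ranges, is rejected. */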

/* --------------- Load/store slow cases. --------------- */

// Forward declarations
static void mc_record_address_error  ( ThreadId tid, Addr a,
                                       Int size, Bool isWrite );
static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* s );
static void mc_record_regparam_error ( ThreadId tid, Char* msg );
static void mc_record_memparam_error ( ThreadId tid, Addr a,
                                       Bool isAddrErr, Char* msg );
static void mc_record_jump_error     ( ThreadId tid, Addr a );

static
#ifndef PERF_FAST_LOADV
INLINE
#endif
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
   /* Make up a 64-bit result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong  vbits64     = V_BITS64_UNDEFINED;
   SizeT  szB         = nBits / 8;
   SSizeT i           = szB-1;    // Must be signed
   SizeT  n_addrs_bad = 0;
   Addr   ai;
   Bool   partial_load_exemption_applies;
   UChar  vbits8;
   Bool   ok;

   PROF_EVENT(30, "mc_LOADVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from LOADV64 and LOADV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED))
         return V_BITS64_DEFINED;
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED))
         return V_BITS64_UNDEFINED;
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
      /* else fall into slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   for (i = szB-1; i >= 0; i--) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a + byte_offset_w(szB, bigendian, i);
      ok = get_vbits8(ai, &vbits8);
      if (!ok) n_addrs_bad++;
      vbits64 <<= 8;
      vbits64 |= vbits8;
   }

   /* This is a hack which avoids producing errors for code which
      insists on stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressable place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
                                  && VG_IS_WORD_ALIGNED(a)
                                  && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, False );

   return vbits64;
}


static
#ifndef PERF_FAST_STOREV
INLINE
#endif
void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
{
   SizeT szB = nBits / 8;
   SizeT i, n_addrs_bad = 0;
   UChar vbits8;
   Addr  ai;
   Bool  ok;

   PROF_EVENT(35, "mc_STOREVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from STOREV64 and STOREV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                          (VA_BITS16_DEFINED   == vabits16 ||
                           VA_BITS16_UNDEFINED == vabits16) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS64_DEFINED == vbytes)) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
            return;
         } else if (V_BITS64_UNDEFINED == vbytes) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                          (VA_BITS8_DEFINED   == vabits8 ||
                           VA_BITS8_UNDEFINED == vabits8) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
            sm->vabits8[sm_off] = VA_BITS8_DEFINED;
            return;
         } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
            sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai     = a + byte_offset_w(szB, bigendian, i);
      vbits8 = vbytes & 0xff;
      ok     = set_vbits8(ai, vbits8);
      if (!ok) n_addrs_bad++;
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
}


njn25e49d8e72002-09-23 09:36:25 +00001323/*------------------------------------------------------------*/
1324/*--- Setting permissions over address ranges. ---*/
1325/*------------------------------------------------------------*/
1326
njn1d0825f2006-03-27 11:37:07 +00001327static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
1328 UWord dsm_num )
sewardj23eb2fd2005-04-22 16:29:19 +00001329{
njn1d0825f2006-03-27 11:37:07 +00001330 UWord sm_off, sm_off16;
1331 UWord vabits2 = vabits16 & 0x3;
1332 SizeT lenA, lenB, len_to_next_secmap;
1333 Addr aNext;
sewardjae986ca2005-10-12 12:53:20 +00001334 SecMap* sm;
njn1d0825f2006-03-27 11:37:07 +00001335 SecMap** sm_ptr;
sewardjae986ca2005-10-12 12:53:20 +00001336 SecMap* example_dsm;
1337
sewardj23eb2fd2005-04-22 16:29:19 +00001338 PROF_EVENT(150, "set_address_range_perms");
1339
njn1d0825f2006-03-27 11:37:07 +00001340 /* Check the V+A bits make sense. */
njndbf7ca72006-03-31 11:57:59 +00001341 tl_assert(VA_BITS16_NOACCESS == vabits16 ||
1342 VA_BITS16_UNDEFINED == vabits16 ||
1343 VA_BITS16_DEFINED == vabits16);
sewardj23eb2fd2005-04-22 16:29:19 +00001344
njn1d0825f2006-03-27 11:37:07 +00001345 // This code should never write PDBs; ensure this. (See comment above
1346 // set_vabits2().)
njndbf7ca72006-03-31 11:57:59 +00001347 tl_assert(VA_BITS2_PARTDEFINED != vabits2);
njn1d0825f2006-03-27 11:37:07 +00001348
1349 if (lenT == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001350 return;
1351
njn1d0825f2006-03-27 11:37:07 +00001352 if (lenT > 100 * 1000 * 1000) {
1353 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
1354 Char* s = "unknown???";
njndbf7ca72006-03-31 11:57:59 +00001355 if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
1356 if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
1357 if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
njn1d0825f2006-03-27 11:37:07 +00001358 VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
1359 "large range %lu (%s)", lenT, s);
sewardj23eb2fd2005-04-22 16:29:19 +00001360 }
1361 }
1362
njn1d0825f2006-03-27 11:37:07 +00001363#ifndef PERF_FAST_SARP
sewardj23eb2fd2005-04-22 16:29:19 +00001364 /*------------------ debug-only case ------------------ */
njn1d0825f2006-03-27 11:37:07 +00001365 {
1366 // Endianness doesn't matter here because all bytes are being set to
1367 // the same value.
1368 // Nb: We don't have to worry about updating the sec-V-bits table
1369 // after these set_vabits2() calls because this code never writes
njndbf7ca72006-03-31 11:57:59 +00001370 // VA_BITS2_PARTDEFINED values.
njn1d0825f2006-03-27 11:37:07 +00001371 SizeT i;
1372 for (i = 0; i < lenT; i++) {
1373 set_vabits2(a + i, vabits2);
1374 }
1375 return;
njn25e49d8e72002-09-23 09:36:25 +00001376 }
njn1d0825f2006-03-27 11:37:07 +00001377#endif
sewardj23eb2fd2005-04-22 16:29:19 +00001378
1379 /*------------------ standard handling ------------------ */
sewardj23eb2fd2005-04-22 16:29:19 +00001380
njn1d0825f2006-03-27 11:37:07 +00001381 /* Get the distinguished secondary that we might want
sewardj23eb2fd2005-04-22 16:29:19 +00001382 to use (part of the space-compression scheme). */
njn1d0825f2006-03-27 11:37:07 +00001383 example_dsm = &sm_distinguished[dsm_num];
1384
1385 // We have to handle ranges covering various combinations of partial and
1386 // whole sec-maps. Here is how parts 1, 2 and 3 are used in each case.
1387 // Cases marked with a '*' are common.
1388 //
1389 // TYPE PARTS USED
1390 // ---- ----------
1391 // * one partial sec-map (p) 1
1392 // - one whole sec-map (P) 2
1393 //
1394 // * two partial sec-maps (pp) 1,3
1395 // - one partial, one whole sec-map (pP) 1,2
1396 // - one whole, one partial sec-map (Pp) 2,3
1397 // - two whole sec-maps (PP) 2,2
1398 //
1399 // * one partial, one whole, one partial (pPp) 1,2,3
1400 // - one partial, two whole (pPP) 1,2,2
1401 // - two whole, one partial (PPp) 2,2,3
1402 // - three whole (PPP) 2,2,2
1403 //
1404 // * one partial, N-2 whole, one partial (pP...Pp) 1,2...2,3
1405 // - one partial, N-1 whole (pP...PP) 1,2...2,2
1406 // - N-1 whole, one partial (PP...Pp) 2,2...2,3
1407   //   - N whole                              (PP...PP)    2,2...2,2
1408
1409 // Break up total length (lenT) into two parts: length in the first
1410 // sec-map (lenA), and the rest (lenB); lenT == lenA + lenB.
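   // Worked example (illustrative only; assumes the 64KB sec-maps used
   // below): with a == 0x1FFF0 and lenT == 0x30 we get aNext == 0x20000
   // and len_to_next_secmap == 0x10, so lenA == 0x10 (bytes
   // 0x1FFF0..0x1FFFF, handled by Part 1) and lenB == 0x20 (bytes
   // 0x20000..0x2001F, handled by Part 3, since lenB < SM_SIZE leaves
   // Part 2 with nothing to do).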
1411 aNext = start_of_this_sm(a) + SM_SIZE;
1412 len_to_next_secmap = aNext - a;
1413 if ( lenT <= len_to_next_secmap ) {
1414 // Range entirely within one sec-map. Covers almost all cases.
1415 PROF_EVENT(151, "set_address_range_perms-single-secmap");
1416 lenA = lenT;
1417 lenB = 0;
1418 } else if (is_start_of_sm(a)) {
1419 // Range spans at least one whole sec-map, and starts at the beginning
1420 // of a sec-map; skip to Part 2.
1421 PROF_EVENT(152, "set_address_range_perms-startof-secmap");
1422 lenA = 0;
1423 lenB = lenT;
1424 goto part2;
sewardj23eb2fd2005-04-22 16:29:19 +00001425 } else {
njn1d0825f2006-03-27 11:37:07 +00001426 // Range spans two or more sec-maps, first one is partial.
1427 PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
1428 lenA = len_to_next_secmap;
1429 lenB = lenT - lenA;
1430 }
1431
1432 //------------------------------------------------------------------------
1433 // Part 1: Deal with the first sec_map. Most of the time the range will be
1434 // entirely within a sec_map and this part alone will suffice. Also,
1435 // doing it this way lets us avoid repeatedly testing for the crossing of
1436 // a sec-map boundary within these loops.
1437 //------------------------------------------------------------------------
1438
1439 // If it's distinguished, make it undistinguished if necessary.
1440 sm_ptr = get_secmap_ptr(a);
1441 if (is_distinguished_sm(*sm_ptr)) {
1442 if (*sm_ptr == example_dsm) {
1443 // Sec-map already has the V+A bits that we want, so skip.
1444 PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
1445 a = aNext;
1446 lenA = 0;
sewardj23eb2fd2005-04-22 16:29:19 +00001447 } else {
njn1d0825f2006-03-27 11:37:07 +00001448 PROF_EVENT(155, "set_address_range_perms-dist-sm1");
1449 *sm_ptr = copy_for_writing(*sm_ptr);
sewardj23eb2fd2005-04-22 16:29:19 +00001450 }
1451 }
njn1d0825f2006-03-27 11:37:07 +00001452 sm = *sm_ptr;
sewardj23eb2fd2005-04-22 16:29:19 +00001453
njn1d0825f2006-03-27 11:37:07 +00001454 // 1 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001455 while (True) {
sewardj23eb2fd2005-04-22 16:29:19 +00001456 if (VG_IS_8_ALIGNED(a)) break;
njn1d0825f2006-03-27 11:37:07 +00001457 if (lenA < 1) break;
1458 PROF_EVENT(156, "set_address_range_perms-loop1a");
1459 sm_off = SM_OFF(a);
1460 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1461 a += 1;
1462 lenA -= 1;
1463 }
1464 // 8-aligned, 8 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001465 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001466 if (lenA < 8) break;
1467 PROF_EVENT(157, "set_address_range_perms-loop8a");
1468 sm_off16 = SM_OFF_16(a);
1469 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1470 a += 8;
1471 lenA -= 8;
1472 }
1473 // 1 byte steps
1474 while (True) {
1475 if (lenA < 1) break;
1476 PROF_EVENT(158, "set_address_range_perms-loop1b");
1477 sm_off = SM_OFF(a);
1478 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1479 a += 1;
1480 lenA -= 1;
sewardj23eb2fd2005-04-22 16:29:19 +00001481 }
1482
njn1d0825f2006-03-27 11:37:07 +00001483 // We've finished the first sec-map. Is that it?
1484 if (lenB == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001485 return;
1486
njn1d0825f2006-03-27 11:37:07 +00001487 //------------------------------------------------------------------------
1488 // Part 2: Fast-set entire sec-maps at a time.
1489 //------------------------------------------------------------------------
1490 part2:
1491 // 64KB-aligned, 64KB steps.
1492 // Nb: we can reach here with lenB < SM_SIZE
sewardj23eb2fd2005-04-22 16:29:19 +00001493 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001494 if (lenB < SM_SIZE) break;
1495 tl_assert(is_start_of_sm(a));
1496 PROF_EVENT(159, "set_address_range_perms-loop64K");
1497 sm_ptr = get_secmap_ptr(a);
1498 if (!is_distinguished_sm(*sm_ptr)) {
1499 PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
1500 // Free the non-distinguished sec-map that we're replacing. This
1501 // case happens moderately often, enough to be worthwhile.
1502 VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
1503 }
1504 update_SM_counts(*sm_ptr, example_dsm);
1505 // Make the sec-map entry point to the example DSM
1506 *sm_ptr = example_dsm;
1507 lenB -= SM_SIZE;
1508 a += SM_SIZE;
1509 }
sewardj23eb2fd2005-04-22 16:29:19 +00001510
njn1d0825f2006-03-27 11:37:07 +00001511 // We've finished the whole sec-maps. Is that it?
1512 if (lenB == 0)
1513 return;
1514
1515 //------------------------------------------------------------------------
1516 // Part 3: Finish off the final partial sec-map, if necessary.
1517 //------------------------------------------------------------------------
1518
1519 tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);
1520
1521 // If it's distinguished, make it undistinguished if necessary.
1522 sm_ptr = get_secmap_ptr(a);
1523 if (is_distinguished_sm(*sm_ptr)) {
1524 if (*sm_ptr == example_dsm) {
1525 // Sec-map already has the V+A bits that we want, so stop.
1526 PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
1527 return;
1528 } else {
1529 PROF_EVENT(162, "set_address_range_perms-dist-sm2");
1530 *sm_ptr = copy_for_writing(*sm_ptr);
1531 }
1532 }
1533 sm = *sm_ptr;
1534
1535 // 8-aligned, 8 byte steps
1536 while (True) {
1537 if (lenB < 8) break;
1538 PROF_EVENT(163, "set_address_range_perms-loop8b");
1539 sm_off16 = SM_OFF_16(a);
1540 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1541 a += 8;
1542 lenB -= 8;
1543 }
1544 // 1 byte steps
1545 while (True) {
1546 if (lenB < 1) return;
1547 PROF_EVENT(164, "set_address_range_perms-loop1c");
1548 sm_off = SM_OFF(a);
1549 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1550 a += 1;
1551 lenB -= 1;
1552 }
sewardj23eb2fd2005-04-22 16:29:19 +00001553}
sewardj45d94cc2005-04-20 14:44:11 +00001554
sewardjc859fbf2005-04-22 21:10:28 +00001555
1556/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +00001557
njndbf7ca72006-03-31 11:57:59 +00001558void MC_(make_mem_noaccess) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001559{
njndbf7ca72006-03-31 11:57:59 +00001560 PROF_EVENT(40, "MC_(make_mem_noaccess)");
1561 DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
njn1d0825f2006-03-27 11:37:07 +00001562 set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
njn25e49d8e72002-09-23 09:36:25 +00001563}
1564
njndbf7ca72006-03-31 11:57:59 +00001565void MC_(make_mem_undefined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001566{
njndbf7ca72006-03-31 11:57:59 +00001567 PROF_EVENT(41, "MC_(make_mem_undefined)");
1568 DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
1569 set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001570}
1571
njndbf7ca72006-03-31 11:57:59 +00001572void MC_(make_mem_defined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001573{
njndbf7ca72006-03-31 11:57:59 +00001574 PROF_EVENT(42, "MC_(make_mem_defined)");
1575 DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
1576 set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001577}
1578
sewardjfb1e9ad2006-03-10 13:41:58 +00001579/* For each byte in [a,a+len), if the byte is addressable, make it
1580   defined, but if it isn't addressable, leave it alone.  In other
njndbf7ca72006-03-31 11:57:59 +00001581   words, a version of MC_(make_mem_defined) that doesn't mess with
sewardjfb1e9ad2006-03-10 13:41:58 +00001582   addressability.  Low-performance implementation. */
njndbf7ca72006-03-31 11:57:59 +00001583static void make_mem_defined_if_addressable ( Addr a, SizeT len )
sewardjfb1e9ad2006-03-10 13:41:58 +00001584{
1585 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001586 UChar vabits2;
njndbf7ca72006-03-31 11:57:59 +00001587 DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
sewardjfb1e9ad2006-03-10 13:41:58 +00001588 for (i = 0; i < len; i++) {
njn1d0825f2006-03-27 11:37:07 +00001589 vabits2 = get_vabits2( a+i );
1590 if (EXPECTED_TAKEN(VA_BITS2_NOACCESS != vabits2)) {
njndbf7ca72006-03-31 11:57:59 +00001591 set_vabits2(a+i, VA_BITS2_DEFINED);
njn1d0825f2006-03-27 11:37:07 +00001592 }
sewardjfb1e9ad2006-03-10 13:41:58 +00001593 }
1594}
1595
njn9b007f62003-04-07 14:40:25 +00001596
sewardj45f4e7c2005-09-27 19:20:21 +00001597/* --- Block-copy permissions (needed for implementing realloc() and
1598 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +00001599
njn1d0825f2006-03-27 11:37:07 +00001600void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
sewardjc859fbf2005-04-22 21:10:28 +00001601{
sewardj45f4e7c2005-09-27 19:20:21 +00001602 SizeT i, j;
sewardjf2184912006-05-03 22:13:57 +00001603 UChar vabits2, vabits8;
1604 Bool aligned, nooverlap;
sewardjc859fbf2005-04-22 21:10:28 +00001605
njn1d0825f2006-03-27 11:37:07 +00001606 DEBUG("MC_(copy_address_range_state)\n");
1607 PROF_EVENT(50, "MC_(copy_address_range_state)");
sewardj45f4e7c2005-09-27 19:20:21 +00001608
sewardjf2184912006-05-03 22:13:57 +00001609 if (len == 0 || src == dst)
sewardj45f4e7c2005-09-27 19:20:21 +00001610 return;
1611
sewardjf2184912006-05-03 22:13:57 +00001612 aligned = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
1613 nooverlap = src+len <= dst || dst+len <= src;
sewardj45f4e7c2005-09-27 19:20:21 +00001614
sewardjf2184912006-05-03 22:13:57 +00001615 if (nooverlap && aligned) {
1616
1617 /* Vectorised fast case, when no overlap and suitably aligned */
1618 /* vector loop */
1619 i = 0;
1620 while (len >= 4) {
1621 vabits8 = get_vabits8_for_aligned_word32( src+i );
1622 set_vabits8_for_aligned_word32( dst+i, vabits8 );
1623 if (EXPECTED_TAKEN(VA_BITS8_DEFINED == vabits8
1624 || VA_BITS8_UNDEFINED == vabits8
1625 || VA_BITS8_NOACCESS == vabits8)) {
1626 /* do nothing */
1627 } else {
1628 /* have to copy secondary map info */
1629 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
1630 set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
1631 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
1632 set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
1633 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
1634 set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
1635 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
1636 set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
1637 }
1638 i += 4;
1639 len -= 4;
1640 }
1641 /* fixup loop */
1642 while (len >= 1) {
njn1d0825f2006-03-27 11:37:07 +00001643 vabits2 = get_vabits2( src+i );
1644 set_vabits2( dst+i, vabits2 );
njndbf7ca72006-03-31 11:57:59 +00001645 if (VA_BITS2_PARTDEFINED == vabits2) {
njn1d0825f2006-03-27 11:37:07 +00001646 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1647 }
sewardjf2184912006-05-03 22:13:57 +00001648 i++;
1649 len--;
1650 }
1651
1652 } else {
1653
1654 /* We have to do things the slow way */
1655 if (src < dst) {
1656 for (i = 0, j = len-1; i < len; i++, j--) {
1657 PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
1658 vabits2 = get_vabits2( src+j );
1659 set_vabits2( dst+j, vabits2 );
1660 if (VA_BITS2_PARTDEFINED == vabits2) {
1661 set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
1662 }
1663 }
1664 }
1665
1666 if (src > dst) {
1667 for (i = 0; i < len; i++) {
1668 PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
1669 vabits2 = get_vabits2( src+i );
1670 set_vabits2( dst+i, vabits2 );
1671 if (VA_BITS2_PARTDEFINED == vabits2) {
1672 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1673 }
1674 }
sewardj45f4e7c2005-09-27 19:20:21 +00001675 }
sewardjc859fbf2005-04-22 21:10:28 +00001676 }
sewardjf2184912006-05-03 22:13:57 +00001677
sewardjc859fbf2005-04-22 21:10:28 +00001678}
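/* Illustrative sketch (not part of MemCheck proper): one way a
   realloc-style handler could use the block-copy operation above.  The
   function name and parameters are hypothetical; only
   MC_(copy_address_range_state), MC_(make_mem_undefined) and
   MC_(make_mem_noaccess) are real. */
#if 0
static void example_realloc_shadow ( Addr old_block, SizeT old_szB,
                                     Addr new_block, SizeT new_szB )
{
   SizeT common = old_szB < new_szB ? old_szB : new_szB;
   /* Carry the old block's A/V state across to the new block. */
   MC_(copy_address_range_state)( old_block, new_block, common );
   /* Any extra tail of the new block starts out undefined. */
   if (new_szB > old_szB)
      MC_(make_mem_undefined)( new_block + old_szB, new_szB - old_szB );
   /* The old block is no longer accessible. */
   MC_(make_mem_noaccess)( old_block, old_szB );
}
#endif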
1679
1680
1681/* --- Fast case permission setters, for dealing with stacks. --- */
1682
njn1d0825f2006-03-27 11:37:07 +00001683static INLINE
njndbf7ca72006-03-31 11:57:59 +00001684void make_aligned_word32_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001685{
njn1d0825f2006-03-27 11:37:07 +00001686 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001687 SecMap* sm;
1688
njndbf7ca72006-03-31 11:57:59 +00001689 PROF_EVENT(300, "make_aligned_word32_undefined");
sewardj5d28efc2005-04-21 22:16:29 +00001690
njn1d0825f2006-03-27 11:37:07 +00001691#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001692 MC_(make_mem_undefined)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001693#else
1694 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001695 PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
1696 MC_(make_mem_undefined)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001697 return;
1698 }
1699
njna7c7ebd2006-03-28 12:51:02 +00001700 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001701 sm_off = SM_OFF(a);
njndbf7ca72006-03-31 11:57:59 +00001702 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001703#endif
njn9b007f62003-04-07 14:40:25 +00001704}
1705
sewardj5d28efc2005-04-21 22:16:29 +00001706
njn1d0825f2006-03-27 11:37:07 +00001707static INLINE
1708void make_aligned_word32_noaccess ( Addr a )
sewardj5d28efc2005-04-21 22:16:29 +00001709{
njn1d0825f2006-03-27 11:37:07 +00001710 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001711 SecMap* sm;
1712
sewardj5d28efc2005-04-21 22:16:29 +00001713 PROF_EVENT(310, "make_aligned_word32_noaccess");
1714
njn1d0825f2006-03-27 11:37:07 +00001715#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001716 MC_(make_mem_noaccess)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001717#else
1718 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj5d28efc2005-04-21 22:16:29 +00001719 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001720 MC_(make_mem_noaccess)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001721 return;
1722 }
1723
njna7c7ebd2006-03-28 12:51:02 +00001724 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001725 sm_off = SM_OFF(a);
1726 sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
1727#endif
sewardj5d28efc2005-04-21 22:16:29 +00001728}
1729
1730
njn9b007f62003-04-07 14:40:25 +00001731/* Nb: by "aligned" here we mean 8-byte aligned */
njn1d0825f2006-03-27 11:37:07 +00001732static INLINE
njndbf7ca72006-03-31 11:57:59 +00001733void make_aligned_word64_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001734{
njn1d0825f2006-03-27 11:37:07 +00001735 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001736 SecMap* sm;
1737
njndbf7ca72006-03-31 11:57:59 +00001738 PROF_EVENT(320, "make_aligned_word64_undefined");
sewardj23eb2fd2005-04-22 16:29:19 +00001739
njn1d0825f2006-03-27 11:37:07 +00001740#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001741 MC_(make_mem_undefined)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001742#else
1743 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001744 PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
1745 MC_(make_mem_undefined)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001746 return;
1747 }
1748
njna7c7ebd2006-03-28 12:51:02 +00001749 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001750 sm_off16 = SM_OFF_16(a);
njndbf7ca72006-03-31 11:57:59 +00001751 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001752#endif
njn9b007f62003-04-07 14:40:25 +00001753}
1754
sewardj23eb2fd2005-04-22 16:29:19 +00001755
njn1d0825f2006-03-27 11:37:07 +00001756static INLINE
1757void make_aligned_word64_noaccess ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001758{
njn1d0825f2006-03-27 11:37:07 +00001759 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001760 SecMap* sm;
1761
sewardj23eb2fd2005-04-22 16:29:19 +00001762 PROF_EVENT(330, "make_aligned_word64_noaccess");
1763
njn1d0825f2006-03-27 11:37:07 +00001764#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001765 MC_(make_mem_noaccess)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001766#else
1767 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +00001768 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001769 MC_(make_mem_noaccess)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001770 return;
1771 }
1772
njna7c7ebd2006-03-28 12:51:02 +00001773 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001774 sm_off16 = SM_OFF_16(a);
1775 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
1776#endif
njn9b007f62003-04-07 14:40:25 +00001777}
1778
sewardj23eb2fd2005-04-22 16:29:19 +00001779
njn1d0825f2006-03-27 11:37:07 +00001780/*------------------------------------------------------------*/
1781/*--- Stack pointer adjustment ---*/
1782/*------------------------------------------------------------*/
1783
1784static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
1785{
1786 PROF_EVENT(110, "new_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001787 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001788 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00001789 } else {
njndbf7ca72006-03-31 11:57:59 +00001790 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
njn1d0825f2006-03-27 11:37:07 +00001791 }
1792}
1793
1794static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
1795{
1796 PROF_EVENT(120, "die_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001797 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001798 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001799 } else {
njndbf7ca72006-03-31 11:57:59 +00001800 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
njn1d0825f2006-03-27 11:37:07 +00001801 }
1802}
1803
1804static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
1805{
1806 PROF_EVENT(111, "new_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001807 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001808 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
sewardj05a46732006-10-17 01:28:10 +00001809 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001810 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1811 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001812 } else {
njndbf7ca72006-03-31 11:57:59 +00001813 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
njn1d0825f2006-03-27 11:37:07 +00001814 }
1815}
1816
1817static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
1818{
1819 PROF_EVENT(121, "die_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001820 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001821 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001822 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001823 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
1824 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001825 } else {
njndbf7ca72006-03-31 11:57:59 +00001826 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
njn1d0825f2006-03-27 11:37:07 +00001827 }
1828}
1829
1830static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
1831{
1832 PROF_EVENT(112, "new_mem_stack_12");
sewardj05a46732006-10-17 01:28:10 +00001833 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001834 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1835 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001836 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001837      /* From the previous test we don't have 8-alignment at offset +0,
1838         hence we must have 8-alignment at offsets +4/-4.  Hence it is
1839         safe to do 4 at +0 and then 8 at +4. */
njndbf7ca72006-03-31 11:57:59 +00001840 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1841 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001842 } else {
njndbf7ca72006-03-31 11:57:59 +00001843 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
njn1d0825f2006-03-27 11:37:07 +00001844 }
1845}
1846
1847static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
1848{
1849 PROF_EVENT(122, "die_mem_stack_12");
1850 /* Note the -12 in the test */
sewardj43fcfd92006-10-17 23:14:42 +00001851 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
1852 /* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
1853 -4. */
njndbf7ca72006-03-31 11:57:59 +00001854 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1855 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
sewardj05a46732006-10-17 01:28:10 +00001856 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001857 /* We have 4-alignment at +0, but we don't have 8-alignment at
1858 -12. So we must have 8-alignment at -8. Hence do 4 at -12
1859 and then 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001860 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1861 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00001862 } else {
njndbf7ca72006-03-31 11:57:59 +00001863 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
njn1d0825f2006-03-27 11:37:07 +00001864 }
1865}
1866
1867static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
1868{
1869 PROF_EVENT(113, "new_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001870 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001871 /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
njndbf7ca72006-03-31 11:57:59 +00001872 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1873 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001874 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001875 /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
1876 Hence do 4 at +0, 8 at +4, 4 at +12. */
njndbf7ca72006-03-31 11:57:59 +00001877 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1878 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1879 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
njn1d0825f2006-03-27 11:37:07 +00001880 } else {
njndbf7ca72006-03-31 11:57:59 +00001881 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
njn1d0825f2006-03-27 11:37:07 +00001882 }
1883}
1884
1885static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
1886{
1887 PROF_EVENT(123, "die_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001888 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001889 /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001890 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1891 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001892 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001893 /* 8 alignment must be at -12. Do 4 at -16, 8 at -12, 4 at -4. */
njndbf7ca72006-03-31 11:57:59 +00001894 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1895 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1896 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001897 } else {
njndbf7ca72006-03-31 11:57:59 +00001898 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
njn1d0825f2006-03-27 11:37:07 +00001899 }
1900}
1901
1902static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
1903{
1904 PROF_EVENT(114, "new_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001905 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001906 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001907 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1908 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1909 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1910 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
sewardj05a46732006-10-17 01:28:10 +00001911 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001912 /* 8 alignment must be at +4. Hence do 8 at +4,+12,+20 and 4 at
1913 +0,+28. */
njndbf7ca72006-03-31 11:57:59 +00001914 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1915 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1916 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
1917 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
1918 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
njn1d0825f2006-03-27 11:37:07 +00001919 } else {
njndbf7ca72006-03-31 11:57:59 +00001920 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
njn1d0825f2006-03-27 11:37:07 +00001921 }
1922}
1923
1924static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
1925{
1926 PROF_EVENT(124, "die_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001927 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001928 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001929 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1930 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1931 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1932 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
sewardj05a46732006-10-17 01:28:10 +00001933 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001934 /* 8 alignment must be at -4 etc. Hence do 8 at -12,-20,-28 and
1935 4 at -32,-4. */
njndbf7ca72006-03-31 11:57:59 +00001936 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1937 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
1938 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
1939 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1940 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001941 } else {
njndbf7ca72006-03-31 11:57:59 +00001942 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
njn1d0825f2006-03-27 11:37:07 +00001943 }
1944}
1945
1946static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
1947{
1948 PROF_EVENT(115, "new_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001949 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001950 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1951 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1952 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1953 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1954 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1955 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1956 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1957 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1958 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1959 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1960 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1961 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1962 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1963 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
njn1d0825f2006-03-27 11:37:07 +00001964 } else {
njndbf7ca72006-03-31 11:57:59 +00001965 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
njn1d0825f2006-03-27 11:37:07 +00001966 }
1967}
1968
1969static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
1970{
1971 PROF_EVENT(125, "die_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001972 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001973 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1974 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1975 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1976 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1977 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1978 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1979 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1980 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1981 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1982 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1983 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1984 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1985 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1986 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001987 } else {
njndbf7ca72006-03-31 11:57:59 +00001988 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
njn1d0825f2006-03-27 11:37:07 +00001989 }
1990}
1991
1992static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
1993{
1994 PROF_EVENT(116, "new_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00001995 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001996 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1997 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1998 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1999 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2000 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2001 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2002 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2003 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2004 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2005 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2006 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2007 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2008 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2009 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2010 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2011 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
njn1d0825f2006-03-27 11:37:07 +00002012 } else {
njndbf7ca72006-03-31 11:57:59 +00002013 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
njn1d0825f2006-03-27 11:37:07 +00002014 }
2015}
2016
2017static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
2018{
2019 PROF_EVENT(126, "die_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00002020 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002021 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2022 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2023 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2024 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2025 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2026 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2027 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2028 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2029 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2030 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2031 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2032 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2033 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2034 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2035 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2036 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002037 } else {
njndbf7ca72006-03-31 11:57:59 +00002038 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
njn1d0825f2006-03-27 11:37:07 +00002039 }
2040}
2041
2042static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
2043{
2044 PROF_EVENT(117, "new_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002045 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002046 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2047 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2048 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2049 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2050 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2051 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2052 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2053 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2054 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2055 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2056 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2057 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2058 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2059 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2060 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2061 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2062 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2063 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
njn1d0825f2006-03-27 11:37:07 +00002064 } else {
njndbf7ca72006-03-31 11:57:59 +00002065 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
njn1d0825f2006-03-27 11:37:07 +00002066 }
2067}
2068
2069static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
2070{
2071 PROF_EVENT(127, "die_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002072 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002073 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2074 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2075 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2076 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2077 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2078 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2079 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2080 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2081 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2082 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2083 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2084 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2085 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2086 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2087 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2088 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2089 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2090 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002091 } else {
njndbf7ca72006-03-31 11:57:59 +00002092 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
njn1d0825f2006-03-27 11:37:07 +00002093 }
2094}
2095
2096static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
2097{
2098 PROF_EVENT(118, "new_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002099 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002100 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2101 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2102 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2103 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2104 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2105 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2106 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2107 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2108 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2109 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2110 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2111 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2112 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2113 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2114 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2115 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2116 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2117 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
2118 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144);
2119 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152);
njn1d0825f2006-03-27 11:37:07 +00002120 } else {
njndbf7ca72006-03-31 11:57:59 +00002121 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
njn1d0825f2006-03-27 11:37:07 +00002122 }
2123}
2124
2125static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
2126{
2127 PROF_EVENT(128, "die_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002128 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002129 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
2130 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
2131 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2132 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2133 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2134 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2135 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2136 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2137 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2138 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2139 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2140 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2141 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2142 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2143 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2144 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2145 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2146 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2147 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2148 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002149 } else {
njndbf7ca72006-03-31 11:57:59 +00002150 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
njn1d0825f2006-03-27 11:37:07 +00002151 }
2152}
2153
2154static void mc_new_mem_stack ( Addr a, SizeT len )
2155{
2156 PROF_EVENT(115, "new_mem_stack");
njndbf7ca72006-03-31 11:57:59 +00002157 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002158}
2159
2160static void mc_die_mem_stack ( Addr a, SizeT len )
2161{
2162 PROF_EVENT(125, "die_mem_stack");
njndbf7ca72006-03-31 11:57:59 +00002163 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002164}
njn9b007f62003-04-07 14:40:25 +00002165
sewardj45d94cc2005-04-20 14:44:11 +00002166
njn1d0825f2006-03-27 11:37:07 +00002167/* The AMD64 ABI says:
2168
2169 "The 128-byte area beyond the location pointed to by %rsp is considered
2170 to be reserved and shall not be modified by signal or interrupt
2171 handlers. Therefore, functions may use this area for temporary data
2172 that is not needed across function calls. In particular, leaf functions
2173 may use this area for their entire stack frame, rather than adjusting
2174 the stack pointer in the prologue and epilogue. This area is known as
2175 red zone [sic]."
2176
2177 So after any call or return we need to mark this redzone as containing
2178 undefined values.
2179
2180 Consider this: we're in function f. f calls g. g moves rsp down
2181 modestly (say 16 bytes) and writes stuff all over the red zone, making it
2182 defined. g returns. f is buggy and reads from parts of the red zone
2183 that it didn't write on. But because g filled that area in, f is going
2184 to be picking up defined V bits and so any errors from reading bits of
2185   the red zone it didn't write will be missed.  The only solution I could
2186 think of was to make the red zone undefined when g returns to f.
2187
2188 This is in accordance with the ABI, which makes it clear the redzone
2189 is volatile across function calls.
2190
2191 The problem occurs the other way round too: f could fill the RZ up
2192 with defined values and g could mistakenly read them. So the RZ
2193 also needs to be nuked on function calls.
2194*/
sewardj826ec492005-05-12 18:05:00 +00002195void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
2196{
2197 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +00002198 if (0)
njn8a7b41b2007-09-23 00:51:24 +00002199 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %lu\n", base, len );
sewardj2a3a1a72005-05-12 23:25:43 +00002200
2201# if 0
2202 /* Really slow version */
njndbf7ca72006-03-31 11:57:59 +00002203 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002204# endif
2205
2206# if 0
2207 /* Slow(ish) version, which is fairly easily seen to be correct.
2208 */
2209 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
njndbf7ca72006-03-31 11:57:59 +00002210 make_aligned_word64_undefined(base + 0);
2211 make_aligned_word64_undefined(base + 8);
2212 make_aligned_word64_undefined(base + 16);
2213 make_aligned_word64_undefined(base + 24);
sewardj2a3a1a72005-05-12 23:25:43 +00002214
njndbf7ca72006-03-31 11:57:59 +00002215 make_aligned_word64_undefined(base + 32);
2216 make_aligned_word64_undefined(base + 40);
2217 make_aligned_word64_undefined(base + 48);
2218 make_aligned_word64_undefined(base + 56);
sewardj2a3a1a72005-05-12 23:25:43 +00002219
njndbf7ca72006-03-31 11:57:59 +00002220 make_aligned_word64_undefined(base + 64);
2221 make_aligned_word64_undefined(base + 72);
2222 make_aligned_word64_undefined(base + 80);
2223 make_aligned_word64_undefined(base + 88);
sewardj2a3a1a72005-05-12 23:25:43 +00002224
njndbf7ca72006-03-31 11:57:59 +00002225 make_aligned_word64_undefined(base + 96);
2226 make_aligned_word64_undefined(base + 104);
2227 make_aligned_word64_undefined(base + 112);
2228 make_aligned_word64_undefined(base + 120);
sewardj2a3a1a72005-05-12 23:25:43 +00002229 } else {
njndbf7ca72006-03-31 11:57:59 +00002230 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002231 }
2232# endif
2233
2234 /* Idea is: go fast when
2235 * 8-aligned and length is 128
2236 * the sm is available in the main primary map
njn1d0825f2006-03-27 11:37:07 +00002237      * the address range falls entirely within a single secondary map
2238 If all those conditions hold, just update the V+A bits by writing
2239 directly into the vabits array. (If the sm was distinguished, this
2240 will make a copy and then write to it.)
sewardj2a3a1a72005-05-12 23:25:43 +00002241 */
njn1d0825f2006-03-27 11:37:07 +00002242 if (EXPECTED_TAKEN( len == 128 && VG_IS_8_ALIGNED(base) )) {
2243 /* Now we know the address range is suitably sized and aligned. */
2244 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002245 UWord a_hi = (UWord)(base + 128 - 1);
njn1d0825f2006-03-27 11:37:07 +00002246 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2247 if (a_hi < MAX_PRIMARY_ADDRESS) {
2248 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002249 SecMap* sm = get_secmap_for_writing_low(a_lo);
2250 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2a3a1a72005-05-12 23:25:43 +00002251 /* Now we know that the entire address range falls within a
2252 single secondary map, and that that secondary 'lives' in
2253 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002254 if (EXPECTED_TAKEN(sm == sm_hi)) {
2255 // Finally, we know that the range is entirely within one secmap.
2256 UWord v_off = SM_OFF(a_lo);
2257 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002258 p[ 0] = VA_BITS16_UNDEFINED;
2259 p[ 1] = VA_BITS16_UNDEFINED;
2260 p[ 2] = VA_BITS16_UNDEFINED;
2261 p[ 3] = VA_BITS16_UNDEFINED;
2262 p[ 4] = VA_BITS16_UNDEFINED;
2263 p[ 5] = VA_BITS16_UNDEFINED;
2264 p[ 6] = VA_BITS16_UNDEFINED;
2265 p[ 7] = VA_BITS16_UNDEFINED;
2266 p[ 8] = VA_BITS16_UNDEFINED;
2267 p[ 9] = VA_BITS16_UNDEFINED;
2268 p[10] = VA_BITS16_UNDEFINED;
2269 p[11] = VA_BITS16_UNDEFINED;
2270 p[12] = VA_BITS16_UNDEFINED;
2271 p[13] = VA_BITS16_UNDEFINED;
2272 p[14] = VA_BITS16_UNDEFINED;
2273 p[15] = VA_BITS16_UNDEFINED;
sewardj2a3a1a72005-05-12 23:25:43 +00002274 return;
njn1d0825f2006-03-27 11:37:07 +00002275 }
sewardj2a3a1a72005-05-12 23:25:43 +00002276 }
2277 }
2278
sewardj2e1a6772006-01-18 04:16:27 +00002279 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
sewardj3f5f5562006-06-16 21:39:08 +00002280 if (EXPECTED_TAKEN( len == 288 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00002281 /* Now we know the address range is suitably sized and aligned. */
2282 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002283 UWord a_hi = (UWord)(base + 288 - 1);
njn1d0825f2006-03-27 11:37:07 +00002284 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2285 if (a_hi < MAX_PRIMARY_ADDRESS) {
2286 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002287 SecMap* sm = get_secmap_for_writing_low(a_lo);
2288 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2e1a6772006-01-18 04:16:27 +00002289 /* Now we know that the entire address range falls within a
2290 single secondary map, and that that secondary 'lives' in
2291 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002292 if (EXPECTED_TAKEN(sm == sm_hi)) {
2293 // Finally, we know that the range is entirely within one secmap.
2294 UWord v_off = SM_OFF(a_lo);
2295 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002296 p[ 0] = VA_BITS16_UNDEFINED;
2297 p[ 1] = VA_BITS16_UNDEFINED;
2298 p[ 2] = VA_BITS16_UNDEFINED;
2299 p[ 3] = VA_BITS16_UNDEFINED;
2300 p[ 4] = VA_BITS16_UNDEFINED;
2301 p[ 5] = VA_BITS16_UNDEFINED;
2302 p[ 6] = VA_BITS16_UNDEFINED;
2303 p[ 7] = VA_BITS16_UNDEFINED;
2304 p[ 8] = VA_BITS16_UNDEFINED;
2305 p[ 9] = VA_BITS16_UNDEFINED;
2306 p[10] = VA_BITS16_UNDEFINED;
2307 p[11] = VA_BITS16_UNDEFINED;
2308 p[12] = VA_BITS16_UNDEFINED;
2309 p[13] = VA_BITS16_UNDEFINED;
2310 p[14] = VA_BITS16_UNDEFINED;
2311 p[15] = VA_BITS16_UNDEFINED;
2312 p[16] = VA_BITS16_UNDEFINED;
2313 p[17] = VA_BITS16_UNDEFINED;
2314 p[18] = VA_BITS16_UNDEFINED;
2315 p[19] = VA_BITS16_UNDEFINED;
2316 p[20] = VA_BITS16_UNDEFINED;
2317 p[21] = VA_BITS16_UNDEFINED;
2318 p[22] = VA_BITS16_UNDEFINED;
2319 p[23] = VA_BITS16_UNDEFINED;
2320 p[24] = VA_BITS16_UNDEFINED;
2321 p[25] = VA_BITS16_UNDEFINED;
2322 p[26] = VA_BITS16_UNDEFINED;
2323 p[27] = VA_BITS16_UNDEFINED;
2324 p[28] = VA_BITS16_UNDEFINED;
2325 p[29] = VA_BITS16_UNDEFINED;
2326 p[30] = VA_BITS16_UNDEFINED;
2327 p[31] = VA_BITS16_UNDEFINED;
2328 p[32] = VA_BITS16_UNDEFINED;
2329 p[33] = VA_BITS16_UNDEFINED;
2330 p[34] = VA_BITS16_UNDEFINED;
2331 p[35] = VA_BITS16_UNDEFINED;
sewardj2e1a6772006-01-18 04:16:27 +00002332 return;
njn1d0825f2006-03-27 11:37:07 +00002333 }
sewardj2e1a6772006-01-18 04:16:27 +00002334 }
2335 }
2336
sewardj2a3a1a72005-05-12 23:25:43 +00002337 /* else fall into slow case */
njndbf7ca72006-03-31 11:57:59 +00002338 MC_(make_mem_undefined)(base, len);
sewardj826ec492005-05-12 18:05:00 +00002339}
2340
2341
nethercote8b76fe52004-11-08 19:20:09 +00002342/*------------------------------------------------------------*/
2343/*--- Checking memory ---*/
2344/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002345
sewardje4ccc012005-05-02 12:53:38 +00002346typedef
2347 enum {
2348 MC_Ok = 5,
2349 MC_AddrErr = 6,
2350 MC_ValueErr = 7
2351 }
2352 MC_ReadResult;
2353
2354
njn25e49d8e72002-09-23 09:36:25 +00002355/* Check permissions for address range. If inadequate permissions
2356 exist, *bad_addr is set to the offending address, so the caller can
2357 know what it is. */
2358
sewardjecf8e102003-07-12 12:11:39 +00002359/* Returns True if [a .. a+len) is not addressable.  Otherwise,
2360 returns False, and if bad_addr is non-NULL, sets *bad_addr to
2361 indicate the lowest failing address. Functions below are
2362 similar. */
njndbf7ca72006-03-31 11:57:59 +00002363Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00002364{
nethercote451eae92004-11-02 13:06:32 +00002365 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002366 UWord vabits2;
2367
njndbf7ca72006-03-31 11:57:59 +00002368 PROF_EVENT(60, "check_mem_is_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00002369 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002370 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
njn1d0825f2006-03-27 11:37:07 +00002371 vabits2 = get_vabits2(a);
2372 if (VA_BITS2_NOACCESS != vabits2) {
2373 if (bad_addr != NULL) *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00002374 return False;
2375 }
2376 a++;
2377 }
2378 return True;
2379}
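/* Illustrative sketch (hypothetical caller, not from this file): using
   MC_(check_mem_is_noaccess) to confirm that a block's redzone really is
   inaccessible, reporting the first accessible byte if it is not. */
#if 0
static void example_check_redzone ( Addr rz_start, SizeT rz_szB )
{
   Addr bad = 0;
   if (!MC_(check_mem_is_noaccess)( rz_start, rz_szB, &bad )) {
      VG_(message)(Vg_UserMsg,
                   "redzone byte at %p is unexpectedly accessible",
                   (void*)bad);
   }
}
#endif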
2380
njndbf7ca72006-03-31 11:57:59 +00002381static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002382{
nethercote451eae92004-11-02 13:06:32 +00002383 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002384 UWord vabits2;
2385
njndbf7ca72006-03-31 11:57:59 +00002386 PROF_EVENT(62, "is_mem_addressable");
njn25e49d8e72002-09-23 09:36:25 +00002387 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002388 PROF_EVENT(63, "is_mem_addressable(loop)");
njn1d0825f2006-03-27 11:37:07 +00002389 vabits2 = get_vabits2(a);
2390 if (VA_BITS2_NOACCESS == vabits2) {
njn25e49d8e72002-09-23 09:36:25 +00002391 if (bad_addr != NULL) *bad_addr = a;
2392 return False;
2393 }
2394 a++;
2395 }
2396 return True;
2397}
2398
njndbf7ca72006-03-31 11:57:59 +00002399static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002400{
nethercote451eae92004-11-02 13:06:32 +00002401 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002402 UWord vabits2;
njn25e49d8e72002-09-23 09:36:25 +00002403
njndbf7ca72006-03-31 11:57:59 +00002404 PROF_EVENT(64, "is_mem_defined");
2405 DEBUG("is_mem_defined\n");
njn25e49d8e72002-09-23 09:36:25 +00002406 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002407 PROF_EVENT(65, "is_mem_defined(loop)");
njn1d0825f2006-03-27 11:37:07 +00002408 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002409 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002410 // Error! Nb: Report addressability errors in preference to
2411         // definedness errors.  And don't report definedness errors unless
2412 // --undef-value-errors=yes.
2413 if (bad_addr != NULL) *bad_addr = a;
2414 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2415 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002416 }
2417 a++;
2418 }
nethercote8b76fe52004-11-08 19:20:09 +00002419 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00002420}
2421
2422
2423/* Check a zero-terminated ASCII string.  Tricky -- we don't want to
2424   examine the actual bytes to find the end until we're sure it is
2425 safe to do so. */
2426
njndbf7ca72006-03-31 11:57:59 +00002427static MC_ReadResult mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002428{
njn1d0825f2006-03-27 11:37:07 +00002429 UWord vabits2;
2430
njndbf7ca72006-03-31 11:57:59 +00002431 PROF_EVENT(66, "mc_is_defined_asciiz");
2432 DEBUG("mc_is_defined_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00002433 while (True) {
njndbf7ca72006-03-31 11:57:59 +00002434 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
njn1d0825f2006-03-27 11:37:07 +00002435 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002436 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002437 // Error! Nb: Report addressability errors in preference to
2438         // definedness errors. And don't report definedness errors unless
2439 // --undef-value-errors=yes.
2440 if (bad_addr != NULL) *bad_addr = a;
2441 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2442 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002443 }
2444 /* Ok, a is safe to read. */
njn1d0825f2006-03-27 11:37:07 +00002445 if (* ((UChar*)a) == 0) {
sewardj45d94cc2005-04-20 14:44:11 +00002446 return MC_Ok;
njn1d0825f2006-03-27 11:37:07 +00002447 }
njn25e49d8e72002-09-23 09:36:25 +00002448 a++;
2449 }
2450}
2451
2452
2453/*------------------------------------------------------------*/
2454/*--- Memory event handlers ---*/
2455/*------------------------------------------------------------*/
2456
njn25e49d8e72002-09-23 09:36:25 +00002457static
njndbf7ca72006-03-31 11:57:59 +00002458void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
2459 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002460{
njn25e49d8e72002-09-23 09:36:25 +00002461 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002462 Bool ok = is_mem_addressable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002463
njn25e49d8e72002-09-23 09:36:25 +00002464 if (!ok) {
2465 switch (part) {
2466 case Vg_CoreSysCall:
njn718d3b12006-12-16 00:54:12 +00002467 mc_record_memparam_error ( tid, bad_addr, /*isAddrErr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002468 break;
2469
njn25e49d8e72002-09-23 09:36:25 +00002470 case Vg_CoreSignal:
njn718d3b12006-12-16 00:54:12 +00002471 mc_record_core_mem_error( tid, /*isAddrErr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002472 break;
2473
2474 default:
njndbf7ca72006-03-31 11:57:59 +00002475 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002476 }
2477 }
njn25e49d8e72002-09-23 09:36:25 +00002478}
2479
2480static
njndbf7ca72006-03-31 11:57:59 +00002481void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00002482 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002483{
njn25e49d8e72002-09-23 09:36:25 +00002484 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002485 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00002486
nethercote8b76fe52004-11-08 19:20:09 +00002487 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00002488 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00002489
njn25e49d8e72002-09-23 09:36:25 +00002490 switch (part) {
2491 case Vg_CoreSysCall:
njn718d3b12006-12-16 00:54:12 +00002492 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
njn25e49d8e72002-09-23 09:36:25 +00002493 break;
2494
njn25e49d8e72002-09-23 09:36:25 +00002495 /* If we're being asked to jump to a silly address, record an error
2496 message before potentially crashing the entire system. */
2497 case Vg_CoreTranslate:
njn1d0825f2006-03-27 11:37:07 +00002498 mc_record_jump_error( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002499 break;
2500
2501 default:
njndbf7ca72006-03-31 11:57:59 +00002502 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002503 }
2504 }
njn25e49d8e72002-09-23 09:36:25 +00002505}
2506
2507static
njndbf7ca72006-03-31 11:57:59 +00002508void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00002509 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00002510{
nethercote8b76fe52004-11-08 19:20:09 +00002511 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00002512 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00002513
njnca82cc02004-11-22 17:18:48 +00002514 tl_assert(part == Vg_CoreSysCall);
njndbf7ca72006-03-31 11:57:59 +00002515 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00002516 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00002517 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
2518 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
njn25e49d8e72002-09-23 09:36:25 +00002519 }
njn25e49d8e72002-09-23 09:36:25 +00002520}
2521
njn25e49d8e72002-09-23 09:36:25 +00002522static
nethercote451eae92004-11-02 13:06:32 +00002523void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002524{
njndbf7ca72006-03-31 11:57:59 +00002525 /* Ignore the permissions, just make it defined. Seems to work... */
njnba7b4582006-09-21 15:59:30 +00002526   // Marking everything defined works because: code is defined; initialised
2527   // variables get put in the data segment and are defined; and uninitialised
2528   // variables get put in the bss segment and are auto-zeroed (and so defined).
2529 //
2530 // It's possible that there will be padding between global variables.
2531 // This will also be auto-zeroed, and marked as defined by Memcheck. If
2532 // a program uses it, Memcheck will not complain. This is arguably a
2533 // false negative, but it's a grey area -- the behaviour is defined (the
2534 // padding is zeroed) but it's probably not what the user intended. And
2535 // we can't avoid it.
nethercote451eae92004-11-02 13:06:32 +00002536 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
njndbf7ca72006-03-31 11:57:59 +00002537 a, (ULong)len, rr, ww, xx);
2538 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002539}
2540
2541static
njnb8dca862005-03-14 02:42:44 +00002542void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002543{
njndbf7ca72006-03-31 11:57:59 +00002544 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002545}
2546
njncf45fd42004-11-24 16:30:22 +00002547static
2548void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
2549{
njndbf7ca72006-03-31 11:57:59 +00002550 MC_(make_mem_defined)(a, len);
njncf45fd42004-11-24 16:30:22 +00002551}
njn25e49d8e72002-09-23 09:36:25 +00002552
sewardj45d94cc2005-04-20 14:44:11 +00002553
njn25e49d8e72002-09-23 09:36:25 +00002554/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002555/*--- Register event handlers ---*/
2556/*------------------------------------------------------------*/
2557
sewardj45d94cc2005-04-20 14:44:11 +00002558/* When some chunk of guest state is written, mark the corresponding
2559 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00002560   chunks of guest state, hence the MAX_REG_WRITE_SIZE value, which has to
2561   be as big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00002562*/
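/* (Illustrative note: a typical trigger is the syscall machinery -- once a
   syscall has written its result into a guest register, the core's
   post-reg-write event fires and that register's shadow bytes are set to
   V_BITS8_DEFINED here.  The exact set of callers is decided by the core.) */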
2563static void mc_post_reg_write ( CorePart part, ThreadId tid,
2564 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00002565{
sewardj05a46732006-10-17 01:28:10 +00002566# define MAX_REG_WRITE_SIZE 1408
cerion21082042005-12-06 19:07:08 +00002567 UChar area[MAX_REG_WRITE_SIZE];
2568 tl_assert(size <= MAX_REG_WRITE_SIZE);
njn1d0825f2006-03-27 11:37:07 +00002569 VG_(memset)(area, V_BITS8_DEFINED, size);
njncf45fd42004-11-24 16:30:22 +00002570 VG_(set_shadow_regs_area)( tid, offset, size, area );
cerion21082042005-12-06 19:07:08 +00002571# undef MAX_REG_WRITE_SIZE
njnd3040452003-05-19 15:04:06 +00002572}
2573
sewardj45d94cc2005-04-20 14:44:11 +00002574static
2575void mc_post_reg_write_clientcall ( ThreadId tid,
2576 OffT offset, SizeT size,
2577 Addr f)
njnd3040452003-05-19 15:04:06 +00002578{
njncf45fd42004-11-24 16:30:22 +00002579 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00002580}
2581
sewardj45d94cc2005-04-20 14:44:11 +00002582/* Look at the definedness of the guest's shadow state for
2583 [offset, offset+len). If any part of that is undefined, record
2584 a parameter error.
2585*/
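/* (Illustrative example: for a syscall wrapper that declares a register
   argument as read, this check scans that register's shadow bytes; if any
   byte is not V_BITS8_DEFINED, a "Syscall param ... contains uninitialised
   byte(s)" (Err_RegParam) error is recorded against the current thread.) */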
2586static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
2587 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00002588{
sewardj45d94cc2005-04-20 14:44:11 +00002589 Int i;
2590 Bool bad;
2591
2592 UChar area[16];
2593 tl_assert(size <= 16);
2594
2595 VG_(get_shadow_regs_area)( tid, offset, size, area );
2596
2597 bad = False;
2598 for (i = 0; i < size; i++) {
njn1d0825f2006-03-27 11:37:07 +00002599 if (area[i] != V_BITS8_DEFINED) {
sewardj2c27f702005-05-03 18:19:05 +00002600 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002601 break;
2602 }
nethercote8b76fe52004-11-08 19:20:09 +00002603 }
2604
sewardj45d94cc2005-04-20 14:44:11 +00002605 if (bad)
njn718d3b12006-12-16 00:54:12 +00002606 mc_record_regparam_error ( tid, s );
nethercote8b76fe52004-11-08 19:20:09 +00002607}
njnd3040452003-05-19 15:04:06 +00002608
njn25e49d8e72002-09-23 09:36:25 +00002609
sewardj6cf40ff2005-04-20 22:31:26 +00002610/*------------------------------------------------------------*/
njn718d3b12006-12-16 00:54:12 +00002611/*--- Error types ---*/
njn1d0825f2006-03-27 11:37:07 +00002612/*------------------------------------------------------------*/
2613
njn718d3b12006-12-16 00:54:12 +00002614// Different kinds of blocks.
2615typedef enum {
2616 Block_Mallocd = 111,
2617 Block_Freed,
2618 Block_Mempool,
2619 Block_MempoolChunk,
2620 Block_UserG
2621} BlockKind;
2622
2623/* ------------------ Addresses -------------------- */
2624
njn1d0825f2006-03-27 11:37:07 +00002625/* The classification of a faulting address. */
2626typedef
2627 enum {
njn718d3b12006-12-16 00:54:12 +00002628 Addr_Undescribed, // as-yet unclassified
2629 Addr_Unknown, // classification yielded nothing useful
2630 Addr_Stack,
2631 Addr_Block,
njn1d0825f2006-03-27 11:37:07 +00002632 }
njn718d3b12006-12-16 00:54:12 +00002633 AddrTag;
njn1d0825f2006-03-27 11:37:07 +00002634
njn1d0825f2006-03-27 11:37:07 +00002635typedef
njn718d3b12006-12-16 00:54:12 +00002636 struct _AddrInfo
njn1d0825f2006-03-27 11:37:07 +00002637 AddrInfo;
2638
njn718d3b12006-12-16 00:54:12 +00002639struct _AddrInfo {
2640 AddrTag tag;
2641 union {
2642 // As-yet unclassified.
2643 struct { } Undescribed;
njn1d0825f2006-03-27 11:37:07 +00002644
njn718d3b12006-12-16 00:54:12 +00002645 // On a stack.
2646 struct {
2647 ThreadId tid; // Which thread's stack?
2648 } Stack;
njn1d0825f2006-03-27 11:37:07 +00002649
njn718d3b12006-12-16 00:54:12 +00002650 // This covers heap blocks (normal and from mempools) and user-defined
2651 // blocks.
2652 struct {
2653 BlockKind block_kind;
2654 Char* block_desc; // "block", "mempool" or user-defined
2655 SizeT block_szB;
2656 OffT rwoffset;
2657 ExeContext* lastchange;
2658 } Block;
njn1d0825f2006-03-27 11:37:07 +00002659
njn718d3b12006-12-16 00:54:12 +00002660 // Classification yielded nothing useful.
2661 struct { } Unknown;
2662
2663 } Addr;
2664};
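/* Worked example (illustrative): a read 4 bytes past the end of a 100-byte
   malloc'd block ends up described as tag == Addr_Block with
   block_kind == Block_Mallocd, block_szB == 100 and rwoffset == 104, which
   mc_pp_AddrInfo() below reports as being "4 bytes after a block of size
   100 alloc'd". */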
2665
2666/* ------------------ Errors ----------------------- */
njn1d0825f2006-03-27 11:37:07 +00002667
2668/* What kind of error it is. */
2669typedef
njn718d3b12006-12-16 00:54:12 +00002670 enum {
2671 Err_Value,
2672 Err_Cond,
2673 Err_CoreMem,
2674 Err_Addr,
2675 Err_Jump,
2676 Err_RegParam,
2677 Err_MemParam,
2678 Err_User,
2679 Err_Free,
2680 Err_FreeMismatch,
2681 Err_Overlap,
2682 Err_Leak,
2683 Err_IllegalMempool,
njn1d0825f2006-03-27 11:37:07 +00002684 }
njn718d3b12006-12-16 00:54:12 +00002685 MC_ErrorTag;
njn1d0825f2006-03-27 11:37:07 +00002686
njn1d0825f2006-03-27 11:37:07 +00002687
njn718d3b12006-12-16 00:54:12 +00002688typedef struct _MC_Error MC_Error;
2689
2690struct _MC_Error {
2691 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
2692 //MC_ErrorTag tag;
2693
2694 union {
2695 // Use of an undefined value:
2696 // - as a pointer in a load or store
2697 // - as a jump target
2698 struct {
2699 SizeT szB; // size of value in bytes
2700 } Value;
2701
2702 // Use of an undefined value in a conditional branch or move.
2703 struct {
2704 } Cond;
2705
2706 // Addressability error in core (signal-handling) operation.
2707 // It would be good to get rid of this error kind, merge it with
2708 // another one somehow.
2709 struct {
2710 } CoreMem;
2711
2712 // Use of an unaddressable memory location in a load or store.
2713 struct {
2714 Bool isWrite; // read or write?
2715 SizeT szB; // not used for exec (jump) errors
2716 Bool maybe_gcc; // True if just below %esp -- could be a gcc bug
2717 AddrInfo ai;
2718 } Addr;
2719
2720 // Jump to an unaddressable memory location.
2721 struct {
2722 AddrInfo ai;
2723 } Jump;
2724
2725 // System call register input contains undefined bytes.
2726 struct {
2727 } RegParam;
2728
2729 // System call memory input contains undefined/unaddressable bytes
2730 struct {
2731 Bool isAddrErr; // Addressability or definedness error?
2732 AddrInfo ai;
2733 } MemParam;
2734
2735 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
2736 struct {
2737 Bool isAddrErr; // Addressability or definedness error?
2738 AddrInfo ai;
2739 } User;
2740
2741 // Program tried to free() something that's not a heap block (this
2742      // covers double-frees).
2743 struct {
2744 AddrInfo ai;
2745 } Free;
2746
2747 // Program allocates heap block with one function
2748      // (malloc/new/new[]/custom) and deallocates with a non-matching one.
2749 struct {
2750 AddrInfo ai;
2751 } FreeMismatch;
2752
2753 // Call to strcpy, memcpy, etc, with overlapping blocks.
2754 struct {
2755 Addr src; // Source block
2756 Addr dst; // Destination block
2757 Int szB; // Size in bytes; 0 if unused.
2758 } Overlap;
2759
2760 // A memory leak.
2761 struct {
2762 UInt n_this_record;
2763 UInt n_total_records;
2764 LossRecord* lossRecord;
2765 } Leak;
2766
2767 // A memory pool error.
2768 struct {
2769 AddrInfo ai;
2770 } IllegalMempool;
2771
2772 } Err;
2773};
2774
njn1d0825f2006-03-27 11:37:07 +00002775
2776/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00002777/*--- Printing errors ---*/
2778/*------------------------------------------------------------*/
2779
njn718d3b12006-12-16 00:54:12 +00002780static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
njn1d0825f2006-03-27 11:37:07 +00002781{
2782 HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
2783 HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";
2784
njn718d3b12006-12-16 00:54:12 +00002785 switch (ai->tag) {
2786 case Addr_Unknown:
2787 if (maybe_gcc) {
njn1d0825f2006-03-27 11:37:07 +00002788 VG_(message)(Vg_UserMsg,
2789 "%sAddress 0x%llx is just below the stack ptr. "
2790 "To suppress, use: --workaround-gcc296-bugs=yes%s",
2791 xpre, (ULong)a, xpost
2792 );
2793 } else {
2794 VG_(message)(Vg_UserMsg,
2795 "%sAddress 0x%llx "
2796 "is not stack'd, malloc'd or (recently) free'd%s",
2797 xpre, (ULong)a, xpost);
2798 }
2799 break;
njn718d3b12006-12-16 00:54:12 +00002800
2801 case Addr_Stack:
2802 VG_(message)(Vg_UserMsg,
2803 "%sAddress 0x%llx is on thread %d's stack%s",
2804 xpre, (ULong)a, ai->Addr.Stack.tid, xpost);
2805 break;
2806
2807 case Addr_Block: {
2808 SizeT block_szB = ai->Addr.Block.block_szB;
2809 OffT rwoffset = ai->Addr.Block.rwoffset;
njn1d0825f2006-03-27 11:37:07 +00002810 SizeT delta;
2811 const Char* relative;
njn1d0825f2006-03-27 11:37:07 +00002812
njn718d3b12006-12-16 00:54:12 +00002813 if (rwoffset < 0) {
2814 delta = (SizeT)(-rwoffset);
njn1d0825f2006-03-27 11:37:07 +00002815 relative = "before";
njn718d3b12006-12-16 00:54:12 +00002816 } else if (rwoffset >= block_szB) {
2817 delta = rwoffset - block_szB;
njn1d0825f2006-03-27 11:37:07 +00002818 relative = "after";
2819 } else {
njn718d3b12006-12-16 00:54:12 +00002820 delta = rwoffset;
njn1d0825f2006-03-27 11:37:07 +00002821 relative = "inside";
2822 }
2823 VG_(message)(Vg_UserMsg,
2824 "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
2825 xpre,
njn718d3b12006-12-16 00:54:12 +00002826 a, delta, relative, ai->Addr.Block.block_desc,
2827 block_szB,
2828 ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
2829 : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
2830 : "client-defined",
njn1d0825f2006-03-27 11:37:07 +00002831 xpost);
njn718d3b12006-12-16 00:54:12 +00002832 VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
njn1d0825f2006-03-27 11:37:07 +00002833 break;
2834 }
njn718d3b12006-12-16 00:54:12 +00002835
njn1d0825f2006-03-27 11:37:07 +00002836 default:
2837 VG_(tool_panic)("mc_pp_AddrInfo");
2838 }
2839}
2840
njn718d3b12006-12-16 00:54:12 +00002841static const HChar* str_leak_lossmode ( Reachedness lossmode )
njn9e63cb62005-05-08 18:34:59 +00002842{
njn718d3b12006-12-16 00:54:12 +00002843 const HChar *loss = "?";
2844 switch (lossmode) {
2845 case Unreached: loss = "definitely lost"; break;
2846 case IndirectLeak: loss = "indirectly lost"; break;
2847 case Interior: loss = "possibly lost"; break;
2848 case Proper: loss = "still reachable"; break;
2849 }
2850 return loss;
2851}
njn9e63cb62005-05-08 18:34:59 +00002852
njn718d3b12006-12-16 00:54:12 +00002853static const HChar* xml_leak_kind ( Reachedness lossmode )
2854{
2855 const HChar *loss = "?";
2856 switch (lossmode) {
2857 case Unreached: loss = "Leak_DefinitelyLost"; break;
2858 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
2859 case Interior: loss = "Leak_PossiblyLost"; break;
2860 case Proper: loss = "Leak_StillReachable"; break;
2861 }
2862 return loss;
2863}
2864
2865static void mc_pp_msg( Char* xml_name, Error* err, const HChar* format, ... )
2866{
sewardj71bc3cb2005-05-19 00:25:45 +00002867 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
2868 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
njn718d3b12006-12-16 00:54:12 +00002869 Char buf[256];
2870 va_list vargs;
2871
2872 if (VG_(clo_xml))
2873 VG_(message)(Vg_UserMsg, " <kind>%s</kind>", xml_name);
2874 // Stick xpre and xpost on the front and back of the format string.
2875 VG_(snprintf)(buf, 256, "%s%s%s", xpre, format, xpost);
2876 va_start(vargs, format);
2877 VG_(vmessage) ( Vg_UserMsg, buf, vargs );
2878 va_end(vargs);
2879 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2880}
2881
2882static void mc_pp_Error ( Error* err )
2883{
2884 MC_Error* extra = VG_(get_error_extra)(err);
sewardj71bc3cb2005-05-19 00:25:45 +00002885
njn9e63cb62005-05-08 18:34:59 +00002886 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00002887 case Err_CoreMem: {
2888 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
2889 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
2890 signal handler frame. --njn */
2891 mc_pp_msg("CoreMemError", err,
2892 "%s contains unaddressable byte(s)",
2893 VG_(get_error_string)(err));
njn9e63cb62005-05-08 18:34:59 +00002894 break;
njn9e63cb62005-05-08 18:34:59 +00002895 }
2896
njn718d3b12006-12-16 00:54:12 +00002897 case Err_Value:
2898 mc_pp_msg("UninitValue", err,
2899 "Use of uninitialised value of size %d",
2900 extra->Err.Value.szB);
2901 break;
2902
2903 case Err_Cond:
2904 mc_pp_msg("UninitCondition", err,
2905 "Conditional jump or move depends"
2906 " on uninitialised value(s)");
2907 break;
2908
2909 case Err_RegParam:
2910 mc_pp_msg("SyscallParam", err,
2911 "Syscall param %s contains uninitialised byte(s)",
2912 VG_(get_error_string)(err));
2913 break;
2914
2915 case Err_MemParam:
2916 mc_pp_msg("SyscallParam", err,
2917 "Syscall param %s points to %s byte(s)",
2918 VG_(get_error_string)(err),
2919 ( extra->Err.MemParam.isAddrErr
2920 ? "unaddressable" : "uninitialised" ));
2921 mc_pp_AddrInfo(VG_(get_error_address)(err),
2922 &extra->Err.MemParam.ai, False);
2923 break;
2924
2925 case Err_User:
2926 mc_pp_msg("ClientCheck", err,
2927 "%s byte(s) found during client check request",
2928 ( extra->Err.User.isAddrErr
2929 ? "Unaddressable" : "Uninitialised" ));
2930 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
2931 False);
2932 break;
2933
2934 case Err_Free:
2935 mc_pp_msg("InvalidFree", err,
2936 "Invalid free() / delete / delete[]");
2937 mc_pp_AddrInfo(VG_(get_error_address)(err),
2938 &extra->Err.Free.ai, False);
2939 break;
2940
2941 case Err_FreeMismatch:
2942 mc_pp_msg("MismatchedFree", err,
2943 "Mismatched free() / delete / delete []");
2944 mc_pp_AddrInfo(VG_(get_error_address)(err),
2945 &extra->Err.FreeMismatch.ai, False);
2946 break;
2947
2948 case Err_Addr:
2949 if (extra->Err.Addr.isWrite) {
2950 mc_pp_msg("InvalidWrite", err,
2951 "Invalid write of size %d",
2952 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00002953 } else {
njn718d3b12006-12-16 00:54:12 +00002954 mc_pp_msg("InvalidRead", err,
2955 "Invalid read of size %d",
2956 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00002957 }
njn718d3b12006-12-16 00:54:12 +00002958 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Addr.ai,
2959 extra->Err.Addr.maybe_gcc);
njn9e63cb62005-05-08 18:34:59 +00002960 break;
2961
njn718d3b12006-12-16 00:54:12 +00002962 case Err_Jump:
2963 mc_pp_msg("InvalidJump", err,
2964 "Jump to the invalid address stated on the next line");
2965 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Jump.ai,
2966 False);
njn9e63cb62005-05-08 18:34:59 +00002967 break;
njn1d0825f2006-03-27 11:37:07 +00002968
njn718d3b12006-12-16 00:54:12 +00002969 case Err_Overlap:
2970 if (extra->Err.Overlap.szB == 0)
2971 mc_pp_msg("Overlap", err,
2972 "Source and destination overlap in %s(%p, %p)",
2973 VG_(get_error_string)(err),
2974 extra->Err.Overlap.dst, extra->Err.Overlap.src);
njn1d0825f2006-03-27 11:37:07 +00002975 else
njn718d3b12006-12-16 00:54:12 +00002976 mc_pp_msg("Overlap", err,
2977 "Source and destination overlap in %s(%p, %p, %d)",
2978 VG_(get_error_string)(err),
2979 extra->Err.Overlap.dst, extra->Err.Overlap.src,
2980 extra->Err.Overlap.szB);
njn1d0825f2006-03-27 11:37:07 +00002981 break;
njn1d0825f2006-03-27 11:37:07 +00002982
njn718d3b12006-12-16 00:54:12 +00002983 case Err_IllegalMempool:
2984 mc_pp_msg("InvalidMemPool", err,
2985 "Illegal memory pool address");
2986 mc_pp_AddrInfo(VG_(get_error_address)(err),
2987 &extra->Err.IllegalMempool.ai, False);
njn1d0825f2006-03-27 11:37:07 +00002988 break;
2989
njn718d3b12006-12-16 00:54:12 +00002990 case Err_Leak: {
2991 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
2992 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
2993 UInt n_this_record = extra->Err.Leak.n_this_record;
2994 UInt n_total_records = extra->Err.Leak.n_total_records;
2995 LossRecord* l = extra->Err.Leak.lossRecord;
2996
2997 if (VG_(clo_xml)) {
2998 VG_(message)(Vg_UserMsg, " <kind>%t</kind>",
2999 xml_leak_kind(l->loss_mode));
3000 } else {
3001 VG_(message)(Vg_UserMsg, "");
3002 }
3003
3004 if (l->indirect_bytes) {
3005 VG_(message)(Vg_UserMsg,
3006 "%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
3007 " are %s in loss record %,u of %,u%s",
3008 xpre,
3009 l->total_bytes + l->indirect_bytes,
3010 l->total_bytes, l->indirect_bytes, l->num_blocks,
3011 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
3012 xpost
3013 );
3014 if (VG_(clo_xml)) {
3015 // Nb: don't put commas in these XML numbers
3016 VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
3017 l->total_bytes + l->indirect_bytes);
3018 VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
3019 l->num_blocks);
3020 }
3021 } else {
3022 VG_(message)(
3023 Vg_UserMsg,
3024 "%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
3025 xpre,
3026 l->total_bytes, l->num_blocks,
3027 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
3028 xpost
3029 );
3030 if (VG_(clo_xml)) {
3031 VG_(message)(Vg_UserMsg, " <leakedbytes>%d</leakedbytes>",
3032 l->total_bytes);
3033 VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
3034 l->num_blocks);
3035 }
3036 }
3037 VG_(pp_ExeContext)(l->allocated_at);
3038 break;
3039 }
3040
njn1d0825f2006-03-27 11:37:07 +00003041 default:
3042 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
3043 VG_(get_error_kind)(err));
3044 VG_(tool_panic)("unknown error code in mc_pp_Error)");
njn9e63cb62005-05-08 18:34:59 +00003045 }
3046}
3047
3048/*------------------------------------------------------------*/
3049/*--- Recording errors ---*/
3050/*------------------------------------------------------------*/
3051
njn1d0825f2006-03-27 11:37:07 +00003052/* This many bytes below %ESP are considered addressable if we're
3053 doing the --workaround-gcc296-bugs hack. */
3054#define VG_GCC296_BUG_STACK_SLOP 1024
3055
3056/* Is this address within some small distance below %ESP? Used only
3057 for the --workaround-gcc296-bugs kludge. */
3058static Bool is_just_below_ESP( Addr esp, Addr aa )
3059{
3060 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
3061 return True;
3062 else
3063 return False;
3064}
3065
njn718d3b12006-12-16 00:54:12 +00003066/* --- Called from generated and non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00003067
njn718d3b12006-12-16 00:54:12 +00003068static void mc_record_address_error ( ThreadId tid, Addr a, Int szB,
njn1d0825f2006-03-27 11:37:07 +00003069 Bool isWrite )
3070{
njn718d3b12006-12-16 00:54:12 +00003071 MC_Error extra;
sewardj05a46732006-10-17 01:28:10 +00003072 Bool just_below_esp;
3073
3074 if (in_ignored_range(a))
3075 return;
3076
3077# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
3078 /* AIX zero-page handling. On AIX, reads from page zero are,
3079 bizarrely enough, legitimate. Writes to page zero aren't,
3080 though. Since memcheck can't distinguish reads from writes, the
3081 best we can do is to 'act normal' and mark the A bits in the
3082 normal way as noaccess, but then hide any reads from that page
3083 that get reported here. */
njn718d3b12006-12-16 00:54:12 +00003084 if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
sewardj05a46732006-10-17 01:28:10 +00003085 return;
3086
3087 /* Appalling AIX hack. It suppresses reads done by glink
3088 fragments. Getting rid of this would require figuring out
3089 somehow where the referenced data areas are (and their
3090 sizes). */
njn718d3b12006-12-16 00:54:12 +00003091 if ((!isWrite) && szB == sizeof(Word)) {
sewardj05a46732006-10-17 01:28:10 +00003092 UInt i1, i2;
3093 UInt* pc = (UInt*)VG_(get_IP)(tid);
3094 if (sizeof(Word) == 4) {
3095 i1 = 0x800c0000; /* lwz r0,0(r12) */
3096 i2 = 0x804c0004; /* lwz r2,4(r12) */
3097 } else {
3098 i1 = 0xe80c0000; /* ld r0,0(r12) */
3099 i2 = 0xe84c0008; /* ld r2,8(r12) */
3100 }
3101 if (pc[0] == i1 && pc[1] == i2) return;
3102 if (pc[0] == i2 && pc[-1] == i1) return;
3103 }
3104# endif
njn1d0825f2006-03-27 11:37:07 +00003105
3106 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
3107
3108 /* If this is caused by an access immediately below %ESP, and the
3109 user asks nicely, we just ignore it. */
3110 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
3111 return;
3112
njn718d3b12006-12-16 00:54:12 +00003113 extra.Err.Addr.isWrite = isWrite;
3114 extra.Err.Addr.szB = szB;
3115 extra.Err.Addr.maybe_gcc = just_below_esp;
3116 extra.Err.Addr.ai.tag = Addr_Undescribed;
3117 VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003118}
3119
njn718d3b12006-12-16 00:54:12 +00003120static void mc_record_value_error ( ThreadId tid, Int szB )
3121{
3122 MC_Error extra;
3123 tl_assert(MC_(clo_undef_value_errors));
3124 extra.Err.Value.szB = szB;
3125 VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
3126}
3127
3128static void mc_record_cond_error ( ThreadId tid )
3129{
3130 tl_assert(MC_(clo_undef_value_errors));
3131 VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, /*extra*/NULL);
3132}
3133
3134/* --- Called from non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00003135
3136/* This is for memory errors in pthread functions, as opposed to pthread API
3137 errors which are found by the core. */
njn718d3b12006-12-16 00:54:12 +00003138static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* msg )
njn1d0825f2006-03-27 11:37:07 +00003139{
njn718d3b12006-12-16 00:54:12 +00003140 VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
njn1d0825f2006-03-27 11:37:07 +00003141}
3142
njn718d3b12006-12-16 00:54:12 +00003143static void mc_record_regparam_error ( ThreadId tid, Char* msg )
njn1d0825f2006-03-27 11:37:07 +00003144{
njn1d0825f2006-03-27 11:37:07 +00003145 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003146 VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, /*extra*/NULL );
3147}
3148
3149static void mc_record_memparam_error ( ThreadId tid, Addr a,
3150 Bool isAddrErr, Char* msg )
3151{
3152 MC_Error extra;
3153 tl_assert(VG_INVALID_THREADID != tid);
3154 if (!isAddrErr)
3155 tl_assert(MC_(clo_undef_value_errors));
3156 extra.Err.MemParam.isAddrErr = isAddrErr;
3157 extra.Err.MemParam.ai.tag = Addr_Undescribed;
3158 VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
njn1d0825f2006-03-27 11:37:07 +00003159}
3160
3161static void mc_record_jump_error ( ThreadId tid, Addr a )
3162{
njn718d3b12006-12-16 00:54:12 +00003163 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003164 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003165 extra.Err.Jump.ai.tag = Addr_Undescribed;
3166 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003167}
3168
3169void MC_(record_free_error) ( ThreadId tid, Addr a )
3170{
njn718d3b12006-12-16 00:54:12 +00003171 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003172 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003173 extra.Err.Free.ai.tag = Addr_Undescribed;
3174 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
3175}
3176
3177void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
3178{
3179 MC_Error extra;
3180 AddrInfo* ai = &extra.Err.FreeMismatch.ai;
3181 tl_assert(VG_INVALID_THREADID != tid);
3182 ai->tag = Addr_Block;
3183 ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed'
3184 ai->Addr.Block.block_desc = "block";
3185 ai->Addr.Block.block_szB = mc->szB;
3186 ai->Addr.Block.rwoffset = 0;
3187 ai->Addr.Block.lastchange = mc->where;
3188 VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
3189 &extra );
njn1d0825f2006-03-27 11:37:07 +00003190}
3191
3192void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
3193{
njn718d3b12006-12-16 00:54:12 +00003194 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003195 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003196 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
3197 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003198}
3199
njn718d3b12006-12-16 00:54:12 +00003200static void mc_record_overlap_error ( ThreadId tid, Char* function,
3201 Addr src, Addr dst, SizeT szB )
njn1d0825f2006-03-27 11:37:07 +00003202{
njn718d3b12006-12-16 00:54:12 +00003203 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003204 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003205 extra.Err.Overlap.src = src;
3206 extra.Err.Overlap.dst = dst;
3207 extra.Err.Overlap.szB = szB;
njn1d0825f2006-03-27 11:37:07 +00003208 VG_(maybe_record_error)(
njn718d3b12006-12-16 00:54:12 +00003209 tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
njn1d0825f2006-03-27 11:37:07 +00003210}
3211
njn718d3b12006-12-16 00:54:12 +00003212Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
3213 UInt n_total_records, LossRecord* lossRecord,
3214 Bool print_record )
njn1d0825f2006-03-27 11:37:07 +00003215{
njn718d3b12006-12-16 00:54:12 +00003216 MC_Error extra;
3217 extra.Err.Leak.n_this_record = n_this_record;
3218 extra.Err.Leak.n_total_records = n_total_records;
3219 extra.Err.Leak.lossRecord = lossRecord;
njn1d0825f2006-03-27 11:37:07 +00003220 return
njn718d3b12006-12-16 00:54:12 +00003221 VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
3222 lossRecord->allocated_at, print_record,
njn1d0825f2006-03-27 11:37:07 +00003223 /*allow_GDB_attach*/False, /*count_error*/False );
3224}
3225
njn718d3b12006-12-16 00:54:12 +00003226static void mc_record_user_error ( ThreadId tid, Addr a, Bool isAddrErr )
njn9e63cb62005-05-08 18:34:59 +00003227{
njn718d3b12006-12-16 00:54:12 +00003228 MC_Error extra;
njn9e63cb62005-05-08 18:34:59 +00003229
3230 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003231 extra.Err.User.isAddrErr = isAddrErr;
3232 extra.Err.User.ai.tag = Addr_Undescribed;
3233 VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
njn9e63cb62005-05-08 18:34:59 +00003234}
3235
njn718d3b12006-12-16 00:54:12 +00003236/*------------------------------------------------------------*/
3237/*--- Other error operations ---*/
3238/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00003239
3240/* Compare error contexts, to detect duplicates. Note that if they
3241 are otherwise the same, the faulting addrs and associated rwoffsets
3242 are allowed to be different. */
3243static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
3244{
njn718d3b12006-12-16 00:54:12 +00003245 MC_Error* extra1 = VG_(get_error_extra)(e1);
3246 MC_Error* extra2 = VG_(get_error_extra)(e2);
njn1d0825f2006-03-27 11:37:07 +00003247
3248 /* Guaranteed by calling function */
3249 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
3250
3251 switch (VG_(get_error_kind)(e1)) {
njn718d3b12006-12-16 00:54:12 +00003252 case Err_CoreMem: {
njn1d0825f2006-03-27 11:37:07 +00003253 Char *e1s, *e2s;
njn1d0825f2006-03-27 11:37:07 +00003254 e1s = VG_(get_error_string)(e1);
3255 e2s = VG_(get_error_string)(e2);
njn718d3b12006-12-16 00:54:12 +00003256 if (e1s == e2s) return True;
3257 if (VG_STREQ(e1s, e2s)) return True;
njn1d0825f2006-03-27 11:37:07 +00003258 return False;
3259 }
3260
njn718d3b12006-12-16 00:54:12 +00003261 case Err_RegParam:
3262 return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
3263
njn1d0825f2006-03-27 11:37:07 +00003264 // Perhaps we should also check the addrinfo.akinds for equality.
3265 // That would result in more error reports, but only in cases where
3266 // a register contains uninitialised bytes and points to memory
3267 // containing uninitialised bytes. Currently, the 2nd of those to be
3268 // detected won't be reported. That is (nearly?) always the memory
3269 // error, which is good.
njn718d3b12006-12-16 00:54:12 +00003270 case Err_MemParam:
3271 if (!VG_STREQ(VG_(get_error_string)(e1),
3272 VG_(get_error_string)(e2))) return False;
njn1d0825f2006-03-27 11:37:07 +00003273 // fall through
njn718d3b12006-12-16 00:54:12 +00003274 case Err_User:
3275 return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
3276 ? True : False );
3277
3278 case Err_Free:
3279 case Err_FreeMismatch:
3280 case Err_Jump:
3281 case Err_IllegalMempool:
3282 case Err_Overlap:
3283 case Err_Cond:
njn1d0825f2006-03-27 11:37:07 +00003284 return True;
3285
njn718d3b12006-12-16 00:54:12 +00003286 case Err_Addr:
3287 return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
3288 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00003289
njn718d3b12006-12-16 00:54:12 +00003290 case Err_Value:
3291 return ( extra1->Err.Value.szB == extra2->Err.Value.szB
3292 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00003293
njn718d3b12006-12-16 00:54:12 +00003294 case Err_Leak:
3295 VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
njn1d0825f2006-03-27 11:37:07 +00003296 "since it's handled with VG_(unique_error)()!");
3297
njn1d0825f2006-03-27 11:37:07 +00003298 default:
3299 VG_(printf)("Error:\n unknown error code %d\n",
3300 VG_(get_error_kind)(e1));
3301 VG_(tool_panic)("unknown error code in mc_eq_Error");
3302 }
3303}
3304
3305/* Function used when searching MC_Chunk lists */
3306static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
3307{
3308 // Nb: this is not quite right! It assumes that the heap block has
3309 // a redzone of size MC_MALLOC_REDZONE_SZB. That's true for malloc'd
3310 // blocks, but not necessarily true for custom-alloc'd blocks. So
3311 // in some cases this could result in an incorrect description (eg.
3312 // saying "12 bytes after block A" when really it's within block B.
3313   // saying "12 bytes after block A" when really it's within block B).
njn718d3b12006-12-16 00:54:12 +00003314 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
njn1d0825f2006-03-27 11:37:07 +00003315 MC_MALLOC_REDZONE_SZB );
3316}
3317
3318// Forward declaration
3319static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
3320
njn718d3b12006-12-16 00:54:12 +00003321
njn1d0825f2006-03-27 11:37:07 +00003322/* Describe an address as best you can, for error messages,
3323 putting the result in ai. */
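/* Classification is attempted in this order: user-defined (client-request)
   blocks, thread stacks, recently-freed heap blocks, then currently
   malloc'd blocks; anything else ends up as Addr_Unknown. */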
3324static void describe_addr ( Addr a, AddrInfo* ai )
3325{
3326 MC_Chunk* mc;
njn718d3b12006-12-16 00:54:12 +00003327 ThreadId tid;
3328 Addr stack_min, stack_max;
3329
3330 tl_assert(Addr_Undescribed == ai->tag);
njn1d0825f2006-03-27 11:37:07 +00003331
3332 /* Perhaps it's a user-def'd block? */
3333 if (client_perm_maybe_describe( a, ai ))
3334 return;
3335
3336 /* Perhaps it's on a thread's stack? */
3337 VG_(thread_stack_reset_iter)();
3338 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
3339 if (stack_min <= a && a <= stack_max) {
njn718d3b12006-12-16 00:54:12 +00003340 ai->tag = Addr_Stack;
3341 ai->Addr.Stack.tid = tid;
njn1d0825f2006-03-27 11:37:07 +00003342 return;
3343 }
3344 }
3345 /* Search for a recently freed block which might bracket it. */
3346 mc = MC_(get_freed_list_head)();
3347 while (mc) {
3348 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00003349 ai->tag = Addr_Block;
3350 ai->Addr.Block.block_kind = Block_Freed;
3351 ai->Addr.Block.block_desc = "block";
3352 ai->Addr.Block.block_szB = mc->szB;
3353 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
3354 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00003355 return;
3356 }
3357 mc = mc->next;
3358 }
3359 /* Search for a currently malloc'd block which might bracket it. */
3360 VG_(HT_ResetIter)(MC_(malloc_list));
3361 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
3362 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00003363 ai->tag = Addr_Block;
3364 ai->Addr.Block.block_kind = Block_Mallocd;
3365 ai->Addr.Block.block_desc = "block";
3366 ai->Addr.Block.block_szB = mc->szB;
3367 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
3368 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00003369 return;
3370 }
3371 }
3372 /* Clueless ... */
njn718d3b12006-12-16 00:54:12 +00003373 ai->tag = Addr_Unknown;
njn1d0825f2006-03-27 11:37:07 +00003374 return;
3375}
3376
3377/* Updates the copy with address info if necessary (but not for all errors). */
3378static UInt mc_update_extra( Error* err )
3379{
njn718d3b12006-12-16 00:54:12 +00003380 MC_Error* extra = VG_(get_error_extra)(err);
3381
njn1d0825f2006-03-27 11:37:07 +00003382 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00003383 // These ones don't have addresses associated with them, and so don't
njn1d0825f2006-03-27 11:37:07 +00003384 // need any updating.
njn718d3b12006-12-16 00:54:12 +00003385 case Err_CoreMem:
3386 case Err_Value:
3387 case Err_Cond:
3388 case Err_Overlap:
3389 case Err_RegParam:
3390 // For Err_Leaks the returned size does not matter -- they are always
3391      // shown with VG_(unique_error)() so their 'extra' is not copied. But we make it
3392 // consistent with the others.
3393 case Err_Leak:
njn1d0825f2006-03-27 11:37:07 +00003394 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00003395
njn718d3b12006-12-16 00:54:12 +00003396 // These ones always involve a memory address.
3397 case Err_Addr:
3398 describe_addr ( VG_(get_error_address)(err), &extra->Err.Addr.ai );
njn1d0825f2006-03-27 11:37:07 +00003399 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00003400 case Err_MemParam:
3401 describe_addr ( VG_(get_error_address)(err), &extra->Err.MemParam.ai );
njn1d0825f2006-03-27 11:37:07 +00003402 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00003403 case Err_Jump:
3404 describe_addr ( VG_(get_error_address)(err), &extra->Err.Jump.ai );
3405 return sizeof(MC_Error);
3406 case Err_User:
3407 describe_addr ( VG_(get_error_address)(err), &extra->Err.User.ai );
3408 return sizeof(MC_Error);
3409 case Err_Free:
3410 describe_addr ( VG_(get_error_address)(err), &extra->Err.Free.ai );
3411 return sizeof(MC_Error);
3412 case Err_IllegalMempool:
3413 describe_addr ( VG_(get_error_address)(err),
3414 &extra->Err.IllegalMempool.ai );
3415 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00003416
njn718d3b12006-12-16 00:54:12 +00003417 // Err_FreeMismatches have already had their address described; this is
njn1d0825f2006-03-27 11:37:07 +00003418 // possible because we have the MC_Chunk on hand when the error is
3419 // detected. However, the address may be part of a user block, and if so
3420 // we override the pre-determined description with a user block one.
njn718d3b12006-12-16 00:54:12 +00003421 case Err_FreeMismatch: {
3422 tl_assert(extra && Block_Mallocd ==
3423 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
njn1d0825f2006-03-27 11:37:07 +00003424 (void)client_perm_maybe_describe( VG_(get_error_address)(err),
njn718d3b12006-12-16 00:54:12 +00003425 &extra->Err.FreeMismatch.ai );
njn1d0825f2006-03-27 11:37:07 +00003426 return sizeof(MC_Error);
3427 }
3428
njn1d0825f2006-03-27 11:37:07 +00003429 default: VG_(tool_panic)("mc_update_extra: bad errkind");
3430 }
3431}
3432
njn9e63cb62005-05-08 18:34:59 +00003433/*------------------------------------------------------------*/
3434/*--- Suppressions ---*/
3435/*------------------------------------------------------------*/
3436
njn718d3b12006-12-16 00:54:12 +00003437typedef
3438 enum {
3439 ParamSupp, // Bad syscall params
3440 UserSupp, // Errors arising from client-request checks
3441 CoreMemSupp, // Memory errors in core (pthread ops, signal handling)
3442
3443 // Undefined value errors of given size
3444 Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,
3445
3446 // Undefined value error in conditional.
3447 CondSupp,
3448
3449 // Unaddressable read/write attempt at given size
3450 Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,
3451
3452 JumpSupp, // Jump to unaddressable target
3453 FreeSupp, // Invalid or mismatching free
3454 OverlapSupp, // Overlapping blocks in memcpy(), strcpy(), etc
3455 LeakSupp, // Something to be suppressed in a leak check.
3456 MempoolSupp, // Memory pool suppression.
3457 }
3458 MC_SuppKind;
3459
njn51d827b2005-05-09 01:02:08 +00003460static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00003461{
3462 SuppKind skind;
3463
njn1d0825f2006-03-27 11:37:07 +00003464 if (VG_STREQ(name, "Param")) skind = ParamSupp;
sewardj6362bb52006-11-28 00:15:35 +00003465 else if (VG_STREQ(name, "User")) skind = UserSupp;
njn1d0825f2006-03-27 11:37:07 +00003466 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
3467 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
3468 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
3469 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
3470 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
3471 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
njn718d3b12006-12-16 00:54:12 +00003472 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
njn1d0825f2006-03-27 11:37:07 +00003473 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
3474 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
3475 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
3476 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
njn718d3b12006-12-16 00:54:12 +00003477 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
3478 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
njn9e63cb62005-05-08 18:34:59 +00003479 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
3480 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
3481 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
3482 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
3483 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
3484 else
3485 return False;
3486
3487 VG_(set_supp_kind)(su, skind);
3488 return True;
3489}
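/* For reference, a suppressions-file entry naming one of these kinds looks
   like the following (illustrative only -- the suppression name and the
   fun:/obj: lines are placeholders):

      {
         my-suppression-name
         Memcheck:Addr4
         fun:some_function
         obj:/usr/lib/libfoo.so
      }
*/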
3490
njn1d0825f2006-03-27 11:37:07 +00003491static
3492Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
3493{
3494 Bool eof;
3495
3496 if (VG_(get_supp_kind)(su) == ParamSupp) {
3497 eof = VG_(get_line) ( fd, buf, nBuf );
3498 if (eof) return False;
3499 VG_(set_supp_string)(su, VG_(strdup)(buf));
3500 }
3501 return True;
3502}
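/* For Param suppressions, the extra line read above is the parameter name as
   it appears in the error message; e.g. a suppression for
   "Syscall param write(buf) points to uninitialised byte(s)" would carry the
   extra line "write(buf)".  (Illustrative example.) */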
3503
3504static Bool mc_error_matches_suppression(Error* err, Supp* su)
3505{
njn718d3b12006-12-16 00:54:12 +00003506 Int su_szB;
3507 MC_Error* extra = VG_(get_error_extra)(err);
3508 ErrorKind ekind = VG_(get_error_kind )(err);
njn1d0825f2006-03-27 11:37:07 +00003509
3510 switch (VG_(get_supp_kind)(su)) {
3511 case ParamSupp:
njn718d3b12006-12-16 00:54:12 +00003512 return ((ekind == Err_RegParam || ekind == Err_MemParam)
njn1d0825f2006-03-27 11:37:07 +00003513 && VG_STREQ(VG_(get_error_string)(err),
3514 VG_(get_supp_string)(su)));
3515
sewardj6362bb52006-11-28 00:15:35 +00003516 case UserSupp:
njn718d3b12006-12-16 00:54:12 +00003517 return (ekind == Err_User);
sewardj6362bb52006-11-28 00:15:35 +00003518
njn1d0825f2006-03-27 11:37:07 +00003519 case CoreMemSupp:
njn718d3b12006-12-16 00:54:12 +00003520 return (ekind == Err_CoreMem
njn1d0825f2006-03-27 11:37:07 +00003521 && VG_STREQ(VG_(get_error_string)(err),
3522 VG_(get_supp_string)(su)));
3523
njn718d3b12006-12-16 00:54:12 +00003524 case Value1Supp: su_szB = 1; goto value_case;
3525 case Value2Supp: su_szB = 2; goto value_case;
3526 case Value4Supp: su_szB = 4; goto value_case;
3527 case Value8Supp: su_szB = 8; goto value_case;
3528 case Value16Supp:su_szB =16; goto value_case;
njn1d0825f2006-03-27 11:37:07 +00003529 value_case:
njn718d3b12006-12-16 00:54:12 +00003530 return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
njn1d0825f2006-03-27 11:37:07 +00003531
njn718d3b12006-12-16 00:54:12 +00003532 case CondSupp:
3533 return (ekind == Err_Cond);
3534
3535 case Addr1Supp: su_szB = 1; goto addr_case;
3536 case Addr2Supp: su_szB = 2; goto addr_case;
3537 case Addr4Supp: su_szB = 4; goto addr_case;
3538 case Addr8Supp: su_szB = 8; goto addr_case;
3539 case Addr16Supp:su_szB =16; goto addr_case;
njn1d0825f2006-03-27 11:37:07 +00003540 addr_case:
njn718d3b12006-12-16 00:54:12 +00003541 return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
3542
3543 case JumpSupp:
3544 return (ekind == Err_Jump);
njn1d0825f2006-03-27 11:37:07 +00003545
3546 case FreeSupp:
njn718d3b12006-12-16 00:54:12 +00003547 return (ekind == Err_Free || ekind == Err_FreeMismatch);
njn1d0825f2006-03-27 11:37:07 +00003548
3549 case OverlapSupp:
njn718d3b12006-12-16 00:54:12 +00003550 return (ekind == Err_Overlap);
njn1d0825f2006-03-27 11:37:07 +00003551
3552 case LeakSupp:
njn718d3b12006-12-16 00:54:12 +00003553 return (ekind == Err_Leak);
njn1d0825f2006-03-27 11:37:07 +00003554
3555 case MempoolSupp:
njn718d3b12006-12-16 00:54:12 +00003556 return (ekind == Err_IllegalMempool);
njn1d0825f2006-03-27 11:37:07 +00003557
3558 default:
3559 VG_(printf)("Error:\n"
3560 " unknown suppression type %d\n",
3561 VG_(get_supp_kind)(su));
3562 VG_(tool_panic)("unknown suppression type in "
3563 "MC_(error_matches_suppression)");
3564 }
3565}
3566
3567static Char* mc_get_error_name ( Error* err )
3568{
njn1d0825f2006-03-27 11:37:07 +00003569 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00003570 case Err_RegParam: return "Param";
3571 case Err_MemParam: return "Param";
3572 case Err_User: return "User";
3573 case Err_FreeMismatch: return "Free";
3574 case Err_IllegalMempool: return "Mempool";
3575 case Err_Free: return "Free";
3576 case Err_Jump: return "Jump";
3577 case Err_CoreMem: return "CoreMem";
3578 case Err_Overlap: return "Overlap";
3579 case Err_Leak: return "Leak";
3580 case Err_Cond: return "Cond";
3581 case Err_Addr: {
3582 MC_Error* extra = VG_(get_error_extra)(err);
3583 switch ( extra->Err.Addr.szB ) {
njn1d0825f2006-03-27 11:37:07 +00003584 case 1: return "Addr1";
3585 case 2: return "Addr2";
3586 case 4: return "Addr4";
3587 case 8: return "Addr8";
3588 case 16: return "Addr16";
3589 default: VG_(tool_panic)("unexpected size for Addr");
3590 }
njn718d3b12006-12-16 00:54:12 +00003591 }
3592 case Err_Value: {
3593 MC_Error* extra = VG_(get_error_extra)(err);
3594 switch ( extra->Err.Value.szB ) {
njn1d0825f2006-03-27 11:37:07 +00003595 case 1: return "Value1";
3596 case 2: return "Value2";
3597 case 4: return "Value4";
3598 case 8: return "Value8";
3599 case 16: return "Value16";
3600 default: VG_(tool_panic)("unexpected size for Value");
3601 }
njn718d3b12006-12-16 00:54:12 +00003602 }
njn1d0825f2006-03-27 11:37:07 +00003603 default: VG_(tool_panic)("get_error_name: unexpected type");
3604 }
njn1d0825f2006-03-27 11:37:07 +00003605}
3606
3607static void mc_print_extra_suppression_info ( Error* err )
3608{
njn718d3b12006-12-16 00:54:12 +00003609 ErrorKind ekind = VG_(get_error_kind )(err);
3610 if (Err_RegParam == ekind || Err_MemParam == ekind) {
njn1d0825f2006-03-27 11:37:07 +00003611 VG_(printf)(" %s\n", VG_(get_error_string)(err));
3612 }
3613}
3614
njn9e63cb62005-05-08 18:34:59 +00003615/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00003616/*--- Functions called directly from generated code: ---*/
3617/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00003618/*------------------------------------------------------------*/
3619
njn1d0825f2006-03-27 11:37:07 +00003620/* Types: LOADV32, LOADV16, LOADV8 are:
sewardj6cf40ff2005-04-20 22:31:26 +00003621 UWord fn ( Addr a )
3622 so they return 32-bits on 32-bit machines and 64-bits on
3623 64-bit machines. Addr has the same size as a host word.
3624
njn1d0825f2006-03-27 11:37:07 +00003625 LOADV64 is always ULong fn ( Addr a )
sewardj6cf40ff2005-04-20 22:31:26 +00003626
njn1d0825f2006-03-27 11:37:07 +00003627 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
3628 are a UWord, and for STOREV64 they are a ULong.
sewardj6cf40ff2005-04-20 22:31:26 +00003629*/
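/* Terminology: the "compact" form is the 2-bits-per-byte encoding held in
   shadow memory (noaccess / undefined / defined / partially-defined); the
   "expanded" form is one V bit per data bit as held in shadow registers,
   where a 0 bit means defined and a 1 bit means undefined.  The
   V_BITS*_DEFINED / V_BITS*_UNDEFINED constants are the all-defined /
   all-undefined expanded patterns. */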
3630
njn1d0825f2006-03-27 11:37:07 +00003631/* If any part of '_a' indicated by the mask is 1, either
njn45e81252006-03-28 12:35:08 +00003632 '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
njn1d0825f2006-03-27 11:37:07 +00003633 covered by the primary map. */
njn45e81252006-03-28 12:35:08 +00003634#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
njn1d0825f2006-03-27 11:37:07 +00003635#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
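/* Worked example (illustrative -- the real N_PRIMARY_MAP value is
   platform-dependent): on a 32-bit host with a hypothetical N_PRIMARY_MAP of
   0x10000, MASK(8) == ~(0xFFF8 | 0xFFFF0000) == 0x7, so
   UNALIGNED_OR_HIGH(a,64) reduces to an 8-byte alignment check.  Where the
   primary map covers only part of the address space, the mask additionally
   keeps the address bits above the covered range, so out-of-range addresses
   also fail the test and fall back to the slow cases below. */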
3636
3637
sewardj95448072004-11-22 20:19:51 +00003638/* ------------------------ Size = 8 ------------------------ */
3639
njn1d0825f2006-03-27 11:37:07 +00003640static INLINE
3641ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
3642{
3643 UWord sm_off16, vabits16;
3644 SecMap* sm;
3645
3646 PROF_EVENT(200, "mc_LOADV64");
3647
3648#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003649 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003650#else
njn45e81252006-03-28 12:35:08 +00003651 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003652 PROF_EVENT(201, "mc_LOADV64-slow1");
njn45e81252006-03-28 12:35:08 +00003653 return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
sewardjf9d81612005-04-23 23:25:49 +00003654 }
3655
njna7c7ebd2006-03-28 12:51:02 +00003656 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003657 sm_off16 = SM_OFF_16(a);
3658 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3659
3660 // Handle common case quickly: a is suitably aligned, is mapped, and
3661   // addressable.
3662 // Convert V bits from compact memory form to expanded register form.
njndbf7ca72006-03-31 11:57:59 +00003663 if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003664 return V_BITS64_DEFINED;
njndbf7ca72006-03-31 11:57:59 +00003665 } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003666 return V_BITS64_UNDEFINED;
3667 } else {
njndbf7ca72006-03-31 11:57:59 +00003668 /* Slow case: the 8 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003669 PROF_EVENT(202, "mc_LOADV64-slow2");
njn45e81252006-03-28 12:35:08 +00003670 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003671 }
3672#endif
3673}
3674
3675VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
3676{
3677 return mc_LOADV64(a, True);
3678}
3679VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
3680{
3681 return mc_LOADV64(a, False);
3682}
sewardjf9d81612005-04-23 23:25:49 +00003683
sewardjf9d81612005-04-23 23:25:49 +00003684
njn1d0825f2006-03-27 11:37:07 +00003685static INLINE
njn4cf530b2006-04-06 13:33:48 +00003686void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003687{
3688 UWord sm_off16, vabits16;
3689 SecMap* sm;
3690
3691 PROF_EVENT(210, "mc_STOREV64");
3692
3693#ifndef PERF_FAST_STOREV
3694 // XXX: this slow case seems to be marginally faster than the fast case!
3695 // Investigate further.
njn4cf530b2006-04-06 13:33:48 +00003696 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003697#else
njn45e81252006-03-28 12:35:08 +00003698 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003699 PROF_EVENT(211, "mc_STOREV64-slow1");
njn4cf530b2006-04-06 13:33:48 +00003700 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003701 return;
sewardjf9d81612005-04-23 23:25:49 +00003702 }
3703
njna7c7ebd2006-03-28 12:51:02 +00003704 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003705 sm_off16 = SM_OFF_16(a);
3706 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3707
3708 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003709 (VA_BITS16_DEFINED == vabits16 ||
3710 VA_BITS16_UNDEFINED == vabits16) ))
njn1d0825f2006-03-27 11:37:07 +00003711 {
3712 /* Handle common case quickly: a is suitably aligned, */
3713      /* is mapped, and is addressable. */
3714 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003715 if (V_BITS64_DEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003716 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003717 } else if (V_BITS64_UNDEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003718 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003719 } else {
3720 /* Slow but general case -- writing partially defined bytes. */
3721 PROF_EVENT(212, "mc_STOREV64-slow2");
njn4cf530b2006-04-06 13:33:48 +00003722 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003723 }
3724 } else {
3725 /* Slow but general case. */
3726 PROF_EVENT(213, "mc_STOREV64-slow3");
njn4cf530b2006-04-06 13:33:48 +00003727 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003728 }
3729#endif
3730}
3731
njn4cf530b2006-04-06 13:33:48 +00003732VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003733{
njn4cf530b2006-04-06 13:33:48 +00003734 mc_STOREV64(a, vbits64, True);
njn1d0825f2006-03-27 11:37:07 +00003735}
njn4cf530b2006-04-06 13:33:48 +00003736VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003737{
njn4cf530b2006-04-06 13:33:48 +00003738 mc_STOREV64(a, vbits64, False);
njn1d0825f2006-03-27 11:37:07 +00003739}
sewardj95448072004-11-22 20:19:51 +00003740
sewardj95448072004-11-22 20:19:51 +00003741
3742/* ------------------------ Size = 4 ------------------------ */
3743
njn1d0825f2006-03-27 11:37:07 +00003744static INLINE
3745UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
3746{
3747 UWord sm_off, vabits8;
3748 SecMap* sm;
3749
3750 PROF_EVENT(220, "mc_LOADV32");
3751
3752#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003753 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003754#else
njn45e81252006-03-28 12:35:08 +00003755 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003756 PROF_EVENT(221, "mc_LOADV32-slow1");
njn45e81252006-03-28 12:35:08 +00003757 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003758 }
3759
njna7c7ebd2006-03-28 12:51:02 +00003760 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003761 sm_off = SM_OFF(a);
3762 vabits8 = sm->vabits8[sm_off];
3763
3764 // Handle common case quickly: a is suitably aligned, is mapped, and the
3765 // entire word32 it lives in is addressable.
3766 // Convert V bits from compact memory form to expanded register form.
3767 // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
3768 // Almost certainly not necessary, but be paranoid.
njndbf7ca72006-03-31 11:57:59 +00003769 if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003770 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
njndbf7ca72006-03-31 11:57:59 +00003771 } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003772 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
3773 } else {
njndbf7ca72006-03-31 11:57:59 +00003774 /* Slow case: the 4 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003775 PROF_EVENT(222, "mc_LOADV32-slow2");
njn45e81252006-03-28 12:35:08 +00003776 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003777 }
3778#endif
3779}
3780
3781VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
3782{
3783 return mc_LOADV32(a, True);
3784}
3785VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
3786{
3787 return mc_LOADV32(a, False);
3788}
sewardjc1a2cda2005-04-21 17:34:00 +00003789
sewardjc1a2cda2005-04-21 17:34:00 +00003790
njn1d0825f2006-03-27 11:37:07 +00003791static INLINE
njn4cf530b2006-04-06 13:33:48 +00003792void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003793{
3794 UWord sm_off, vabits8;
3795 SecMap* sm;
3796
3797 PROF_EVENT(230, "mc_STOREV32");
3798
3799#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00003800 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003801#else
njn45e81252006-03-28 12:35:08 +00003802 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003803 PROF_EVENT(231, "mc_STOREV32-slow1");
njn4cf530b2006-04-06 13:33:48 +00003804 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003805 return;
sewardjc1a2cda2005-04-21 17:34:00 +00003806 }
3807
njna7c7ebd2006-03-28 12:51:02 +00003808 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003809 sm_off = SM_OFF(a);
3810 vabits8 = sm->vabits8[sm_off];
3811
3812//---------------------------------------------------------------------------
3813#if 1
3814 // Cleverness: sometimes we don't have to write the shadow memory at
3815 // all, if we can tell that what we want to write is the same as what is
3816 // already there.
njn4cf530b2006-04-06 13:33:48 +00003817 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003818 if (vabits8 == (UInt)VA_BITS8_DEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003819 return;
njndbf7ca72006-03-31 11:57:59 +00003820 } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
3821 sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
njn1d0825f2006-03-27 11:37:07 +00003822 } else {
njndbf7ca72006-03-31 11:57:59 +00003823 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003824 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003825 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003826 }
njn4cf530b2006-04-06 13:33:48 +00003827 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003828 if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003829 return;
njndbf7ca72006-03-31 11:57:59 +00003830 } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
3831 sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003832 } else {
njndbf7ca72006-03-31 11:57:59 +00003833 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003834 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003835 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003836 }
3837 } else {
3838 // Partially defined word
3839 PROF_EVENT(234, "mc_STOREV32-slow4");
njn4cf530b2006-04-06 13:33:48 +00003840 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003841 }
3842//---------------------------------------------------------------------------
3843#else
3844 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003845 (VA_BITS8_DEFINED == vabits8 ||
3846 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00003847 {
3848 /* Handle common case quickly: a is suitably aligned, */
3849 /* is mapped, and is addressable. */
3850 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003851 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003852 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003853 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003854 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003855 } else {
3856 /* Slow but general case -- writing partially defined bytes. */
3857 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003858 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003859 }
3860 } else {
3861 /* Slow but general case. */
3862 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003863 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003864 }
3865#endif
3866//---------------------------------------------------------------------------
3867#endif
3868}
3869
njn4cf530b2006-04-06 13:33:48 +00003870VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003871{
njn4cf530b2006-04-06 13:33:48 +00003872 mc_STOREV32(a, vbits32, True);
njn1d0825f2006-03-27 11:37:07 +00003873}
njn4cf530b2006-04-06 13:33:48 +00003874VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003875{
njn4cf530b2006-04-06 13:33:48 +00003876 mc_STOREV32(a, vbits32, False);
njn1d0825f2006-03-27 11:37:07 +00003877}
njn25e49d8e72002-09-23 09:36:25 +00003878
njn25e49d8e72002-09-23 09:36:25 +00003879
sewardj95448072004-11-22 20:19:51 +00003880/* ------------------------ Size = 2 ------------------------ */
3881
njn1d0825f2006-03-27 11:37:07 +00003882static INLINE
3883UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
3884{
3885 UWord sm_off, vabits8;
3886 SecMap* sm;
3887
3888 PROF_EVENT(240, "mc_LOADV16");
3889
3890#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003891 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003892#else
njn45e81252006-03-28 12:35:08 +00003893 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00003894 PROF_EVENT(241, "mc_LOADV16-slow1");
njn45e81252006-03-28 12:35:08 +00003895 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003896 }
3897
njna7c7ebd2006-03-28 12:51:02 +00003898 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003899 sm_off = SM_OFF(a);
3900 vabits8 = sm->vabits8[sm_off];
3901 // Handle common case quickly: a is suitably aligned, is mapped, and is
3902 // addressable.
3903 // Convert V bits from compact memory form to expanded register form
njndbf7ca72006-03-31 11:57:59 +00003904 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS16_DEFINED; }
3905 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00003906 else {
njndbf7ca72006-03-31 11:57:59 +00003907 // The 4 (yes, 4) bytes are not all-defined or all-undefined, so check
njn1d0825f2006-03-27 11:37:07 +00003908 // the two sub-bytes.
3909 UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00003910 if (vabits4 == VA_BITS4_DEFINED ) { return V_BITS16_DEFINED; }
3911 else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00003912 else {
njndbf7ca72006-03-31 11:57:59 +00003913 /* Slow case: the two bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003914 PROF_EVENT(242, "mc_LOADV16-slow2");
njn45e81252006-03-28 12:35:08 +00003915 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003916 }
3917 }
3918#endif
3919}
3920
3921VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
3922{
3923 return mc_LOADV16(a, True);
3924}
3925VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
3926{
3927 return mc_LOADV16(a, False);
3928}
sewardjc1a2cda2005-04-21 17:34:00 +00003929
sewardjc1a2cda2005-04-21 17:34:00 +00003930
njn1d0825f2006-03-27 11:37:07 +00003931static INLINE
njn4cf530b2006-04-06 13:33:48 +00003932void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003933{
3934 UWord sm_off, vabits8;
3935 SecMap* sm;
3936
3937 PROF_EVENT(250, "mc_STOREV16");
3938
3939#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00003940 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003941#else
njn45e81252006-03-28 12:35:08 +00003942 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00003943 PROF_EVENT(251, "mc_STOREV16-slow1");
njn4cf530b2006-04-06 13:33:48 +00003944 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003945 return;
sewardjc1a2cda2005-04-21 17:34:00 +00003946 }
3947
njna7c7ebd2006-03-28 12:51:02 +00003948 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003949 sm_off = SM_OFF(a);
3950 vabits8 = sm->vabits8[sm_off];
3951 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003952 (VA_BITS8_DEFINED == vabits8 ||
3953 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00003954 {
3955 /* Handle common case quickly: a is suitably aligned, */
3956 /* is mapped, and is addressable. */
3957 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003958 if (V_BITS16_DEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00003959 insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
njn1d0825f2006-03-27 11:37:07 +00003960 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00003961 } else if (V_BITS16_UNDEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00003962 insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00003963 &(sm->vabits8[sm_off]) );
3964 } else {
3965 /* Slow but general case -- writing partially defined bytes. */
3966 PROF_EVENT(252, "mc_STOREV16-slow2");
njn4cf530b2006-04-06 13:33:48 +00003967 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003968 }
3969 } else {
3970 /* Slow but general case. */
3971 PROF_EVENT(253, "mc_STOREV16-slow3");
njn4cf530b2006-04-06 13:33:48 +00003972 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003973 }
3974#endif
3975}
njn25e49d8e72002-09-23 09:36:25 +00003976
njn4cf530b2006-04-06 13:33:48 +00003977VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00003978{
njn4cf530b2006-04-06 13:33:48 +00003979 mc_STOREV16(a, vbits16, True);
njn1d0825f2006-03-27 11:37:07 +00003980}
njn4cf530b2006-04-06 13:33:48 +00003981VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00003982{
njn4cf530b2006-04-06 13:33:48 +00003983 mc_STOREV16(a, vbits16, False);
njn1d0825f2006-03-27 11:37:07 +00003984}
sewardj5d28efc2005-04-21 22:16:29 +00003985
njn25e49d8e72002-09-23 09:36:25 +00003986
sewardj95448072004-11-22 20:19:51 +00003987/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00003988/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00003989
njnaf839f52005-06-23 03:27:57 +00003990VG_REGPARM(1)
njn1d0825f2006-03-27 11:37:07 +00003991UWord MC_(helperc_LOADV8) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00003992{
njn1d0825f2006-03-27 11:37:07 +00003993 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00003994 SecMap* sm;
3995
njn1d0825f2006-03-27 11:37:07 +00003996 PROF_EVENT(260, "mc_LOADV8");
sewardjc1a2cda2005-04-21 17:34:00 +00003997
njn1d0825f2006-03-27 11:37:07 +00003998#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003999 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004000#else
njn45e81252006-03-28 12:35:08 +00004001 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00004002 PROF_EVENT(261, "mc_LOADV8-slow1");
njn45e81252006-03-28 12:35:08 +00004003 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004004 }
4005
njna7c7ebd2006-03-28 12:51:02 +00004006 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004007 sm_off = SM_OFF(a);
4008 vabits8 = sm->vabits8[sm_off];
4009 // Convert V bits from compact memory form to expanded register form
4010 // Handle common case quickly: a is mapped, and the entire
4011 // word32 it lives in is addressable.
njndbf7ca72006-03-31 11:57:59 +00004012 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS8_DEFINED; }
4013 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004014 else {
njndbf7ca72006-03-31 11:57:59 +00004015 // The 4 (yes, 4) bytes are not all-defined or all-undefined, so check
njn1d0825f2006-03-27 11:37:07 +00004016 // the single byte.
4017 UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00004018 if (vabits2 == VA_BITS2_DEFINED ) { return V_BITS8_DEFINED; }
4019 else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004020 else {
njndbf7ca72006-03-31 11:57:59 +00004021 /* Slow case: the byte is not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00004022 PROF_EVENT(262, "mc_LOADV8-slow2");
njn45e81252006-03-28 12:35:08 +00004023 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004024 }
sewardjc1a2cda2005-04-21 17:34:00 +00004025 }
njn1d0825f2006-03-27 11:37:07 +00004026#endif
njn25e49d8e72002-09-23 09:36:25 +00004027}
4028
sewardjc1a2cda2005-04-21 17:34:00 +00004029
njnaf839f52005-06-23 03:27:57 +00004030VG_REGPARM(2)
njn4cf530b2006-04-06 13:33:48 +00004031void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
njn25e49d8e72002-09-23 09:36:25 +00004032{
njn1d0825f2006-03-27 11:37:07 +00004033 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00004034 SecMap* sm;
4035
njn1d0825f2006-03-27 11:37:07 +00004036 PROF_EVENT(270, "mc_STOREV8");
sewardjc1a2cda2005-04-21 17:34:00 +00004037
njn1d0825f2006-03-27 11:37:07 +00004038#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00004039 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004040#else
njn45e81252006-03-28 12:35:08 +00004041 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00004042 PROF_EVENT(271, "mc_STOREV8-slow1");
njn4cf530b2006-04-06 13:33:48 +00004043 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004044 return;
4045 }
4046
njna7c7ebd2006-03-28 12:51:02 +00004047 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004048 sm_off = SM_OFF(a);
4049 vabits8 = sm->vabits8[sm_off];
4050 if (EXPECTED_TAKEN
4051 ( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00004052 ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
njn1d0825f2006-03-27 11:37:07 +00004053 || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
4054 )
4055 )
4056 )
4057 {
sewardjc1a2cda2005-04-21 17:34:00 +00004058 /* Handle common case quickly: a is mapped, the entire word32 it
4059 lives in is addressable. */
njn1d0825f2006-03-27 11:37:07 +00004060 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00004061 if (V_BITS8_DEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00004062 insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
njn1d0825f2006-03-27 11:37:07 +00004063 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00004064 } else if (V_BITS8_UNDEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00004065 insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00004066 &(sm->vabits8[sm_off]) );
4067 } else {
4068 /* Slow but general case -- writing partially defined bytes. */
4069 PROF_EVENT(272, "mc_STOREV8-slow2");
njn4cf530b2006-04-06 13:33:48 +00004070 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004071 }
sewardjc1a2cda2005-04-21 17:34:00 +00004072 } else {
njn1d0825f2006-03-27 11:37:07 +00004073 /* Slow but general case. */
4074 PROF_EVENT(273, "mc_STOREV8-slow3");
njn4cf530b2006-04-06 13:33:48 +00004075 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004076 }
njn1d0825f2006-03-27 11:37:07 +00004077#endif
njn25e49d8e72002-09-23 09:36:25 +00004078}
4079
4080
sewardjc859fbf2005-04-22 21:10:28 +00004081/*------------------------------------------------------------*/
4082/*--- Functions called directly from generated code: ---*/
4083/*--- Value-check failure handlers. ---*/
4084/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004085
njn5c004e42002-11-18 11:04:50 +00004086void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004087{
njn718d3b12006-12-16 00:54:12 +00004088 mc_record_cond_error ( VG_(get_running_tid)() );
njn25e49d8e72002-09-23 09:36:25 +00004089}
4090
njn5c004e42002-11-18 11:04:50 +00004091void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004092{
njn9e63cb62005-05-08 18:34:59 +00004093 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00004094}
4095
njn5c004e42002-11-18 11:04:50 +00004096void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004097{
njn9e63cb62005-05-08 18:34:59 +00004098 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00004099}
4100
sewardj11bcc4e2005-04-23 22:38:38 +00004101void MC_(helperc_value_check8_fail) ( void )
4102{
njn9e63cb62005-05-08 18:34:59 +00004103 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00004104}
4105
njnaf839f52005-06-23 03:27:57 +00004106VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00004107{
njn9e63cb62005-05-08 18:34:59 +00004108 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00004109}
4110
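/* Illustrative sketch: the kind of client code that reaches these handlers
   at run time.  Branching on an uninitialised value makes the instrumented
   conditional test call MC_(helperc_value_check0_fail).  Minimal,
   hypothetical example only; not part of the tool itself. */
#if 0
static int branches_on_undefined ( void )
{
   int x;          /* never initialised: its V bits stay undefined */
   if (x > 0)      /* generated code checks the condition's V bits and */
      return 1;    /* calls MC_(helperc_value_check0_fail) here        */
   return 0;
}
#endif
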
njn25e49d8e72002-09-23 09:36:25 +00004111
sewardjc2c12c22006-03-08 13:20:09 +00004112/*------------------------------------------------------------*/
4113/*--- Metadata get/set functions, for client requests. ---*/
4114/*------------------------------------------------------------*/
4115
njn1d0825f2006-03-27 11:37:07 +00004116// Nb: this expands the V+A bits out into register-form V bits, even though
4117// they're in memory. This is for backward compatibility, and because it's
4118// probably what the user wants.
4119
4120/* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
sewardjc2c12c22006-03-08 13:20:09 +00004121 error [no longer used], 3 == addressing error. */
njn718d3b12006-12-16 00:54:12 +00004122/* Nb: We used to issue various definedness/addressability errors from here,
4123 but we took them out because they ranged from not-very-helpful to
4124 downright annoying, and they complicated the error data structures. */
sewardjc2c12c22006-03-08 13:20:09 +00004125static Int mc_get_or_set_vbits_for_client (
4126 ThreadId tid,
njn1d0825f2006-03-27 11:37:07 +00004127 Addr a,
4128 Addr vbits,
4129 SizeT szB,
sewardjc2c12c22006-03-08 13:20:09 +00004130 Bool setting /* True <=> set vbits, False <=> get vbits */
4131)
4132{
sewardjc2c12c22006-03-08 13:20:09 +00004133 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00004134 Bool ok;
4135 UChar vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00004136
njn1d0825f2006-03-27 11:37:07 +00004137 /* Check that arrays are addressable before doing any getting/setting. */
4138 for (i = 0; i < szB; i++) {
njn718d3b12006-12-16 00:54:12 +00004139 if (VA_BITS2_NOACCESS == get_vabits2(a + i) ||
4140 VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
njn1d0825f2006-03-27 11:37:07 +00004141 return 3;
sewardjc2c12c22006-03-08 13:20:09 +00004142 }
4143 }
njn1d0825f2006-03-27 11:37:07 +00004144
sewardjc2c12c22006-03-08 13:20:09 +00004145 /* Do the copy */
4146 if (setting) {
njn1d0825f2006-03-27 11:37:07 +00004147 /* setting */
4148 for (i = 0; i < szB; i++) {
4149 ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
4150 tl_assert(ok);
sewardjc2c12c22006-03-08 13:20:09 +00004151 }
4152 } else {
4153 /* getting */
njn1d0825f2006-03-27 11:37:07 +00004154 for (i = 0; i < szB; i++) {
4155 ok = get_vbits8(a + i, &vbits8);
4156 tl_assert(ok);
njn1d0825f2006-03-27 11:37:07 +00004157 ((UChar*)vbits)[i] = vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00004158 }
4159 // The bytes in vbits[] have now been set, so mark them as such.
njndbf7ca72006-03-31 11:57:59 +00004160 MC_(make_mem_defined)(vbits, szB);
njn1d0825f2006-03-27 11:37:07 +00004161 }
sewardjc2c12c22006-03-08 13:20:09 +00004162
4163 return 1;
4164}
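
/* Illustrative client-side view of the function above, using the
   VALGRIND_GET_VBITS/VALGRIND_SET_VBITS macros from memcheck.h; exact macro
   spellings in other releases are an assumption.  A result of 3 corresponds
   to the addressing-error case handled above. */
#if 0
#include "memcheck.h"

static void save_and_restore_vbits ( char* buf, char* shadow, int n )
{
   int res;
   res = VALGRIND_GET_VBITS(buf, shadow, n);  /* copy V bits out of 'buf' */
   /* ... client scribbles on buf ... */
   res = VALGRIND_SET_VBITS(buf, shadow, n);  /* put the old V bits back  */
   (void)res;  /* 1 == OK, 3 == some byte was unaddressable */
}
#endif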
sewardj05fe85e2005-04-27 22:46:36 +00004165
4166
4167/*------------------------------------------------------------*/
4168/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
4169/*------------------------------------------------------------*/
4170
4171/* For the memory leak detector, say whether an entire 64k chunk of
4172 address space is possibly in use, or not. If in doubt return
4173 True.
4174*/
4175static
4176Bool mc_is_within_valid_secondary ( Addr a )
4177{
4178 SecMap* sm = maybe_get_secmap_for ( a );
sewardj05a46732006-10-17 01:28:10 +00004179 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
4180 || in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004181 /* Definitely not in use. */
4182 return False;
4183 } else {
4184 return True;
4185 }
4186}
4187
4188
4189/* For the memory leak detector, say whether or not a given word
4190 address is to be regarded as valid. */
4191static
4192Bool mc_is_valid_aligned_word ( Addr a )
4193{
4194 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
4195 if (sizeof(UWord) == 4) {
4196 tl_assert(VG_IS_4_ALIGNED(a));
4197 } else {
4198 tl_assert(VG_IS_8_ALIGNED(a));
4199 }
sewardj05a46732006-10-17 01:28:10 +00004200 if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok
4201 && !in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004202 return True;
4203 } else {
4204 return False;
4205 }
4206}
sewardja4495682002-10-21 07:29:59 +00004207
4208
nethercote996901a2004-08-03 13:29:09 +00004209/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00004210 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00004211 tool. */
njnb8dca862005-03-14 02:42:44 +00004212static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00004213{
njn1d0825f2006-03-27 11:37:07 +00004214 MC_(do_detect_memory_leaks) (
sewardj05fe85e2005-04-27 22:46:36 +00004215 tid,
4216 mode,
4217 mc_is_within_valid_secondary,
4218 mc_is_valid_aligned_word
4219 );
njn25e49d8e72002-09-23 09:36:25 +00004220}
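
/* Illustrative sketch: besides running at exit, this entry point can be
   reached from client code through the leak-check client requests handled
   further down in mc_handle_client_request.  Macro names are the ones from
   memcheck.h of this era; treat them as assumptions elsewhere. */
#if 0
#include "memcheck.h"

static void leak_check_now ( void )
{
   unsigned long leaked, dubious, reachable, suppressed;
   VALGRIND_DO_LEAK_CHECK;   /* full check; results are printed */
   VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
   /* 'leaked' etc. now hold the byte counts from that check */
}
#endif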
4221
4222
sewardjc859fbf2005-04-22 21:10:28 +00004223/*------------------------------------------------------------*/
4224/*--- Initialisation ---*/
4225/*------------------------------------------------------------*/
4226
4227static void init_shadow_memory ( void )
4228{
4229 Int i;
4230 SecMap* sm;
4231
njn1d0825f2006-03-27 11:37:07 +00004232 tl_assert(V_BIT_UNDEFINED == 1);
4233 tl_assert(V_BIT_DEFINED == 0);
4234 tl_assert(V_BITS8_UNDEFINED == 0xFF);
4235 tl_assert(V_BITS8_DEFINED == 0);
4236
sewardjc859fbf2005-04-22 21:10:28 +00004237 /* Build the 3 distinguished secondaries */
sewardjc859fbf2005-04-22 21:10:28 +00004238 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004239 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
sewardjc859fbf2005-04-22 21:10:28 +00004240
njndbf7ca72006-03-31 11:57:59 +00004241 sm = &sm_distinguished[SM_DIST_UNDEFINED];
4242 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004243
njndbf7ca72006-03-31 11:57:59 +00004244 sm = &sm_distinguished[SM_DIST_DEFINED];
4245 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004246
4247 /* Set up the primary map. */
4248 /* These entries gradually get overwritten as the used address
4249 space expands. */
4250 for (i = 0; i < N_PRIMARY_MAP; i++)
4251 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
4252
sewardj05a46732006-10-17 01:28:10 +00004253 /* Auxiliary primary maps */
4254 init_auxmap_L1_L2();
4255
sewardjc859fbf2005-04-22 21:10:28 +00004256 /* auxmap_size = auxmap_used = 0;
4257 no ... these are statically initialised */
njn1d0825f2006-03-27 11:37:07 +00004258
4259 /* Secondary V bit table */
4260 secVBitTable = createSecVBitTable();
sewardjc859fbf2005-04-22 21:10:28 +00004261}
4262
4263
4264/*------------------------------------------------------------*/
4265/*--- Sanity check machinery (permanently engaged) ---*/
4266/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004267
njn51d827b2005-05-09 01:02:08 +00004268static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004269{
jseward9800fd32004-01-04 23:08:04 +00004270 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00004271 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00004272 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00004273 return True;
njn25e49d8e72002-09-23 09:36:25 +00004274}
4275
njn51d827b2005-05-09 01:02:08 +00004276static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004277{
sewardj05a46732006-10-17 01:28:10 +00004278 Int i;
4279 Word n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00004280 SecMap* sm;
sewardj05a46732006-10-17 01:28:10 +00004281 HChar* errmsg;
sewardj23eb2fd2005-04-22 16:29:19 +00004282 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00004283
sewardj05a46732006-10-17 01:28:10 +00004284 if (0) VG_(printf)("expensive sanity check\n");
4285 if (0) return True;
4286
sewardj23eb2fd2005-04-22 16:29:19 +00004287 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00004288 PROF_EVENT(491, "expensive_sanity_check");
4289
njn1d0825f2006-03-27 11:37:07 +00004290 /* Check that the 3 distinguished SMs are still as they should be. */
njn25e49d8e72002-09-23 09:36:25 +00004291
njndbf7ca72006-03-31 11:57:59 +00004292 /* Check noaccess DSM. */
sewardj45d94cc2005-04-20 14:44:11 +00004293 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004294 for (i = 0; i < SM_CHUNKS; i++)
4295 if (sm->vabits8[i] != VA_BITS8_NOACCESS)
sewardj23eb2fd2005-04-22 16:29:19 +00004296 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00004297
njndbf7ca72006-03-31 11:57:59 +00004298 /* Check undefined DSM. */
4299 sm = &sm_distinguished[SM_DIST_UNDEFINED];
njn1d0825f2006-03-27 11:37:07 +00004300 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004301 if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004302 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004303
njndbf7ca72006-03-31 11:57:59 +00004304 /* Check defined DSM. */
4305 sm = &sm_distinguished[SM_DIST_DEFINED];
njn1d0825f2006-03-27 11:37:07 +00004306 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004307 if (sm->vabits8[i] != VA_BITS8_DEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004308 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004309
sewardj23eb2fd2005-04-22 16:29:19 +00004310 if (bad) {
4311 VG_(printf)("memcheck expensive sanity: "
4312 "distinguished_secondaries have changed\n");
4313 return False;
4314 }
4315
njn1d0825f2006-03-27 11:37:07 +00004316 /* If we're not checking for undefined value errors, the secondary V bit
4317 * table should be empty. */
4318 if (!MC_(clo_undef_value_errors)) {
njne2a9ad32007-09-17 05:30:48 +00004319 if (0 != VG_(OSetGen_Size)(secVBitTable))
njn1d0825f2006-03-27 11:37:07 +00004320 return False;
4321 }
4322
sewardj05a46732006-10-17 01:28:10 +00004323 /* check the auxiliary maps, very thoroughly */
4324 n_secmaps_found = 0;
4325 errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
4326 if (errmsg) {
4327 VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
sewardj23eb2fd2005-04-22 16:29:19 +00004328 return False;
4329 }
4330
sewardj05a46732006-10-17 01:28:10 +00004331 /* n_secmaps_found is now the number referred to by the auxiliary
4332 primary map. Now add on the ones referred to by the main
4333 primary map. */
sewardj23eb2fd2005-04-22 16:29:19 +00004334 for (i = 0; i < N_PRIMARY_MAP; i++) {
sewardj05a46732006-10-17 01:28:10 +00004335 if (primary_map[i] == NULL) {
sewardj23eb2fd2005-04-22 16:29:19 +00004336 bad = True;
4337 } else {
sewardj05a46732006-10-17 01:28:10 +00004338 if (!is_distinguished_sm(primary_map[i]))
sewardj23eb2fd2005-04-22 16:29:19 +00004339 n_secmaps_found++;
4340 }
4341 }
4342
sewardj05a46732006-10-17 01:28:10 +00004343 /* check that the number of secmaps issued matches the number that
4344 are reachable (iow, no secmap leaks) */
njn1d0825f2006-03-27 11:37:07 +00004345 if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
sewardj23eb2fd2005-04-22 16:29:19 +00004346 bad = True;
4347
4348 if (bad) {
4349 VG_(printf)("memcheck expensive sanity: "
4350 "apparent secmap leakage\n");
4351 return False;
4352 }
4353
sewardj23eb2fd2005-04-22 16:29:19 +00004354 if (bad) {
4355 VG_(printf)("memcheck expensive sanity: "
4356 "auxmap covers wrong address space\n");
4357 return False;
4358 }
4359
4360 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00004361
4362 return True;
4363}
sewardj45d94cc2005-04-20 14:44:11 +00004364
njn25e49d8e72002-09-23 09:36:25 +00004365/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00004366/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00004367/*------------------------------------------------------------*/
4368
njn1d0825f2006-03-27 11:37:07 +00004369Bool MC_(clo_partial_loads_ok) = False;
njnbf8c3502007-09-17 22:46:45 +00004370SSizeT MC_(clo_freelist_vol) = 5000000;
njn1d0825f2006-03-27 11:37:07 +00004371LeakCheckMode MC_(clo_leak_check) = LC_Summary;
4372VgRes MC_(clo_leak_resolution) = Vg_LowRes;
4373Bool MC_(clo_show_reachable) = False;
4374Bool MC_(clo_workaround_gcc296_bugs) = False;
4375Bool MC_(clo_undef_value_errors) = True;
4376
4377static Bool mc_process_cmd_line_options(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00004378{
njn1d0825f2006-03-27 11:37:07 +00004379 VG_BOOL_CLO(arg, "--partial-loads-ok", MC_(clo_partial_loads_ok))
4380 else VG_BOOL_CLO(arg, "--show-reachable", MC_(clo_show_reachable))
4381 else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))
4382
4383 else VG_BOOL_CLO(arg, "--undef-value-errors", MC_(clo_undef_value_errors))
4384
4385 else VG_BNUM_CLO(arg, "--freelist-vol", MC_(clo_freelist_vol), 0, 1000000000)
4386
4387 else if (VG_CLO_STREQ(arg, "--leak-check=no"))
4388 MC_(clo_leak_check) = LC_Off;
4389 else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
4390 MC_(clo_leak_check) = LC_Summary;
4391 else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
4392 VG_CLO_STREQ(arg, "--leak-check=full"))
4393 MC_(clo_leak_check) = LC_Full;
4394
4395 else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
4396 MC_(clo_leak_resolution) = Vg_LowRes;
4397 else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
4398 MC_(clo_leak_resolution) = Vg_MedRes;
4399 else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
4400 MC_(clo_leak_resolution) = Vg_HighRes;
4401
sewardj05a46732006-10-17 01:28:10 +00004402 else if (VG_CLO_STREQN(16,arg,"--ignore-ranges=")) {
4403 Int i;
4404 UChar* txt = (UChar*)(arg+16);
4405 Bool ok = parse_ignore_ranges(txt);
4406 if (!ok)
4407 return False;
4408 tl_assert(ignoreRanges.used >= 0);
4409 tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
4410 for (i = 0; i < ignoreRanges.used; i++) {
4411 Addr s = ignoreRanges.start[i];
4412 Addr e = ignoreRanges.end[i];
4413 Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
4414 if (e <= s) {
4415 VG_(message)(Vg_DebugMsg,
4416 "ERROR: --ignore-ranges: end <= start in range:");
4417 VG_(message)(Vg_DebugMsg,
4418 " 0x%lx-0x%lx", s, e);
4419 return False;
4420 }
4421 if (e - s > limit) {
4422 VG_(message)(Vg_DebugMsg,
4423 "ERROR: --ignore-ranges: suspiciously large range:");
4424 VG_(message)(Vg_DebugMsg,
4425 " 0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
4426 return False;
4427 }
4428 }
4429 }
4430
njn1d0825f2006-03-27 11:37:07 +00004431 else
4432 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4433
4434 return True;
njn25e49d8e72002-09-23 09:36:25 +00004435}
4436
njn51d827b2005-05-09 01:02:08 +00004437static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00004438{
njn1d0825f2006-03-27 11:37:07 +00004439 VG_(printf)(
4440" --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
4441" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
4442" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
4443" --undef-value-errors=no|yes check for undefined value errors [yes]\n"
4444" --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
4445" --freelist-vol=<number> volume of freed blocks queue [5000000]\n"
4446" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
sewardj05a46732006-10-17 01:28:10 +00004447" --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS] assume given addresses are OK\n"
njn1d0825f2006-03-27 11:37:07 +00004448 );
4449 VG_(replacement_malloc_print_usage)();
njn3e884182003-04-15 13:03:23 +00004450}
4451
njn51d827b2005-05-09 01:02:08 +00004452static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00004453{
njn1d0825f2006-03-27 11:37:07 +00004454 VG_(replacement_malloc_print_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00004455}
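
/* Example invocations exercising the options parsed above (illustrative
   only; program names are placeholders):

      valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./prog
      valgrind --tool=memcheck --undef-value-errors=no --freelist-vol=10000000 ./prog
      valgrind --tool=memcheck --ignore-ranges=0x60000000-0x60010000 ./prog
*/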
4456
sewardjf3418c02005-11-08 14:10:24 +00004457
nethercote8b76fe52004-11-08 19:20:09 +00004458/*------------------------------------------------------------*/
4459/*--- Client requests ---*/
4460/*------------------------------------------------------------*/
4461
4462/* Client block management:
4463
4464 This is managed as an expanding array of client block descriptors.
4465 Indices of live descriptors are issued to the client, so it can ask
4466 to free them later. Therefore we cannot slide live entries down
4467 over dead ones. Instead we must use free/inuse flags and scan for
4468 an empty slot at allocation time. This in turn means allocation is
4469 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00004470
sewardjedc75ab2005-03-15 23:30:32 +00004471 An unused block has start == size == 0
4472*/
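
/* Illustrative client-side sketch of the request pair that drives this
   machinery, using the VALGRIND_CREATE_BLOCK/VALGRIND_DISCARD macros from
   memcheck.h (buffer name and description are hypothetical). */
#if 0
#include "memcheck.h"

static void describe_my_buffer ( void )
{
   static char arena[1024];
   int blk = VALGRIND_CREATE_BLOCK(arena, sizeof(arena), "my arena");
   /* errors inside 'arena' are now described using the string above */
   VALGRIND_DISCARD(blk);   /* releases the descriptor slot allocated below */
}
#endif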
nethercote8b76fe52004-11-08 19:20:09 +00004473
4474typedef
4475 struct {
4476 Addr start;
4477 SizeT size;
4478 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00004479 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00004480 }
4481 CGenBlock;
4482
4483/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00004484static UInt cgb_size = 0;
4485static UInt cgb_used = 0;
4486static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00004487
4488/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00004489static UInt cgb_used_MAX = 0; /* Max in use. */
4490static UInt cgb_allocs = 0; /* Number of allocs. */
4491static UInt cgb_discards = 0; /* Number of discards. */
4492static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00004493
4494
4495static
njn695c16e2005-03-27 03:40:28 +00004496Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00004497{
4498 UInt i, sz_new;
4499 CGenBlock* cgbs_new;
4500
njn695c16e2005-03-27 03:40:28 +00004501 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00004502
njn695c16e2005-03-27 03:40:28 +00004503 for (i = 0; i < cgb_used; i++) {
4504 cgb_search++;
4505 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004506 return i;
4507 }
4508
4509 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00004510 if (cgb_used < cgb_size) {
4511 cgb_used++;
4512 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004513 }
4514
4515 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00004516 tl_assert(cgb_used == cgb_size);
4517 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00004518
4519 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00004520 for (i = 0; i < cgb_used; i++)
4521 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00004522
njn695c16e2005-03-27 03:40:28 +00004523 if (cgbs != NULL)
4524 VG_(free)( cgbs );
4525 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00004526
njn695c16e2005-03-27 03:40:28 +00004527 cgb_size = sz_new;
4528 cgb_used++;
4529 if (cgb_used > cgb_used_MAX)
4530 cgb_used_MAX = cgb_used;
4531 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004532}
4533
4534
4535static void show_client_block_stats ( void )
4536{
4537 VG_(message)(Vg_DebugMsg,
4538 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00004539 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00004540 );
4541}
4542
nethercote8b76fe52004-11-08 19:20:09 +00004543static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
4544{
4545 UInt i;
nethercote8b76fe52004-11-08 19:20:09 +00004546
4547 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00004548 for (i = 0; i < cgb_used; i++) {
4549 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004550 continue;
njn717cde52005-05-10 02:47:21 +00004551 // Use zero as the redzone for client blocks.
4552 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00004553 /* OK - maybe it's a mempool, too? */
njn1d0825f2006-03-27 11:37:07 +00004554 MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
njn12627272005-08-14 18:32:16 +00004555 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00004556 if (mp != NULL) {
4557 if (mp->chunks != NULL) {
njn1d0825f2006-03-27 11:37:07 +00004558 MC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00004559 VG_(HT_ResetIter)(mp->chunks);
4560 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0825f2006-03-27 11:37:07 +00004561 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00004562 ai->tag = Addr_Block;
4563 ai->Addr.Block.block_kind = Block_MempoolChunk;
4564 ai->Addr.Block.block_desc = "block";
4565 ai->Addr.Block.block_szB = mc->szB;
4566 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
4567 ai->Addr.Block.lastchange = mc->where;
njn1d0cb0d2005-08-15 01:52:02 +00004568 return True;
4569 }
nethercote8b76fe52004-11-08 19:20:09 +00004570 }
4571 }
njn718d3b12006-12-16 00:54:12 +00004572 ai->tag = Addr_Block;
4573 ai->Addr.Block.block_kind = Block_Mempool;
4574 ai->Addr.Block.block_desc = "mempool";
4575 ai->Addr.Block.block_szB = cgbs[i].size;
4576 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
4577 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00004578 return True;
4579 }
njn718d3b12006-12-16 00:54:12 +00004580 ai->tag = Addr_Block;
4581 ai->Addr.Block.block_kind = Block_UserG;
4582 ai->Addr.Block.block_desc = cgbs[i].desc;
4583 ai->Addr.Block.block_szB = cgbs[i].size;
4584 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
4585 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00004586 return True;
4587 }
4588 }
4589 return False;
4590}
4591
njn51d827b2005-05-09 01:02:08 +00004592static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00004593{
4594 Int i;
4595 Bool ok;
4596 Addr bad_addr;
4597
njnfc26ff92004-11-22 19:12:49 +00004598 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00004599 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
4600 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
4601 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
4602 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
4603 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
sewardj2c1c9df2006-07-28 00:06:37 +00004604 && VG_USERREQ__MEMPOOL_FREE != arg[0]
sewardjc740d762006-10-05 17:59:23 +00004605 && VG_USERREQ__MEMPOOL_TRIM != arg[0]
4606 && VG_USERREQ__MOVE_MEMPOOL != arg[0]
4607 && VG_USERREQ__MEMPOOL_CHANGE != arg[0]
4608 && VG_USERREQ__MEMPOOL_EXISTS != arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00004609 return False;
4610
4611 switch (arg[0]) {
njndbf7ca72006-03-31 11:57:59 +00004612 case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
4613 ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00004614 if (!ok)
njn718d3b12006-12-16 00:54:12 +00004615 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00004616 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00004617 break;
nethercote8b76fe52004-11-08 19:20:09 +00004618
njndbf7ca72006-03-31 11:57:59 +00004619 case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
nethercote8b76fe52004-11-08 19:20:09 +00004620 MC_ReadResult res;
njndbf7ca72006-03-31 11:57:59 +00004621 res = is_mem_defined ( arg[1], arg[2], &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00004622 if (MC_AddrErr == res)
njn718d3b12006-12-16 00:54:12 +00004623 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00004624 else if (MC_ValueErr == res)
njn718d3b12006-12-16 00:54:12 +00004625 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00004626 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00004627 break;
nethercote8b76fe52004-11-08 19:20:09 +00004628 }
4629
4630 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00004631 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00004632 *ret = 0; /* return value is meaningless */
4633 break;
nethercote8b76fe52004-11-08 19:20:09 +00004634
njndbf7ca72006-03-31 11:57:59 +00004635 case VG_USERREQ__MAKE_MEM_NOACCESS:
4636 MC_(make_mem_noaccess) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00004637 *ret = -1;
4638 break;
nethercote8b76fe52004-11-08 19:20:09 +00004639
njndbf7ca72006-03-31 11:57:59 +00004640 case VG_USERREQ__MAKE_MEM_UNDEFINED:
4641 MC_(make_mem_undefined) ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00004642 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00004643 break;
nethercote8b76fe52004-11-08 19:20:09 +00004644
njndbf7ca72006-03-31 11:57:59 +00004645 case VG_USERREQ__MAKE_MEM_DEFINED:
4646 MC_(make_mem_defined) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00004647 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00004648 break;
4649
njndbf7ca72006-03-31 11:57:59 +00004650 case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
4651 make_mem_defined_if_addressable ( arg[1], arg[2] );
sewardjfb1e9ad2006-03-10 13:41:58 +00004652 *ret = -1;
4653 break;
4654
sewardjedc75ab2005-03-15 23:30:32 +00004655 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00004656 if (arg[1] != 0 && arg[2] != 0) {
4657 i = alloc_client_block();
4658 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
4659 cgbs[i].start = arg[1];
4660 cgbs[i].size = arg[2];
4661 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
sewardj39f34232007-11-09 23:02:28 +00004662 cgbs[i].where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
sewardjedc75ab2005-03-15 23:30:32 +00004663
sewardj8cf88b72005-07-08 01:29:33 +00004664 *ret = i;
4665 } else
4666 *ret = -1;
4667 break;
sewardjedc75ab2005-03-15 23:30:32 +00004668
nethercote8b76fe52004-11-08 19:20:09 +00004669 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00004670 if (cgbs == NULL
4671 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00004672 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00004673 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00004674 } else {
4675 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
4676 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
4677 VG_(free)(cgbs[arg[2]].desc);
4678 cgb_discards++;
4679 *ret = 0;
4680 }
4681 break;
nethercote8b76fe52004-11-08 19:20:09 +00004682
sewardjc2c12c22006-03-08 13:20:09 +00004683 case VG_USERREQ__GET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00004684 *ret = mc_get_or_set_vbits_for_client
4685 ( tid, arg[1], arg[2], arg[3], False /* get them */ );
4686 break;
4687
4688 case VG_USERREQ__SET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00004689 *ret = mc_get_or_set_vbits_for_client
4690 ( tid, arg[1], arg[2], arg[3], True /* set them */ );
4691 break;
nethercote8b76fe52004-11-08 19:20:09 +00004692
njn1d0825f2006-03-27 11:37:07 +00004693 case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
4694 UWord** argp = (UWord**)arg;
4695 // MC_(bytes_leaked) et al were set by the last leak check (or zero
4696 // if no prior leak checks performed).
4697 *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
4698 *argp[2] = MC_(bytes_dubious);
4699 *argp[3] = MC_(bytes_reachable);
4700 *argp[4] = MC_(bytes_suppressed);
4701 // there is no argp[5]
4702 //*argp[5] = MC_(bytes_indirect);
njndbf7ca72006-03-31 11:57:59 +00004703 // XXX need to make *argp[1-4] defined
njn1d0825f2006-03-27 11:37:07 +00004704 *ret = 0;
4705 return True;
4706 }
4707 case VG_USERREQ__MALLOCLIKE_BLOCK: {
4708 Addr p = (Addr)arg[1];
4709 SizeT sizeB = arg[2];
4710 UInt rzB = arg[3];
4711 Bool is_zeroed = (Bool)arg[4];
4712
4713 MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed,
4714 MC_AllocCustom, MC_(malloc_list) );
4715 return True;
4716 }
4717 case VG_USERREQ__FREELIKE_BLOCK: {
4718 Addr p = (Addr)arg[1];
4719 UInt rzB = arg[2];
4720
4721 MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
4722 return True;
4723 }
4724
4725 case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
njn718d3b12006-12-16 00:54:12 +00004726 Char* s = (Char*)arg[1];
4727 Addr dst = (Addr) arg[2];
4728 Addr src = (Addr) arg[3];
4729 SizeT len = (SizeT)arg[4];
4730 mc_record_overlap_error(tid, s, src, dst, len);
njn1d0825f2006-03-27 11:37:07 +00004731 return True;
4732 }
4733
4734 case VG_USERREQ__CREATE_MEMPOOL: {
4735 Addr pool = (Addr)arg[1];
4736 UInt rzB = arg[2];
4737 Bool is_zeroed = (Bool)arg[3];
4738
4739 MC_(create_mempool) ( pool, rzB, is_zeroed );
4740 return True;
4741 }
4742
4743 case VG_USERREQ__DESTROY_MEMPOOL: {
4744 Addr pool = (Addr)arg[1];
4745
4746 MC_(destroy_mempool) ( pool );
4747 return True;
4748 }
4749
4750 case VG_USERREQ__MEMPOOL_ALLOC: {
4751 Addr pool = (Addr)arg[1];
4752 Addr addr = (Addr)arg[2];
4753 UInt size = arg[3];
4754
4755 MC_(mempool_alloc) ( tid, pool, addr, size );
4756 return True;
4757 }
4758
4759 case VG_USERREQ__MEMPOOL_FREE: {
4760 Addr pool = (Addr)arg[1];
4761 Addr addr = (Addr)arg[2];
4762
4763 MC_(mempool_free) ( pool, addr );
4764 return True;
4765 }
4766
sewardj2c1c9df2006-07-28 00:06:37 +00004767 case VG_USERREQ__MEMPOOL_TRIM: {
4768 Addr pool = (Addr)arg[1];
4769 Addr addr = (Addr)arg[2];
4770 UInt size = arg[3];
4771
4772 MC_(mempool_trim) ( pool, addr, size );
4773 return True;
4774 }
4775
sewardjc740d762006-10-05 17:59:23 +00004776 case VG_USERREQ__MOVE_MEMPOOL: {
4777 Addr poolA = (Addr)arg[1];
4778 Addr poolB = (Addr)arg[2];
4779
4780 MC_(move_mempool) ( poolA, poolB );
4781 return True;
4782 }
4783
4784 case VG_USERREQ__MEMPOOL_CHANGE: {
4785 Addr pool = (Addr)arg[1];
4786 Addr addrA = (Addr)arg[2];
4787 Addr addrB = (Addr)arg[3];
4788 UInt size = arg[4];
4789
4790 MC_(mempool_change) ( pool, addrA, addrB, size );
4791 return True;
4792 }
4793
4794 case VG_USERREQ__MEMPOOL_EXISTS: {
4795 Addr pool = (Addr)arg[1];
4796
4797 *ret = (UWord) MC_(mempool_exists) ( pool );
4798 return True;
4799 }
4800
4801
nethercote8b76fe52004-11-08 19:20:09 +00004802 default:
njn1d0825f2006-03-27 11:37:07 +00004803 VG_(message)(Vg_UserMsg,
4804 "Warning: unknown memcheck client request code %llx",
4805 (ULong)arg[0]);
4806 return False;
nethercote8b76fe52004-11-08 19:20:09 +00004807 }
4808 return True;
4809}
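
/* Illustrative sketch of how a client-side custom allocator would feed the
   MALLOCLIKE/FREELIKE requests handled above (the mempool requests follow
   the same pattern via VALGRIND_CREATE_MEMPOOL et al).  Macro names are the
   ones from valgrind.h of this era; the arena helpers are hypothetical. */
#if 0
#include <stddef.h>
#include "valgrind.h"

extern void* carve_from_arena ( size_t n );   /* hypothetical helper */
extern void  return_to_arena  ( void* p );    /* hypothetical helper */

void* my_arena_alloc ( size_t n )
{
   void* p = carve_from_arena(n);
   VALGRIND_MALLOCLIKE_BLOCK(p, n, /*rzB*/0, /*is_zeroed*/0);
   return p;                          /* now tracked like a malloc'd block */
}

void my_arena_free ( void* p )
{
   VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);   /* now tracked like free */
   return_to_arena(p);
}
#endif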
njn25e49d8e72002-09-23 09:36:25 +00004810
4811/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004812/*--- Crude profiling machinery. ---*/
4813/*------------------------------------------------------------*/
4814
4815// We track a number of interesting events (using PROF_EVENT)
4816// if MC_PROFILE_MEMORY is defined.
4817
4818#ifdef MC_PROFILE_MEMORY
4819
4820UInt MC_(event_ctr)[N_PROF_EVENTS];
4821HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
4822
4823static void init_prof_mem ( void )
4824{
4825 Int i;
4826 for (i = 0; i < N_PROF_EVENTS; i++) {
4827 MC_(event_ctr)[i] = 0;
4828 MC_(event_ctr_name)[i] = NULL;
4829 }
4830}
4831
4832static void done_prof_mem ( void )
4833{
4834 Int i;
4835 Bool spaced = False;
4836 for (i = 0; i < N_PROF_EVENTS; i++) {
4837 if (!spaced && (i % 10) == 0) {
4838 VG_(printf)("\n");
4839 spaced = True;
4840 }
4841 if (MC_(event_ctr)[i] > 0) {
4842 spaced = False;
4843 VG_(printf)( "prof mem event %3d: %9d %s\n",
4844 i, MC_(event_ctr)[i],
4845 MC_(event_ctr_name)[i]
4846 ? MC_(event_ctr_name)[i] : "unnamed");
4847 }
4848 }
4849}
4850
4851#else
4852
4853static void init_prof_mem ( void ) { }
4854static void done_prof_mem ( void ) { }
4855
4856#endif
4857
4858/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00004859/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00004860/*------------------------------------------------------------*/
4861
njn51d827b2005-05-09 01:02:08 +00004862static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00004863{
sewardj71bc3cb2005-05-19 00:25:45 +00004864 /* If we've been asked to emit XML, mash around various other
4865 options so as to constrain the output somewhat. */
4866 if (VG_(clo_xml)) {
4867 /* Extract as much info as possible from the leak checker. */
njn1d0825f2006-03-27 11:37:07 +00004868 /* MC_(clo_show_reachable) = True; */
4869 MC_(clo_leak_check) = LC_Full;
sewardj71bc3cb2005-05-19 00:25:45 +00004870 }
njn5c004e42002-11-18 11:04:50 +00004871}
4872
njn1d0825f2006-03-27 11:37:07 +00004873static void print_SM_info(char* type, int n_SMs)
4874{
4875 VG_(message)(Vg_DebugMsg,
4876 " memcheck: SMs: %s = %d (%dk, %dM)",
4877 type,
4878 n_SMs,
4879 n_SMs * sizeof(SecMap) / 1024,
4880 n_SMs * sizeof(SecMap) / (1024 * 1024) );
4881}
4882
njn51d827b2005-05-09 01:02:08 +00004883static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00004884{
njn1d0825f2006-03-27 11:37:07 +00004885 MC_(print_malloc_stats)();
sewardj23eb2fd2005-04-22 16:29:19 +00004886
njn1d0825f2006-03-27 11:37:07 +00004887 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4888 if (MC_(clo_leak_check) == LC_Off)
4889 VG_(message)(Vg_UserMsg,
4890 "For a detailed leak analysis, rerun with: --leak-check=yes");
4891
4892 VG_(message)(Vg_UserMsg,
4893 "For counts of detected errors, rerun with: -v");
4894 }
4895 if (MC_(clo_leak_check) != LC_Off)
4896 mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));
4897
4898 done_prof_mem();
sewardjae986ca2005-10-12 12:53:20 +00004899
sewardj45d94cc2005-04-20 14:44:11 +00004900 if (VG_(clo_verbosity) > 1) {
njn1d0825f2006-03-27 11:37:07 +00004901 SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
4902
sewardj45d94cc2005-04-20 14:44:11 +00004903 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00004904 " memcheck: sanity checks: %d cheap, %d expensive",
4905 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00004906 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00004907 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
sewardj05a46732006-10-17 01:28:10 +00004908 n_auxmap_L2_nodes,
4909 n_auxmap_L2_nodes * 64,
4910 n_auxmap_L2_nodes / 16 );
sewardj23eb2fd2005-04-22 16:29:19 +00004911 VG_(message)(Vg_DebugMsg,
sewardj05a46732006-10-17 01:28:10 +00004912 " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
4913 n_auxmap_L1_searches, n_auxmap_L1_cmps,
4914 (10ULL * n_auxmap_L1_cmps)
4915 / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
4916 );
4917 VG_(message)(Vg_DebugMsg,
4918 " memcheck: auxmaps_L2: %lld searches, %lld nodes",
4919 n_auxmap_L2_searches, n_auxmap_L2_nodes
4920 );
sewardj23eb2fd2005-04-22 16:29:19 +00004921
njndbf7ca72006-03-31 11:57:59 +00004922 print_SM_info("n_issued ", n_issued_SMs);
4923 print_SM_info("n_deissued ", n_deissued_SMs);
4924 print_SM_info("max_noaccess ", max_noaccess_SMs);
4925 print_SM_info("max_undefined", max_undefined_SMs);
4926 print_SM_info("max_defined ", max_defined_SMs);
4927 print_SM_info("max_non_DSM ", max_non_DSM_SMs);
njn1d0825f2006-03-27 11:37:07 +00004928
4929 // Three DSMs, plus the non-DSM ones
4930 max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
4931 // The 3*sizeof(Word) bytes is the AVL node metadata size.
4932 // The 4*sizeof(Word) bytes is the malloc metadata size.
4933 // Hardwiring these sizes in sucks, but I don't see how else to do it.
4934 max_secVBit_szB = max_secVBit_nodes *
4935 (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
4936 max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
sewardj23eb2fd2005-04-22 16:29:19 +00004937
4938 VG_(message)(Vg_DebugMsg,
njn1d0825f2006-03-27 11:37:07 +00004939 " memcheck: max sec V bit nodes: %d (%dk, %dM)",
4940 max_secVBit_nodes, max_secVBit_szB / 1024,
4941 max_secVBit_szB / (1024 * 1024));
4942 VG_(message)(Vg_DebugMsg,
4943 " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
4944 sec_vbits_new_nodes + sec_vbits_updates,
4945 sec_vbits_new_nodes, sec_vbits_updates );
4946 VG_(message)(Vg_DebugMsg,
4947 " memcheck: max shadow mem size: %dk, %dM",
4948 max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
sewardj45d94cc2005-04-20 14:44:11 +00004949 }
4950
njn5c004e42002-11-18 11:04:50 +00004951 if (0) {
4952 VG_(message)(Vg_DebugMsg,
4953 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00004954 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00004955 }
njn25e49d8e72002-09-23 09:36:25 +00004956}
4957
njn51d827b2005-05-09 01:02:08 +00004958static void mc_pre_clo_init(void)
4959{
4960 VG_(details_name) ("Memcheck");
4961 VG_(details_version) (NULL);
4962 VG_(details_description) ("a memory error detector");
4963 VG_(details_copyright_author)(
sewardj9ebd6e02007-01-08 06:01:59 +00004964 "Copyright (C) 2002-2007, and GNU GPL'd, by Julian Seward et al.");
njn51d827b2005-05-09 01:02:08 +00004965 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj05a46732006-10-17 01:28:10 +00004966 VG_(details_avg_translation_sizeB) ( 556 );
njn51d827b2005-05-09 01:02:08 +00004967
4968 VG_(basic_tool_funcs) (mc_post_clo_init,
4969 MC_(instrument),
4970 mc_fini);

   VG_(needs_final_IR_tidy_pass)  ( MC_(final_tidy) );

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (mc_eq_Error,
                                   mc_pp_Error,
                                   True,/*show TIDs for errors*/
                                   mc_update_extra,
                                   mc_recognised_suppression,
                                   mc_read_extra_suppression_info,
                                   mc_error_matches_suppression,
                                   mc_get_error_name,
                                   mc_print_extra_suppression_info);
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)(mc_process_cmd_line_options,
                                   mc_print_usage,
                                   mc_print_debug_usage);
   VG_(needs_client_requests)     (mc_handle_client_request);
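   /* The handler registered just above services the macros in memcheck.h.
      An illustrative client-side use (not from this file) would be:
         #include "memcheck.h"
         VALGRIND_MAKE_MEM_DEFINED(buf, len);  // mark buf[0..len-1] defined
      The macro expands to a magic code sequence which the core intercepts
      and forwards to mc_handle_client_request. */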
   VG_(needs_sanity_checks)       (mc_cheap_sanity_check,
                                   mc_expensive_sanity_check);
   VG_(needs_malloc_replacement)  (MC_(malloc),
                                   MC_(__builtin_new),
                                   MC_(__builtin_vec_new),
                                   MC_(memalign),
                                   MC_(calloc),
                                   MC_(free),
                                   MC_(__builtin_delete),
                                   MC_(__builtin_vec_delete),
                                   MC_(realloc),
                                   MC_MALLOC_REDZONE_SZB );
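   /* Replacing the allocator lets Memcheck record every heap block in
      MC_(malloc_list) and surround each block with red zones of
      MC_MALLOC_REDZONE_SZB inaccessible bytes, so small overruns and
      underruns are reported as invalid accesses rather than going
      unnoticed. */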
   VG_(needs_xml_output)          ();

   VG_(track_new_mem_startup)     ( mc_new_mem_startup );
   VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
   VG_(track_new_mem_brk)         ( MC_(make_mem_undefined) );
   VG_(track_new_mem_mmap)        ( mc_new_mem_mmap );

   VG_(track_copy_mem_remap)      ( MC_(copy_address_range_state) );

   // Nb: we don't do anything with mprotect.  This means that V bits are
   // preserved if a program, for example, marks some memory as inaccessible
   // and then later marks it as accessible again.
   //
   // If an access violation occurs (eg. writing to read-only memory) we let
   // it fault and print an informative termination message.  This doesn't
   // happen if the program catches the signal, though, which is bad.  If we
   // had two A bits (for readability and writability) that were completely
   // distinct from V bits, then we could handle all this properly.
   VG_(track_change_mem_mprotect) ( NULL );

   VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
   VG_(track_die_mem_brk)         ( MC_(make_mem_noaccess) );
   VG_(track_die_mem_munmap)      ( MC_(make_mem_noaccess) );

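   /* The PERF_FAST_STACK handlers below are specialised fast paths for the
      most common stack-pointer adjustments (4..160 bytes): each marks a
      fixed-size range undefined (or noaccess, on the die side) without the
      generic length loop.  The plain track_new_mem_stack and
      track_die_mem_stack handlers remain the fallback for all other sizes. */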
#ifdef PERF_FAST_STACK
   VG_(track_new_mem_stack_4)     ( mc_new_mem_stack_4   );
   VG_(track_new_mem_stack_8)     ( mc_new_mem_stack_8   );
   VG_(track_new_mem_stack_12)    ( mc_new_mem_stack_12  );
   VG_(track_new_mem_stack_16)    ( mc_new_mem_stack_16  );
   VG_(track_new_mem_stack_32)    ( mc_new_mem_stack_32  );
   VG_(track_new_mem_stack_112)   ( mc_new_mem_stack_112 );
   VG_(track_new_mem_stack_128)   ( mc_new_mem_stack_128 );
   VG_(track_new_mem_stack_144)   ( mc_new_mem_stack_144 );
   VG_(track_new_mem_stack_160)   ( mc_new_mem_stack_160 );
#endif
   VG_(track_new_mem_stack)       ( mc_new_mem_stack );

#ifdef PERF_FAST_STACK
   VG_(track_die_mem_stack_4)     ( mc_die_mem_stack_4   );
   VG_(track_die_mem_stack_8)     ( mc_die_mem_stack_8   );
   VG_(track_die_mem_stack_12)    ( mc_die_mem_stack_12  );
   VG_(track_die_mem_stack_16)    ( mc_die_mem_stack_16  );
   VG_(track_die_mem_stack_32)    ( mc_die_mem_stack_32  );
   VG_(track_die_mem_stack_112)   ( mc_die_mem_stack_112 );
   VG_(track_die_mem_stack_128)   ( mc_die_mem_stack_128 );
   VG_(track_die_mem_stack_144)   ( mc_die_mem_stack_144 );
   VG_(track_die_mem_stack_160)   ( mc_die_mem_stack_160 );
#endif
   VG_(track_die_mem_stack)       ( mc_die_mem_stack );

   VG_(track_ban_mem_stack)       ( MC_(make_mem_noaccess) );

   VG_(track_pre_mem_read)        ( check_mem_is_defined );
   VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
   VG_(track_pre_mem_write)       ( check_mem_is_addressable );
   VG_(track_post_mem_write)      ( mc_post_mem_write );
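   /* Semantics of the four hooks above, in brief: before the core reads
      client memory (eg. a syscall input buffer) the range must be both
      addressable and defined; before the core writes client memory it need
      only be addressable; after such a write the range is marked defined. */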

   if (MC_(clo_undef_value_errors))
      VG_(track_pre_reg_read)     ( mc_pre_reg_read );

   VG_(track_post_reg_write)                  ( mc_post_reg_write );
   VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );

   init_shadow_memory();
   MC_(malloc_list)  = VG_(HT_construct)( "MC_(malloc_list)" );
   MC_(mempool_list) = VG_(HT_construct)( "MC_(mempool_list)" );
   init_prof_mem();

   tl_assert( mc_expensive_sanity_check() );

   // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
   tl_assert(sizeof(UWord) == sizeof(Addr));
   // Call me paranoid.  I don't care.
   tl_assert(sizeof(void*) == sizeof(Addr));

   // BYTES_PER_SEC_VBIT_NODE must be a power of two.
   tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
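   /* The power-of-two requirement presumably exists so that a secondary
      V-bit node's base address can be derived from any address inside it
      with a simple mask rather than a division. */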
}

VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
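/* This macro is the tool's entry point as far as the Valgrind core is
   concerned: it records mc_pre_clo_init as the function to call at startup
   and embeds the core/tool interface version, so a tool built against a
   mismatched core can be rejected rather than mis-loaded. */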

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/