blob: fb6706cbe7a9de9e4465fb449a64deeff5da4283 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
sewardje4b0bf02006-06-05 23:21:15 +000012 Copyright (C) 2000-2006 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njnc7561b92005-06-19 01:24:32 +000033#include "pub_tool_basics.h"
njn4802b382005-06-11 04:58:29 +000034#include "pub_tool_aspacemgr.h"
njn1d0825f2006-03-27 11:37:07 +000035#include "pub_tool_hashtable.h" // For mc_include.h
njn97405b22005-06-02 03:39:33 +000036#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000037#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000038#include "pub_tool_libcprint.h"
njnf536bbb2005-06-13 04:21:38 +000039#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000040#include "pub_tool_mallocfree.h"
41#include "pub_tool_options.h"
njn1d0825f2006-03-27 11:37:07 +000042#include "pub_tool_oset.h"
njnc7561b92005-06-19 01:24:32 +000043#include "pub_tool_replacemalloc.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_threadstate.h"
sewardj05a46732006-10-17 01:28:10 +000046#include "pub_tool_oset.h"
njnc7561b92005-06-19 01:24:32 +000047
48#include "mc_include.h"
49#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000050
tomd55121e2005-12-19 12:40:13 +000051#ifdef HAVE_BUILTIN_EXPECT
sewardjc1a2cda2005-04-21 17:34:00 +000052#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
53#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
tomd55121e2005-12-19 12:40:13 +000054#else
55#define EXPECTED_TAKEN(cond) (cond)
56#define EXPECTED_NOT_TAKEN(cond) (cond)
57#endif
sewardjc1a2cda2005-04-21 17:34:00 +000058
njn1d0825f2006-03-27 11:37:07 +000059/* Set to 1 to do a little more sanity checking */
sewardj23eb2fd2005-04-22 16:29:19 +000060#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000061
njn25e49d8e72002-09-23 09:36:25 +000062#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
63
njn25e49d8e72002-09-23 09:36:25 +000064
njn25e49d8e72002-09-23 09:36:25 +000065/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +000066/*--- Fast-case knobs ---*/
67/*------------------------------------------------------------*/
68
69// Comment these out to disable the fast cases (don't just set them to zero).
70
71#define PERF_FAST_LOADV 1
72#define PERF_FAST_STOREV 1
73
74#define PERF_FAST_SARP 1
75
76#define PERF_FAST_STACK 1
77#define PERF_FAST_STACK2 1
78
79/*------------------------------------------------------------*/
80/*--- V bits and A bits ---*/
81/*------------------------------------------------------------*/
82
83/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
84 thinks the corresponding value bit is defined. And every memory byte
85 has an A bit, which tracks whether Memcheck thinks the program can access
86 it safely. So every N-bit register is shadowed with N V bits, and every
87 memory byte is shadowed with 8 V bits and one A bit.
88
89 In the implementation, we use two forms of compression (compressed V bits
90 and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
91 for memory.
92
93 Memcheck also tracks extra information about each heap block that is
94 allocated, for detecting memory leaks and other purposes.
95*/
96
97/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000098/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000099/*------------------------------------------------------------*/
100
njn1d0825f2006-03-27 11:37:07 +0000101/* All reads and writes are checked against a memory map (a.k.a. shadow
102 memory), which records the state of all memory in the process.
103
104 On 32-bit machines the memory map is organised as follows.
105 The top 16 bits of an address are used to index into a top-level
106 map table, containing 65536 entries. Each entry is a pointer to a
107 second-level map, which records the accesibililty and validity
108 permissions for the 65536 bytes indexed by the lower 16 bits of the
109 address. Each byte is represented by two bits (details are below). So
110 each second-level map contains 16384 bytes. This two-level arrangement
111 conveniently divides the 4G address space into 64k lumps, each size 64k
112 bytes.
113
114 All entries in the primary (top-level) map must point to a valid
115 secondary (second-level) map. Since many of the 64kB chunks will
njndbf7ca72006-03-31 11:57:59 +0000116 have the same status for every bit -- ie. noaccess (for unused
117 address space) or entirely addressable and defined (for code segments) --
118 there are three distinguished secondary maps, which indicate 'noaccess',
119 'undefined' and 'defined'. For these uniform 64kB chunks, the primary
120 map entry points to the relevant distinguished map. In practice,
121 typically more than half of the addressable memory is represented with
122 the 'undefined' or 'defined' distinguished secondary map, so it gives a
123 good saving. It also lets us set the V+A bits of large address regions
124 quickly in set_address_range_perms().
njn1d0825f2006-03-27 11:37:07 +0000125
126 On 64-bit machines it's more complicated. If we followed the same basic
127 scheme we'd have a four-level table which would require too many memory
128 accesses. So instead the top-level map table has 2^19 entries (indexed
129 using bits 16..34 of the address); this covers the bottom 32GB. Any
130 accesses above 32GB are handled with a slow, sparse auxiliary table.
131 Valgrind's address space manager tries very hard to keep things below
132 this 32GB barrier so that performance doesn't suffer too much.
133
134 Note that this file has a lot of different functions for reading and
135 writing shadow memory. Only a couple are strictly necessary (eg.
136 get_vabits2 and set_vabits2), most are just specialised for specific
137 common cases to improve performance.
138
139 Aside: the V+A bits are less precise than they could be -- we have no way
140 of marking memory as read-only. It would be great if we could add an
141 extra state VA_BITSn_READONLY. But then we'd have 5 different states,
142 which requires 2.3 bits to hold, and there's no way to do that elegantly
143 -- we'd have to double up to 4 bits of metadata per byte, which doesn't
144 seem worth it.
145*/
sewardjc859fbf2005-04-22 21:10:28 +0000146
sewardj45d94cc2005-04-20 14:44:11 +0000147/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000148
sewardj23eb2fd2005-04-22 16:29:19 +0000149/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000150
sewardje4ccc012005-05-02 12:53:38 +0000151#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000152
153/* cover the entire address space */
154# define N_PRIMARY_BITS 16
155
156#else
157
sewardj34483bc2005-09-28 11:50:20 +0000158/* Just handle the first 32G fast and the rest via auxiliary
sewardj21f7ff42005-04-28 10:32:02 +0000159 primaries. */
sewardj34483bc2005-09-28 11:50:20 +0000160# define N_PRIMARY_BITS 19
sewardj21f7ff42005-04-28 10:32:02 +0000161
162#endif
163
sewardj45d94cc2005-04-20 14:44:11 +0000164
sewardjc1a2cda2005-04-21 17:34:00 +0000165/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000166#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000167
168/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000169#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
170
171
sewardj45d94cc2005-04-20 14:44:11 +0000172/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000173
njn1d0825f2006-03-27 11:37:07 +0000174// Each byte of memory conceptually has an A bit, which indicates its
175// addressability, and 8 V bits, which indicates its definedness.
176//
177// But because very few bytes are partially defined, we can use a nice
178// compression scheme to reduce the size of shadow memory. Each byte of
179// memory has 2 bits which indicates its state (ie. V+A bits):
180//
njndbf7ca72006-03-31 11:57:59 +0000181// 00: noaccess (unaddressable but treated as fully defined)
182// 01: undefined (addressable and fully undefined)
183// 10: defined (addressable and fully defined)
184// 11: partdefined (addressable and partially defined)
njn1d0825f2006-03-27 11:37:07 +0000185//
njndbf7ca72006-03-31 11:57:59 +0000186// In the "partdefined" case, we use a secondary table to store the V bits.
187// Each entry in the secondary-V-bits table maps a byte address to its 8 V
188// bits.
njn1d0825f2006-03-27 11:37:07 +0000189//
190// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
191// four bytes (32 bits) of memory are in each chunk. Hence the name
192// "vabits8". This lets us get the V+A bits for four bytes at a time
193// easily (without having to do any shifting and/or masking), and that is a
194// very common operation. (Note that although each vabits8 chunk
195// is 8 bits in size, it represents 32 bits of memory.)
196//
197// The representation is "inverse" little-endian... each 4 bytes of
198// memory is represented by a 1 byte value, where:
199//
200// - the status of byte (a+0) is held in bits [1..0]
201// - the status of byte (a+1) is held in bits [3..2]
202// - the status of byte (a+2) is held in bits [5..4]
203// - the status of byte (a+3) is held in bits [7..6]
204//
205// It's "inverse" because endianness normally describes a mapping from
206// value bits to memory addresses; in this case the mapping is inverted.
207// Ie. instead of particular value bits being held in certain addresses, in
208// this case certain addresses are represented by particular value bits.
209// See insert_vabits2_into_vabits8() for an example.
210//
211// But note that we don't compress the V bits stored in registers; they
212// need to be explicit to made the shadow operations possible. Therefore
213// when moving values between registers and memory we need to convert
214// between the expanded in-register format and the compressed in-memory
215// format. This isn't so difficult, it just requires careful attention in a
216// few places.
217
218// These represent eight bits of memory.
219#define VA_BITS2_NOACCESS 0x0 // 00b
njndbf7ca72006-03-31 11:57:59 +0000220#define VA_BITS2_UNDEFINED 0x1 // 01b
221#define VA_BITS2_DEFINED 0x2 // 10b
222#define VA_BITS2_PARTDEFINED 0x3 // 11b
njn1d0825f2006-03-27 11:37:07 +0000223
224// These represent 16 bits of memory.
225#define VA_BITS4_NOACCESS 0x0 // 00_00b
njndbf7ca72006-03-31 11:57:59 +0000226#define VA_BITS4_UNDEFINED 0x5 // 01_01b
227#define VA_BITS4_DEFINED 0xa // 10_10b
njn1d0825f2006-03-27 11:37:07 +0000228
229// These represent 32 bits of memory.
230#define VA_BITS8_NOACCESS 0x00 // 00_00_00_00b
njndbf7ca72006-03-31 11:57:59 +0000231#define VA_BITS8_UNDEFINED 0x55 // 01_01_01_01b
232#define VA_BITS8_DEFINED 0xaa // 10_10_10_10b
njn1d0825f2006-03-27 11:37:07 +0000233
234// These represent 64 bits of memory.
235#define VA_BITS16_NOACCESS 0x0000 // 00_00_00_00b x 2
njndbf7ca72006-03-31 11:57:59 +0000236#define VA_BITS16_UNDEFINED 0x5555 // 01_01_01_01b x 2
237#define VA_BITS16_DEFINED 0xaaaa // 10_10_10_10b x 2
njn1d0825f2006-03-27 11:37:07 +0000238
239
240#define SM_CHUNKS 16384
241#define SM_OFF(aaa) (((aaa) & 0xffff) >> 2)
242#define SM_OFF_16(aaa) (((aaa) & 0xffff) >> 3)
243
244// Paranoia: it's critical for performance that the requested inlining
245// occurs. So try extra hard.
246#define INLINE inline __attribute__((always_inline))
247
248static INLINE Addr start_of_this_sm ( Addr a ) {
249 return (a & (~SM_MASK));
250}
251static INLINE Bool is_start_of_sm ( Addr a ) {
252 return (start_of_this_sm(a) == a);
253}
254
/* A secondary map: the compressed V+A bits for one 64kB chunk of
   address space.  Each vabits8 byte encodes the state of four bytes
   of memory (2 bits per byte), so SM_CHUNKS (16384) entries cover
   the full 64kB. */
typedef
   struct {
      UChar vabits8[SM_CHUNKS];
   }
   SecMap;
260
njn1d0825f2006-03-27 11:37:07 +0000261// 3 distinguished secondary maps, one for no-access, one for
262// accessible but undefined, and one for accessible and defined.
263// Distinguished secondaries may never be modified.
264#define SM_DIST_NOACCESS 0
njndbf7ca72006-03-31 11:57:59 +0000265#define SM_DIST_UNDEFINED 1
266#define SM_DIST_DEFINED 2
njnb8dca862005-03-14 02:42:44 +0000267
sewardj45d94cc2005-04-20 14:44:11 +0000268static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000269
njn1d0825f2006-03-27 11:37:07 +0000270static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
sewardj45d94cc2005-04-20 14:44:11 +0000271 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
272}
njnb8dca862005-03-14 02:42:44 +0000273
njn1d0825f2006-03-27 11:37:07 +0000274// Forward declaration
275static void update_SM_counts(SecMap* oldSM, SecMap* newSM);
276
sewardj45d94cc2005-04-20 14:44:11 +0000277/* dist_sm points to one of our three distinguished secondaries. Make
278 a copy of it so that we can write to it.
279*/
280static SecMap* copy_for_writing ( SecMap* dist_sm )
281{
282 SecMap* new_sm;
283 tl_assert(dist_sm == &sm_distinguished[0]
njn1d0825f2006-03-27 11:37:07 +0000284 || dist_sm == &sm_distinguished[1]
285 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000286
sewardj45f4e7c2005-09-27 19:20:21 +0000287 new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
288 if (new_sm == NULL)
289 VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
290 sizeof(SecMap) );
sewardj45d94cc2005-04-20 14:44:11 +0000291 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
njn1d0825f2006-03-27 11:37:07 +0000292 update_SM_counts(dist_sm, new_sm);
sewardj45d94cc2005-04-20 14:44:11 +0000293 return new_sm;
294}
njnb8dca862005-03-14 02:42:44 +0000295
njn1d0825f2006-03-27 11:37:07 +0000296/* --------------- Stats --------------- */
297
njndbf7ca72006-03-31 11:57:59 +0000298static Int n_issued_SMs = 0;
299static Int n_deissued_SMs = 0;
300static Int n_noaccess_SMs = N_PRIMARY_MAP; // start with many noaccess DSMs
301static Int n_undefined_SMs = 0;
302static Int n_defined_SMs = 0;
303static Int n_non_DSM_SMs = 0;
304static Int max_noaccess_SMs = 0;
305static Int max_undefined_SMs = 0;
306static Int max_defined_SMs = 0;
307static Int max_non_DSM_SMs = 0;
njn1d0825f2006-03-27 11:37:07 +0000308
sewardj05a46732006-10-17 01:28:10 +0000309/* # searches initiated in auxmap_L1, and # base cmps required */
310static ULong n_auxmap_L1_searches = 0;
311static ULong n_auxmap_L1_cmps = 0;
312/* # of searches that missed in auxmap_L1 and therefore had to
313 be handed to auxmap_L2. And the number of nodes inserted. */
314static ULong n_auxmap_L2_searches = 0;
315static ULong n_auxmap_L2_nodes = 0;
316
njn1d0825f2006-03-27 11:37:07 +0000317static Int n_sanity_cheap = 0;
318static Int n_sanity_expensive = 0;
319
320static Int n_secVBit_nodes = 0;
321static Int max_secVBit_nodes = 0;
322
323static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
324{
njndbf7ca72006-03-31 11:57:59 +0000325 if (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
326 else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
327 else if (oldSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs --;
328 else { n_non_DSM_SMs --;
329 n_deissued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000330
njndbf7ca72006-03-31 11:57:59 +0000331 if (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
332 else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
333 else if (newSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs ++;
334 else { n_non_DSM_SMs ++;
335 n_issued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000336
njndbf7ca72006-03-31 11:57:59 +0000337 if (n_noaccess_SMs > max_noaccess_SMs ) max_noaccess_SMs = n_noaccess_SMs;
338 if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
339 if (n_defined_SMs > max_defined_SMs ) max_defined_SMs = n_defined_SMs;
340 if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
njn1d0825f2006-03-27 11:37:07 +0000341}
sewardj45d94cc2005-04-20 14:44:11 +0000342
343/* --------------- Primary maps --------------- */
344
345/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000346 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000347 handled using the auxiliary primary map.
348*/
sewardj23eb2fd2005-04-22 16:29:19 +0000349static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000350
351
352/* An entry in the auxiliary primary map. base must be a 64k-aligned
353 value, and sm points at the relevant secondary map. As with the
354 main primary map, the secondary may be either a real secondary, or
sewardj05a46732006-10-17 01:28:10 +0000355 one of the three distinguished secondaries. DO NOT CHANGE THIS
356 LAYOUT: the first word has to be the key for OSet fast lookups.
sewardj45d94cc2005-04-20 14:44:11 +0000357*/
358typedef
359 struct {
sewardj23eb2fd2005-04-22 16:29:19 +0000360 Addr base;
sewardj45d94cc2005-04-20 14:44:11 +0000361 SecMap* sm;
362 }
363 AuxMapEnt;
364
sewardj05a46732006-10-17 01:28:10 +0000365/* Tunable parameter: How big is the L1 queue? */
366#define N_AUXMAP_L1 24
sewardj45d94cc2005-04-20 14:44:11 +0000367
sewardj05a46732006-10-17 01:28:10 +0000368/* Tunable parameter: How far along the L1 queue to insert
369 entries resulting from L2 lookups? */
370#define AUXMAP_L1_INSERT_IX 12
sewardj45d94cc2005-04-20 14:44:11 +0000371
sewardj05a46732006-10-17 01:28:10 +0000372static struct {
373 Addr base;
374 AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
375 }
376 auxmap_L1[N_AUXMAP_L1];
377
378static OSet* auxmap_L2 = NULL;
379
380static void init_auxmap_L1_L2 ( void )
sewardj45d94cc2005-04-20 14:44:11 +0000381{
sewardj05a46732006-10-17 01:28:10 +0000382 Int i;
383 for (i = 0; i < N_AUXMAP_L1; i++) {
384 auxmap_L1[i].base = 0;
385 auxmap_L1[i].ent = NULL;
sewardj45d94cc2005-04-20 14:44:11 +0000386 }
387
sewardj05a46732006-10-17 01:28:10 +0000388 tl_assert(0 == offsetof(AuxMapEnt,base));
389 tl_assert(sizeof(Addr) == sizeof(void*));
390 auxmap_L2 = VG_(OSet_Create)( /*keyOff*/ offsetof(AuxMapEnt,base),
391 /*fastCmp*/ NULL,
392 VG_(malloc), VG_(free) );
sewardj05fe85e2005-04-27 22:46:36 +0000393}
394
sewardj05a46732006-10-17 01:28:10 +0000395/* Check representation invariants; if OK return NULL; else a
396 descriptive bit of text. Also return the number of
397 non-distinguished secondary maps referred to from the auxiliary
398 primary maps. */
sewardj05fe85e2005-04-27 22:46:36 +0000399
/* Returns NULL if the L1/L2 auxmap representation is sane, else a
   string describing the first violation found.  Also counts the
   non-distinguished secondaries reachable from L2 into
   *n_secmaps_found. */
static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
{
   Word i, j;
   /* On a 32-bit platform, the L2 and L1 tables should
      both remain empty forever.

      On a 64-bit platform:
      In the L2 table:
       all .base & 0xFFFF == 0
       all .base > MAX_PRIMARY_ADDRESS
      In the L1 table:
       all .base & 0xFFFF == 0
       all (.base > MAX_PRIMARY_ADDRESS
            .base & 0xFFFF == 0
            and .ent points to an AuxMapEnt with the same .base)
           or
           (.base == 0 and .ent == NULL)
   */
   *n_secmaps_found = 0;
   if (sizeof(void*) == 4) {
      /* 32-bit platform: everything is covered by the main primary
         map, so both auxmap levels must be empty. */
      if (VG_(OSet_Size)(auxmap_L2) != 0)
         return "32-bit: auxmap_L2 is non-empty";
      for (i = 0; i < N_AUXMAP_L1; i++)
         if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
            return "32-bit: auxmap_L1 is non-empty";
   } else {
      /* 64-bit platform */
      UWord elems_seen = 0;
      AuxMapEnt *elem, *res;
      AuxMapEnt key;
      /* L2 table: every node is 64k-aligned, above the primary-map
         range, and has a non-NULL secondary. */
      VG_(OSet_ResetIter)(auxmap_L2);
      while ( (elem = VG_(OSet_Next)(auxmap_L2)) ) {
         elems_seen++;
         if (0 != (elem->base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
         if (elem->base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
         if (elem->sm == NULL)
            return "64-bit: .sm in _L2 is NULL";
         if (!is_distinguished_sm(elem->sm))
            (*n_secmaps_found)++;
      }
      if (elems_seen != n_auxmap_L2_nodes)
         return "64-bit: disagreement on number of elems in _L2";
      /* Check L1-L2 correspondence: every occupied L1 slot must
         mirror an L2 node with the same base. */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
            continue;
         if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
         if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
         if (auxmap_L1[i].ent == NULL)
            return "64-bit: .ent is NULL in auxmap_L1";
         if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
            return "64-bit: _L1 and _L2 bases are inconsistent";
         /* Look it up in auxmap_L2. */
         key.base = auxmap_L1[i].base;
         key.sm   = 0;
         res = VG_(OSet_Lookup)(auxmap_L2, &key);
         if (res == NULL)
            return "64-bit: _L1 .base not found in _L2";
         if (res != auxmap_L1[i].ent)
            return "64-bit: _L1 .ent disagrees with _L2 entry";
      }
      /* Check L1 contains no duplicates */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0)
            continue;
         for (j = i+1; j < N_AUXMAP_L1; j++) {
            if (auxmap_L1[j].base == 0)
               continue;
            if (auxmap_L1[j].base == auxmap_L1[i].base)
               return "64-bit: duplicate _L1 .base entries";
         }
      }
   }
   return NULL; /* ok */
}
481
482static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
483{
484 Word i;
485 tl_assert(ent);
486 tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
487 for (i = N_AUXMAP_L1-1; i > rank; i--)
488 auxmap_L1[i] = auxmap_L1[i-1];
489 auxmap_L1[rank].base = ent->base;
490 auxmap_L1[rank].ent = ent;
491}
492
/* Look up the auxmap entry for the 64k chunk containing 'a', or
   return NULL if there is none.  Searches the self-organising L1
   front-cache first, falling back to the L2 OSet; hits are promoted
   towards the front of L1 so popular chunks stay cheap. */
static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   AuxMapEnt  key;
   AuxMapEnt* res;
   Word       i;

   tl_assert(a > MAX_PRIMARY_ADDRESS);
   a &= ~(Addr)0xFFFF;   /* normalise to the chunk base */

   /* First search the front-cache, which is a self-organising
      list containing the most popular entries. */

   /* Fast path: hit in slot 0 costs nothing extra. */
   if (EXPECTED_TAKEN(auxmap_L1[0].base == a))
      return auxmap_L1[0].ent;
   /* Hit in slot 1: swap slots 0 and 1 so it moves to the front. */
   if (EXPECTED_TAKEN(auxmap_L1[1].base == a)) {
      Addr       t_base = auxmap_L1[0].base;
      AuxMapEnt* t_ent  = auxmap_L1[0].ent;
      auxmap_L1[0].base = auxmap_L1[1].base;
      auxmap_L1[0].ent  = auxmap_L1[1].ent;
      auxmap_L1[1].base = t_base;
      auxmap_L1[1].ent  = t_ent;
      return auxmap_L1[0].ent;
   }

   n_auxmap_L1_searches++;

   /* Linear scan of the rest of L1. */
   for (i = 0; i < N_AUXMAP_L1; i++) {
      if (auxmap_L1[i].base == a) {
         break;
      }
   }
   tl_assert(i >= 0 && i <= N_AUXMAP_L1);

   n_auxmap_L1_cmps += (ULong)(i+1);

   if (i < N_AUXMAP_L1) {
      /* L1 hit: bubble the entry one slot towards the front so the
         list self-organises by popularity. */
      if (i > 0) {
         Addr       t_base = auxmap_L1[i-1].base;
         AuxMapEnt* t_ent  = auxmap_L1[i-1].ent;
         auxmap_L1[i-1].base = auxmap_L1[i-0].base;
         auxmap_L1[i-1].ent  = auxmap_L1[i-0].ent;
         auxmap_L1[i-0].base = t_base;
         auxmap_L1[i-0].ent  = t_ent;
         i--;
      }
      return auxmap_L1[i].ent;
   }

   n_auxmap_L2_searches++;

   /* First see if we already have it. */
   key.base = a;
   key.sm   = 0;

   res = VG_(OSet_Lookup)(auxmap_L2, &key);
   if (res)
      /* L2 hit: pull it into the middle of the L1 cache. */
      insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
   return res;
}
552
553static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
554{
555 AuxMapEnt *nyu, *res;
556
557 /* First see if we already have it. */
558 res = maybe_find_in_auxmap( a );
559 if (EXPECTED_TAKEN(res))
560 return res;
561
562 /* Ok, there's no entry in the secondary map, so we'll have
563 to allocate one. */
564 a &= ~(Addr)0xFFFF;
565
566 nyu = (AuxMapEnt*) VG_(OSet_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
567 tl_assert(nyu);
568 nyu->base = a;
569 nyu->sm = &sm_distinguished[SM_DIST_NOACCESS];
570 VG_(OSet_Insert)( auxmap_L2, nyu );
571 insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
572 n_auxmap_L2_nodes++;
573 return nyu;
sewardj45d94cc2005-04-20 14:44:11 +0000574}
575
sewardj45d94cc2005-04-20 14:44:11 +0000576/* --------------- SecMap fundamentals --------------- */
577
njn1d0825f2006-03-27 11:37:07 +0000578// In all these, 'low' means it's definitely in the main primary map,
579// 'high' means it's definitely in the auxiliary table.
580
581static INLINE SecMap** get_secmap_low_ptr ( Addr a )
582{
583 UWord pm_off = a >> 16;
584# if VG_DEBUG_MEMORY >= 1
585 tl_assert(pm_off < N_PRIMARY_MAP);
586# endif
587 return &primary_map[ pm_off ];
588}
589
590static INLINE SecMap** get_secmap_high_ptr ( Addr a )
591{
592 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
593 return &am->sm;
594}
595
596static SecMap** get_secmap_ptr ( Addr a )
597{
598 return ( a <= MAX_PRIMARY_ADDRESS
599 ? get_secmap_low_ptr(a)
600 : get_secmap_high_ptr(a));
601}
602
njna7c7ebd2006-03-28 12:51:02 +0000603static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000604{
605 return *get_secmap_low_ptr(a);
606}
607
njna7c7ebd2006-03-28 12:51:02 +0000608static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000609{
610 return *get_secmap_high_ptr(a);
611}
612
njna7c7ebd2006-03-28 12:51:02 +0000613static INLINE SecMap* get_secmap_for_writing_low(Addr a)
njn1d0825f2006-03-27 11:37:07 +0000614{
615 SecMap** p = get_secmap_low_ptr(a);
616 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
617 *p = copy_for_writing(*p);
618 return *p;
619}
620
njna7c7ebd2006-03-28 12:51:02 +0000621static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000622{
623 SecMap** p = get_secmap_high_ptr(a);
624 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
625 *p = copy_for_writing(*p);
626 return *p;
627}
628
sewardj45d94cc2005-04-20 14:44:11 +0000629/* Produce the secmap for 'a', either from the primary map or by
630 ensuring there is an entry for it in the aux primary map. The
631 secmap may be a distinguished one as the caller will only want to
632 be able to read it.
633*/
sewardj05a46732006-10-17 01:28:10 +0000634static INLINE SecMap* get_secmap_for_reading ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000635{
njn1d0825f2006-03-27 11:37:07 +0000636 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000637 ? get_secmap_for_reading_low (a)
638 : get_secmap_for_reading_high(a) );
sewardj45d94cc2005-04-20 14:44:11 +0000639}
640
641/* Produce the secmap for 'a', either from the primary map or by
642 ensuring there is an entry for it in the aux primary map. The
643 secmap may not be a distinguished one, since the caller will want
644 to be able to write it. If it is a distinguished secondary, make a
645 writable copy of it, install it, and return the copy instead. (COW
646 semantics).
647*/
njna7c7ebd2006-03-28 12:51:02 +0000648static SecMap* get_secmap_for_writing ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000649{
njn1d0825f2006-03-27 11:37:07 +0000650 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000651 ? get_secmap_for_writing_low (a)
652 : get_secmap_for_writing_high(a) );
njn1d0825f2006-03-27 11:37:07 +0000653}
654
655/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
656 allocate one if one doesn't already exist. This is used by the
657 leak checker.
658*/
659static SecMap* maybe_get_secmap_for ( Addr a )
660{
sewardj45d94cc2005-04-20 14:44:11 +0000661 if (a <= MAX_PRIMARY_ADDRESS) {
njna7c7ebd2006-03-28 12:51:02 +0000662 return get_secmap_for_reading_low(a);
sewardj45d94cc2005-04-20 14:44:11 +0000663 } else {
njn1d0825f2006-03-27 11:37:07 +0000664 AuxMapEnt* am = maybe_find_in_auxmap(a);
665 return am ? am->sm : NULL;
sewardj45d94cc2005-04-20 14:44:11 +0000666 }
667}
668
njn1d0825f2006-03-27 11:37:07 +0000669/* --------------- Fundamental functions --------------- */
670
671static INLINE
672void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
673{
674 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
675 *vabits8 &= ~(0x3 << shift); // mask out the two old bits
676 *vabits8 |= (vabits2 << shift); // mask in the two new bits
677}
678
679static INLINE
680void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
681{
682 UInt shift;
683 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
684 shift = (a & 2) << 1; // shift by 0 or 4
685 *vabits8 &= ~(0xf << shift); // mask out the four old bits
686 *vabits8 |= (vabits4 << shift); // mask in the four new bits
687}
688
689static INLINE
690UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
691{
692 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
693 vabits8 >>= shift; // shift the two bits to the bottom
694 return 0x3 & vabits8; // mask out the rest
695}
696
697static INLINE
698UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
699{
700 UInt shift;
701 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
702 shift = (a & 2) << 1; // shift by 0 or 4
703 vabits8 >>= shift; // shift the four bits to the bottom
704 return 0xf & vabits8; // mask out the rest
705}
706
707// Note that these four are only used in slow cases. The fast cases do
708// clever things like combine the auxmap check (in
709// get_secmap_{read,writ}able) with alignment checks.
710
711// *** WARNING! ***
712// Any time this function is called, if it is possible that vabits2
njndbf7ca72006-03-31 11:57:59 +0000713// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
njn1d0825f2006-03-27 11:37:07 +0000714// sec-V-bits table must also be set!
715static INLINE
716void set_vabits2 ( Addr a, UChar vabits2 )
717{
njna7c7ebd2006-03-28 12:51:02 +0000718 SecMap* sm = get_secmap_for_writing(a);
njn1d0825f2006-03-27 11:37:07 +0000719 UWord sm_off = SM_OFF(a);
720 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
721}
722
723static INLINE
724UChar get_vabits2 ( Addr a )
725{
njna7c7ebd2006-03-28 12:51:02 +0000726 SecMap* sm = get_secmap_for_reading(a);
njn1d0825f2006-03-27 11:37:07 +0000727 UWord sm_off = SM_OFF(a);
728 UChar vabits8 = sm->vabits8[sm_off];
729 return extract_vabits2_from_vabits8(a, vabits8);
730}
731
sewardjf2184912006-05-03 22:13:57 +0000732// *** WARNING! ***
733// Any time this function is called, if it is possible that any of the
734// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
735// corresponding entry(s) in the sec-V-bits table must also be set!
736static INLINE
737UChar get_vabits8_for_aligned_word32 ( Addr a )
738{
739 SecMap* sm = get_secmap_for_reading(a);
740 UWord sm_off = SM_OFF(a);
741 UChar vabits8 = sm->vabits8[sm_off];
742 return vabits8;
743}
744
745static INLINE
746void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
747{
748 SecMap* sm = get_secmap_for_writing(a);
749 UWord sm_off = SM_OFF(a);
750 sm->vabits8[sm_off] = vabits8;
751}
752
753
njn1d0825f2006-03-27 11:37:07 +0000754// Forward declarations
755static UWord get_sec_vbits8(Addr a);
756static void set_sec_vbits8(Addr a, UWord vbits8);
757
758// Returns False if there was an addressability error.
759static INLINE
760Bool set_vbits8 ( Addr a, UChar vbits8 )
761{
762 Bool ok = True;
763 UChar vabits2 = get_vabits2(a);
764 if ( VA_BITS2_NOACCESS != vabits2 ) {
765 // Addressable. Convert in-register format to in-memory format.
766 // Also remove any existing sec V bit entry for the byte if no
767 // longer necessary.
njndbf7ca72006-03-31 11:57:59 +0000768 if ( V_BITS8_DEFINED == vbits8 ) { vabits2 = VA_BITS2_DEFINED; }
769 else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
770 else { vabits2 = VA_BITS2_PARTDEFINED;
njn1d0825f2006-03-27 11:37:07 +0000771 set_sec_vbits8(a, vbits8); }
772 set_vabits2(a, vabits2);
773
774 } else {
775 // Unaddressable! Do nothing -- when writing to unaddressable
776 // memory it acts as a black hole, and the V bits can never be seen
777 // again. So we don't have to write them at all.
778 ok = False;
779 }
780 return ok;
781}
782
783// Returns False if there was an addressability error. In that case, we put
784// all defined bits into vbits8.
785static INLINE
786Bool get_vbits8 ( Addr a, UChar* vbits8 )
787{
788 Bool ok = True;
789 UChar vabits2 = get_vabits2(a);
790
791 // Convert the in-memory format to in-register format.
njndbf7ca72006-03-31 11:57:59 +0000792 if ( VA_BITS2_DEFINED == vabits2 ) { *vbits8 = V_BITS8_DEFINED; }
793 else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
794 else if ( VA_BITS2_NOACCESS == vabits2 ) {
njn1d0825f2006-03-27 11:37:07 +0000795 *vbits8 = V_BITS8_DEFINED; // Make V bits defined!
796 ok = False;
797 } else {
njndbf7ca72006-03-31 11:57:59 +0000798 tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
njn1d0825f2006-03-27 11:37:07 +0000799 *vbits8 = get_sec_vbits8(a);
800 }
801 return ok;
802}
803
804
805/* --------------- Secondary V bit table ------------ */
806
807// This table holds the full V bit pattern for partially-defined bytes
njndbf7ca72006-03-31 11:57:59 +0000808// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
809// memory.
njn1d0825f2006-03-27 11:37:07 +0000810//
811// Note: the nodes in this table can become stale. Eg. if you write a PDB,
812// then overwrite the same address with a fully defined byte, the sec-V-bit
813// node will not necessarily be removed. This is because checking for
814// whether removal is necessary would slow down the fast paths.
815//
816// To avoid the stale nodes building up too much, we periodically (once the
817// table reaches a certain size) garbage collect (GC) the table by
818// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
819// are stale and haven't been touched for a certain number of collections.
820// If more than a certain proportion of nodes survived, we increase the
821// table size so that GCs occur less often.
822//
// (So this is a bit different to a traditional GC, where you definitely want
824// to remove any dead nodes. It's more like we have a resizable cache and
825// we're trying to find the right balance how many elements to evict and how
826// big to make the cache.)
827//
828// This policy is designed to avoid bad table bloat in the worst case where
829// a program creates huge numbers of stale PDBs -- we would get this bloat
830// if we had no GC -- while handling well the case where a node becomes
831// stale but shortly afterwards is rewritten with a PDB and so becomes
832// non-stale again (which happens quite often, eg. in perf/bz2). If we just
833// remove all stale nodes as soon as possible, we just end up re-adding a
834// lot of them in later again. The "sufficiently stale" approach avoids
835// this. (If a program has many live PDBs, performance will just suck,
836// there's no way around that.)
837
838static OSet* secVBitTable;
839
840// Stats
841static ULong sec_vbits_new_nodes = 0;
842static ULong sec_vbits_updates = 0;
843
844// This must be a power of two; this is checked in mc_pre_clo_init().
845// The size chosen here is a trade-off: if the nodes are bigger (ie. cover
846// a larger address range) they take more space but we can get multiple
847// partially-defined bytes in one if they are close to each other, reducing
848// the number of total nodes. In practice sometimes they are clustered (eg.
849// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
850// row), but often not. So we choose something intermediate.
851#define BYTES_PER_SEC_VBIT_NODE 16
852
853// We make the table bigger if more than this many nodes survive a GC.
854#define MAX_SURVIVOR_PROPORTION 0.5
855
856// Each time we make the table bigger, we increase it by this much.
857#define TABLE_GROWTH_FACTOR 2
858
859// This defines "sufficiently stale" -- any node that hasn't been touched in
860// this many GCs will be removed.
861#define MAX_STALE_AGE 2
862
863// We GC the table when it gets this many nodes in it, ie. it's effectively
864// the table size. It can change.
865static Int secVBitLimit = 1024;
866
867// The number of GCs done, used to age sec-V-bit nodes for eviction.
868// Because it's unsigned, wrapping doesn't matter -- the right answer will
869// come out anyway.
870static UInt GCs_done = 0;
871
// One node of the sec-V-bits table: the full V bit patterns for a
// BYTES_PER_SEC_VBIT_NODE-sized, similarly-aligned group of bytes.
typedef
   struct {
      Addr  a;                                // node base address (OSet key)
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];  // full V bits, one byte each
      UInt  last_touched;                     // GCs_done at last update; used for aging
   }
   SecVBitNode;
879
880static OSet* createSecVBitTable(void)
881{
882 return VG_(OSet_Create)( offsetof(SecVBitNode, a),
883 NULL, // use fast comparisons
884 VG_(malloc), VG_(free) );
885}
886
// Garbage-collect the sec-V-bits table: rebuild it keeping only nodes
// that were touched within the last MAX_STALE_AGE collections or that
// still shadow at least one VA_BITS2_PARTDEFINED byte; afterwards, grow
// secVBitLimit if too many nodes survived.  See the policy discussion
// above the table declarations.
static void gcSecVBitTable(void)
{
   OSet*        secVBitTable2;
   SecVBitNode* n;
   Int          i, n_nodes = 0, n_survivors = 0;

   GCs_done++;

   // Create the new table.
   secVBitTable2 = createSecVBitTable();

   // Traverse the table, moving fresh nodes into the new table.
   VG_(OSet_ResetIter)(secVBitTable);
   while ( (n = VG_(OSet_Next)(secVBitTable)) ) {
      Bool keep = False;
      if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
         // Keep node if it's been touched recently enough (regardless of
         // freshness/staleness).
         keep = True;
      } else {
         // Keep node if any of its bytes are non-stale, ie. still marked
         // VA_BITS2_PARTDEFINED in the main shadow map.  Using
         // get_vabits2() for the lookup is not very efficient, but I don't
         // think it matters.
         for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
            if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
               keep = True;      // Found a non-stale byte, so keep
               break;
            }
         }
      }

      if ( keep ) {
         // Insert a copy of the node into the new table.
         SecVBitNode* n2 =
            VG_(OSet_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
         *n2 = *n;
         VG_(OSet_Insert)(secVBitTable2, n2);
      }
   }

   // Get the before and after sizes.
   n_nodes     = VG_(OSet_Size)(secVBitTable);
   n_survivors = VG_(OSet_Size)(secVBitTable2);

   // Destroy the old table, and put the new one in its place.
   VG_(OSet_Destroy)(secVBitTable, NULL);
   secVBitTable = secVBitTable2;

   if (VG_(clo_verbosity) > 1) {
      Char percbuf[6];
      VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
      VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
                   n_nodes, n_survivors, percbuf);
   }

   // Increase table size if necessary.
   if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
      secVBitLimit *= TABLE_GROWTH_FACTOR;
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
                      secVBitLimit);
   }
}
950
951static UWord get_sec_vbits8(Addr a)
952{
953 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
954 Int amod = a % BYTES_PER_SEC_VBIT_NODE;
955 SecVBitNode* n = VG_(OSet_Lookup)(secVBitTable, &aAligned);
956 UChar vbits8;
957 tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
958 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
959 // make it to the secondary V bits table.
960 vbits8 = n->vbits8[amod];
961 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
962 return vbits8;
963}
964
965static void set_sec_vbits8(Addr a, UWord vbits8)
966{
967 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
968 Int i, amod = a % BYTES_PER_SEC_VBIT_NODE;
969 SecVBitNode* n = VG_(OSet_Lookup)(secVBitTable, &aAligned);
970 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
971 // make it to the secondary V bits table.
972 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
973 if (n) {
974 n->vbits8[amod] = vbits8; // update
975 n->last_touched = GCs_done;
976 sec_vbits_updates++;
977 } else {
978 // New node: assign the specific byte, make the rest invalid (they
979 // should never be read as-is, but be cautious).
980 n = VG_(OSet_AllocNode)(secVBitTable, sizeof(SecVBitNode));
981 n->a = aAligned;
982 for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
983 n->vbits8[i] = V_BITS8_UNDEFINED;
984 }
985 n->vbits8[amod] = vbits8;
986 n->last_touched = GCs_done;
987
988 // Do a table GC if necessary. Nb: do this before inserting the new
989 // node, to avoid erroneously GC'ing the new node.
990 if (secVBitLimit == VG_(OSet_Size)(secVBitTable)) {
991 gcSecVBitTable();
992 }
993
994 // Insert the new node.
995 VG_(OSet_Insert)(secVBitTable, n);
996 sec_vbits_new_nodes++;
997
998 n_secVBit_nodes = VG_(OSet_Size)(secVBitTable);
999 if (n_secVBit_nodes > max_secVBit_nodes)
1000 max_secVBit_nodes = n_secVBit_nodes;
1001 }
1002}
sewardj45d94cc2005-04-20 14:44:11 +00001003
1004/* --------------- Endianness helpers --------------- */
1005
1006/* Returns the offset in memory of the byteno-th most significant byte
1007 in a wordszB-sized word, given the specified endianness. */
njn1d0825f2006-03-27 11:37:07 +00001008static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
sewardj45d94cc2005-04-20 14:44:11 +00001009 UWord byteno ) {
1010 return bigendian ? (wordszB-1-byteno) : byteno;
1011}
1012
sewardj05a46732006-10-17 01:28:10 +00001013
1014/* --------------- Ignored address ranges --------------- */
1015
// Maximum number of ignored address ranges that can be registered.
#define M_IGNORE_RANGES 4

// A small fixed-size set of address ranges within which accesses are
// ignored; filled in by parse_ignore_ranges() and consulted by
// in_ignored_range().  Bounds are half-open: [start, end).
typedef
   struct {
      Int  used;                    // number of slots currently in use
      Addr start[M_IGNORE_RANGES];  // inclusive lower bounds
      Addr end[M_IGNORE_RANGES];    // exclusive upper bounds
   }
   IgnoreRanges;

static IgnoreRanges ignoreRanges;
1027
1028static INLINE Bool in_ignored_range ( Addr a )
1029{
1030 Int i;
1031 if (EXPECTED_TAKEN(ignoreRanges.used == 0))
1032 return False;
1033 for (i = 0; i < ignoreRanges.used; i++) {
1034 if (a >= ignoreRanges.start[i] && a < ignoreRanges.end[i])
1035 return True;
1036 }
1037 return False;
1038}
1039
1040
1041/* Parse a 32- or 64-bit hex number, including leading 0x, from string
1042 starting at *ppc, putting result in *result, and return True. Or
1043 fail, in which case *ppc and *result are undefined, and return
1044 False. */
1045
1046static Bool isHex ( UChar c )
1047{
1048 return ((c >= '0' && c <= '9')
1049 || (c >= 'a' && c <= 'f')
1050 || (c >= 'A' && c <= 'F'));
1051}
1052
1053static UInt fromHex ( UChar c )
1054{
1055 if (c >= '0' && c <= '9')
1056 return (UInt)c - (UInt)'0';
1057 if (c >= 'a' && c <= 'f')
1058 return 10 + (UInt)c - (UInt)'a';
1059 if (c >= 'A' && c <= 'F')
1060 return 10 + (UInt)c - (UInt)'A';
1061 /*NOTREACHED*/
1062 tl_assert(0);
1063 return 0;
1064}
1065
1066static Bool parse_Addr ( UChar** ppc, Addr* result )
1067{
1068 Int used, limit = 2 * sizeof(Addr);
1069 if (**ppc != '0')
1070 return False;
1071 (*ppc)++;
1072 if (**ppc != 'x')
1073 return False;
1074 (*ppc)++;
1075 *result = 0;
1076 used = 0;
1077 while (isHex(**ppc)) {
1078 UInt d = fromHex(**ppc);
1079 tl_assert(d < 16);
1080 *result = ((*result) << 4) | fromHex(**ppc);
1081 (*ppc)++;
1082 used++;
1083 if (used > limit) return False;
1084 }
1085 if (used == 0)
1086 return False;
1087 return True;
1088}
1089
1090/* Parse two such numbers separated by a dash, or fail. */
1091
1092static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
1093{
1094 Bool ok = parse_Addr(ppc, result1);
1095 if (!ok)
1096 return False;
1097 if (**ppc != '-')
1098 return False;
1099 (*ppc)++;
1100 ok = parse_Addr(ppc, result2);
1101 if (!ok)
1102 return False;
1103 return True;
1104}
1105
1106/* Parse a set of ranges separated by commas into 'ignoreRanges', or
1107 fail. */
1108
1109static Bool parse_ignore_ranges ( UChar* str0 )
1110{
1111 Addr start, end;
1112 Bool ok;
1113 UChar* str = str0;
1114 UChar** ppc = &str;
1115 ignoreRanges.used = 0;
1116 while (1) {
1117 ok = parse_range(ppc, &start, &end);
1118 if (!ok)
1119 return False;
1120 if (ignoreRanges.used >= M_IGNORE_RANGES)
1121 return False;
1122 ignoreRanges.start[ignoreRanges.used] = start;
1123 ignoreRanges.end[ignoreRanges.used] = end;
1124 ignoreRanges.used++;
1125 if (**ppc == 0)
1126 return True;
1127 if (**ppc != ',')
1128 return False;
1129 (*ppc)++;
1130 }
1131 /*NOTREACHED*/
1132 return False;
1133}
1134
1135
sewardj45d94cc2005-04-20 14:44:11 +00001136/* --------------- Load/store slow cases. --------------- */
1137
njn1d0825f2006-03-27 11:37:07 +00001138// Forward declarations
1139static void mc_record_address_error ( ThreadId tid, Addr a,
1140 Int size, Bool isWrite );
1141static void mc_record_core_mem_error ( ThreadId tid, Bool isUnaddr, Char* s );
1142static void mc_record_param_error ( ThreadId tid, Addr a, Bool isReg,
1143 Bool isUnaddr, Char* msg );
1144static void mc_record_jump_error ( ThreadId tid, Addr a );
1145
/* Generic, endian-aware slow-path load of nBits (8/16/32/64) starting at
   'a'.  Assembles the result V word byte by byte via get_vbits8() and
   reports an address error if any byte is unaddressable (unless the
   partial-load exemption applies).  The semi-fast cases at the top
   short-circuit the common aligned 32/64-bit loads on 64-bit hosts. */
static
#ifndef PERF_FAST_LOADV
INLINE
#endif
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
   /* Make up a 64-bit result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong  vbits64     = V_BITS64_UNDEFINED;
   SizeT  szB         = nBits / 8;
   SSizeT i           = szB-1;    // Must be signed
   SizeT  n_addrs_bad = 0;
   Addr   ai;
   Bool   partial_load_exemption_applies;
   UChar  vbits8;
   Bool   ok;

   PROF_EVENT(30, "mc_LOADVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from LOADV64 and LOADV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED))
         return V_BITS64_DEFINED;
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED))
         return V_BITS64_UNDEFINED;
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      // Upper 32 bits of the returned value are "defined" padding.
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
      /* else fall into slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   // Assemble the result, most significant byte first.
   for (i = szB-1; i >= 0; i--) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a + byte_offset_w(szB, bigendian, i);
      ok = get_vbits8(ai, &vbits8);
      if (!ok) n_addrs_bad++;
      vbits64 <<= 8;
      vbits64 |= vbits8;
   }

   /* This is a hack which avoids producing errors for code which
      insists in stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressible place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
                                  && VG_IS_WORD_ALIGNED(a)
                                  && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, False );

   return vbits64;
}
1233
1234
/* Generic, endian-aware slow-path store of the nBits (8/16/32/64) of V
   bits in 'vbytes' to the shadow of address 'a'.  Stores byte by byte
   via set_vbits8(), reporting an address error if any target byte is
   unaddressable.  The semi-fast cases at the top short-circuit the
   common aligned 32/64-bit stores on 64-bit hosts. */
static
#ifndef PERF_FAST_STOREV
INLINE
#endif
void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
{
   SizeT szB = nBits / 8;
   SizeT i, n_addrs_bad = 0;
   UChar vbits8;
   Addr  ai;
   Bool  ok;

   PROF_EVENT(35, "mc_STOREVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from STOREV64 and STOREV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                          (VA_BITS16_DEFINED   == vabits16 ||
                           VA_BITS16_UNDEFINED == vabits16) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressible. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS64_DEFINED == vbytes)) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
            return;
         } else if (V_BITS64_UNDEFINED == vbytes) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                          (VA_BITS8_DEFINED   == vabits8 ||
                           VA_BITS8_UNDEFINED == vabits8) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressible. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
            sm->vabits8[sm_off] = VA_BITS8_DEFINED;
            return;
         } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
            sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressibility of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai     = a + byte_offset_w(szB, bigendian, i);
      vbits8 = vbytes & 0xff;
      ok     = set_vbits8(ai, vbits8);
      if (!ok) n_addrs_bad++;
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
}
1321
1322
njn25e49d8e72002-09-23 09:36:25 +00001323/*------------------------------------------------------------*/
1324/*--- Setting permissions over address ranges. ---*/
1325/*------------------------------------------------------------*/
1326
/* Set the V+A bits for the whole range [a, a+lenT).  'vabits16' is the
   16-bit (8-byte-covering) shadow pattern to write -- it must be one of
   the three uniform patterns (noaccess/undefined/defined) -- and
   'dsm_num' names the distinguished secondary map holding that same
   pattern, used to replace whole sec-maps cheaply.  The work is split
   into up to three parts: a leading partial sec-map, any number of
   whole sec-maps, and a trailing partial sec-map. */
static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
                                      UWord dsm_num )
{
   UWord    sm_off, sm_off16;
   UWord    vabits2 = vabits16 & 0x3;
   SizeT    lenA, lenB, len_to_next_secmap;
   Addr     aNext;
   SecMap*  sm;
   SecMap** sm_ptr;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the V+A bits make sense. */
   tl_assert(VA_BITS16_NOACCESS  == vabits16 ||
             VA_BITS16_UNDEFINED == vabits16 ||
             VA_BITS16_DEFINED   == vabits16);

   // This code should never write PDBs;  ensure this.  (See comment above
   // set_vabits2().)
   tl_assert(VA_BITS2_PARTDEFINED != vabits2);

   if (lenT == 0)
      return;

   // Warn (once per call) about suspiciously large requests.
   if (lenT > 100 * 1000 * 1000) {
      if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
         Char* s = "unknown???";
         if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
         if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
         if (vabits16 == VA_BITS16_DEFINED  ) s = "defined";
         VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
                                  "large range %lu (%s)", lenT, s);
      }
   }

#ifndef PERF_FAST_SARP
   /*------------------ debug-only case ------------------ */
   {
      // Endianness doesn't matter here because all bytes are being set to
      // the same value.
      // Nb: We don't have to worry about updating the sec-V-bits table
      // after these set_vabits2() calls because this code never writes
      // VA_BITS2_PARTDEFINED values.
      SizeT i;
      for (i = 0; i < lenT; i++) {
         set_vabits2(a + i, vabits2);
      }
      return;
   }
#endif

   /*------------------ standard handling ------------------ */

   /* Get the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   example_dsm = &sm_distinguished[dsm_num];

   // We have to handle ranges covering various combinations of partial and
   // whole sec-maps.  Here is how parts 1, 2 and 3 are used in each case.
   // Cases marked with a '*' are common.
   //
   //   TYPE                                             PARTS USED
   //   ----                                             ----------
   // * one partial sec-map                  (p)         1
   // - one whole sec-map                    (P)         2
   //
   // * two partial sec-maps                 (pp)        1,3
   // - one partial, one whole sec-map       (pP)        1,2
   // - one whole, one partial sec-map       (Pp)        2,3
   // - two whole sec-maps                   (PP)        2,2
   //
   // * one partial, one whole, one partial  (pPp)       1,2,3
   // - one partial, two whole               (pPP)       1,2,2
   // - two whole, one partial               (PPp)       2,2,3
   // - three whole                          (PPP)       2,2,2
   //
   // * one partial, N-2 whole, one partial  (pP...Pp)   1,2...2,3
   // - one partial, N-1 whole               (pP...PP)   1,2...2,2
   // - N-1 whole, one partial               (PP...Pp)   2,2...2,3
   // - N whole                              (PP...PP)   2,2...2,2

   // Break up total length (lenT) into two parts:  length in the first
   // sec-map (lenA), and the rest (lenB);  lenT == lenA + lenB.
   aNext = start_of_this_sm(a) + SM_SIZE;
   len_to_next_secmap = aNext - a;
   if ( lenT <= len_to_next_secmap ) {
      // Range entirely within one sec-map.  Covers almost all cases.
      PROF_EVENT(151, "set_address_range_perms-single-secmap");
      lenA = lenT;
      lenB = 0;
   } else if (is_start_of_sm(a)) {
      // Range spans at least one whole sec-map, and starts at the beginning
      // of a sec-map; skip to Part 2.
      PROF_EVENT(152, "set_address_range_perms-startof-secmap");
      lenA = 0;
      lenB = lenT;
      goto part2;
   } else {
      // Range spans two or more sec-maps, first one is partial.
      PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
      lenA = len_to_next_secmap;
      lenB = lenT - lenA;
   }

   //------------------------------------------------------------------------
   // Part 1: Deal with the first sec_map.  Most of the time the range will be
   // entirely within a sec_map and this part alone will suffice.  Also,
   // doing it this way lets us avoid repeatedly testing for the crossing of
   // a sec-map boundary within these loops.
   //------------------------------------------------------------------------

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so skip.
         PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
         a    = aNext;
         lenA = 0;
      } else {
         PROF_EVENT(155, "set_address_range_perms-dist-sm1");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 1 byte steps (until 8-aligned)
   while (True) {
      if (VG_IS_8_ALIGNED(a)) break;
      if (lenA < 1)           break;
      PROF_EVENT(156, "set_address_range_perms-loop1a");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenA -= 1;
   }
   // 8-aligned, 8 byte steps
   while (True) {
      if (lenA < 8) break;
      PROF_EVENT(157, "set_address_range_perms-loop8a");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a    += 8;
      lenA -= 8;
   }
   // 1 byte steps (trailing sub-8-byte remainder)
   while (True) {
      if (lenA < 1) break;
      PROF_EVENT(158, "set_address_range_perms-loop1b");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenA -= 1;
   }

   // We've finished the first sec-map.  Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 2: Fast-set entire sec-maps at a time.
   //------------------------------------------------------------------------
  part2:
   // 64KB-aligned, 64KB steps.
   // Nb: we can reach here with lenB < SM_SIZE
   while (True) {
      if (lenB < SM_SIZE) break;
      tl_assert(is_start_of_sm(a));
      PROF_EVENT(159, "set_address_range_perms-loop64K");
      sm_ptr = get_secmap_ptr(a);
      if (!is_distinguished_sm(*sm_ptr)) {
         PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
         // Free the non-distinguished sec-map that we're replacing.  This
         // case happens moderately often, enough to be worthwhile.
         VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
      }
      update_SM_counts(*sm_ptr, example_dsm);
      // Make the sec-map entry point to the example DSM
      *sm_ptr = example_dsm;
      lenB -= SM_SIZE;
      a    += SM_SIZE;
   }

   // We've finished the whole sec-maps.  Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 3: Finish off the final partial sec-map, if necessary.
   //------------------------------------------------------------------------

   tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so stop.
         PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
         return;
      } else {
         PROF_EVENT(162, "set_address_range_perms-dist-sm2");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 8-aligned, 8 byte steps
   while (True) {
      if (lenB < 8) break;
      PROF_EVENT(163, "set_address_range_perms-loop8b");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a    += 8;
      lenB -= 8;
   }
   // 1 byte steps (final sub-8-byte remainder)
   while (True) {
      if (lenB < 1) return;
      PROF_EVENT(164, "set_address_range_perms-loop1c");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenB -= 1;
   }
}
sewardj45d94cc2005-04-20 14:44:11 +00001554
sewardjc859fbf2005-04-22 21:10:28 +00001555
1556/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +00001557
/* Mark the address range [a, a+len) as noaccess: unaddressable, and
   hence with undefined values.  Delegates to the general range-setter
   with the noaccess V+A encoding and the corresponding distinguished
   sec-map, so whole 64KB chunks can be shared rather than copied. */
void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
   PROF_EVENT(40, "MC_(make_mem_noaccess)");
   DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
}
1564
/* Mark the address range [a, a+len) as addressable but with undefined
   values.  Delegates to the general range-setter with the undefined
   V+A encoding and matching distinguished sec-map. */
void MC_(make_mem_undefined) ( Addr a, SizeT len )
{
   PROF_EVENT(41, "MC_(make_mem_undefined)");
   DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}
1571
/* Mark the address range [a, a+len) as addressable and with defined
   values.  Delegates to the general range-setter with the defined
   V+A encoding and matching distinguished sec-map. */
void MC_(make_mem_defined) ( Addr a, SizeT len )
{
   PROF_EVENT(42, "MC_(make_mem_defined)");
   DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
}
1578
sewardjfb1e9ad2006-03-10 13:41:58 +00001579/* For each byte in [a,a+len), if the byte is addressable, make it be
1580 defined, but if it isn't addressible, leave it alone. In other
njndbf7ca72006-03-31 11:57:59 +00001581 words a version of MC_(make_mem_defined) that doesn't mess with
sewardjfb1e9ad2006-03-10 13:41:58 +00001582 addressibility. Low-performance implementation. */
njndbf7ca72006-03-31 11:57:59 +00001583static void make_mem_defined_if_addressable ( Addr a, SizeT len )
sewardjfb1e9ad2006-03-10 13:41:58 +00001584{
1585 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001586 UChar vabits2;
njndbf7ca72006-03-31 11:57:59 +00001587 DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
sewardjfb1e9ad2006-03-10 13:41:58 +00001588 for (i = 0; i < len; i++) {
njn1d0825f2006-03-27 11:37:07 +00001589 vabits2 = get_vabits2( a+i );
1590 if (EXPECTED_TAKEN(VA_BITS2_NOACCESS != vabits2)) {
njndbf7ca72006-03-31 11:57:59 +00001591 set_vabits2(a+i, VA_BITS2_DEFINED);
njn1d0825f2006-03-27 11:37:07 +00001592 }
sewardjfb1e9ad2006-03-10 13:41:58 +00001593 }
1594}
1595
njn9b007f62003-04-07 14:40:25 +00001596
sewardj45f4e7c2005-09-27 19:20:21 +00001597/* --- Block-copy permissions (needed for implementing realloc() and
1598 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +00001599
/* Copy the V+A state of the byte range [src, src+len) onto
   [dst, dst+len).  Used for block-copy style operations (realloc,
   mremap).  Handles overlapping ranges by choosing a safe copy
   direction, and has a vectorised fast path for non-overlapping,
   4-aligned ranges. */
void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
{
   SizeT i, j;
   UChar vabits2, vabits8;
   Bool aligned, nooverlap;

   DEBUG("MC_(copy_address_range_state)\n");
   PROF_EVENT(50, "MC_(copy_address_range_state)");

   /* Trivial cases: nothing to copy, or copy onto itself. */
   if (len == 0 || src == dst)
      return;

   aligned   = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
   nooverlap = src+len <= dst || dst+len <= src;

   if (nooverlap && aligned) {

      /* Vectorised fast case, when no overlap and suitably aligned */
      /* vector loop: move one aligned 32-bit word's worth of vabits
         (8 bits) per iteration. */
      i = 0;
      while (len >= 4) {
         vabits8 = get_vabits8_for_aligned_word32( src+i );
         set_vabits8_for_aligned_word32( dst+i, vabits8 );
         if (EXPECTED_TAKEN(VA_BITS8_DEFINED == vabits8
                            || VA_BITS8_UNDEFINED == vabits8
                            || VA_BITS8_NOACCESS == vabits8)) {
            /* do nothing: none of the 4 bytes is partially defined,
               so there is no secondary V-bit info to move. */
         } else {
            /* have to copy secondary map info for each partially
               defined byte in the word. */
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
               set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
               set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
               set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
               set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
         }
         i += 4;
         len -= 4;
      }
      /* fixup loop: remaining 0..3 bytes, one at a time. */
      while (len >= 1) {
         vabits2 = get_vabits2( src+i );
         set_vabits2( dst+i, vabits2 );
         if (VA_BITS2_PARTDEFINED == vabits2) {
            set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
         }
         i++;
         len--;
      }

   } else {

      /* We have to do things the slow way */
      if (src < dst) {
         /* Destination overlaps the tail of the source (or ranges are
            unaligned); copy backwards so source bytes are read before
            being overwritten. */
         for (i = 0, j = len-1; i < len; i++, j--) {
            PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+j );
            set_vabits2( dst+j, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
            }
         }
      }

      if (src > dst) {
         /* Destination overlaps the head of the source (or ranges are
            unaligned); a forwards copy is safe. */
         for (i = 0; i < len; i++) {
            PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+i );
            set_vabits2( dst+i, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
            }
         }
      }
   }

}
1679
1680
1681/* --- Fast case permission setters, for dealing with stacks. --- */
1682
njn1d0825f2006-03-27 11:37:07 +00001683static INLINE
njndbf7ca72006-03-31 11:57:59 +00001684void make_aligned_word32_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001685{
njn1d0825f2006-03-27 11:37:07 +00001686 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001687 SecMap* sm;
1688
njndbf7ca72006-03-31 11:57:59 +00001689 PROF_EVENT(300, "make_aligned_word32_undefined");
sewardj5d28efc2005-04-21 22:16:29 +00001690
njn1d0825f2006-03-27 11:37:07 +00001691#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001692 MC_(make_mem_undefined)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001693#else
1694 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001695 PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
1696 MC_(make_mem_undefined)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001697 return;
1698 }
1699
njna7c7ebd2006-03-28 12:51:02 +00001700 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001701 sm_off = SM_OFF(a);
njndbf7ca72006-03-31 11:57:59 +00001702 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001703#endif
njn9b007f62003-04-07 14:40:25 +00001704}
1705
sewardj5d28efc2005-04-21 22:16:29 +00001706
njn1d0825f2006-03-27 11:37:07 +00001707static INLINE
1708void make_aligned_word32_noaccess ( Addr a )
sewardj5d28efc2005-04-21 22:16:29 +00001709{
njn1d0825f2006-03-27 11:37:07 +00001710 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001711 SecMap* sm;
1712
sewardj5d28efc2005-04-21 22:16:29 +00001713 PROF_EVENT(310, "make_aligned_word32_noaccess");
1714
njn1d0825f2006-03-27 11:37:07 +00001715#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001716 MC_(make_mem_noaccess)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001717#else
1718 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj5d28efc2005-04-21 22:16:29 +00001719 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001720 MC_(make_mem_noaccess)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001721 return;
1722 }
1723
njna7c7ebd2006-03-28 12:51:02 +00001724 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001725 sm_off = SM_OFF(a);
1726 sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
1727#endif
sewardj5d28efc2005-04-21 22:16:29 +00001728}
1729
1730
njn9b007f62003-04-07 14:40:25 +00001731/* Nb: by "aligned" here we mean 8-byte aligned */
njn1d0825f2006-03-27 11:37:07 +00001732static INLINE
njndbf7ca72006-03-31 11:57:59 +00001733void make_aligned_word64_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001734{
njn1d0825f2006-03-27 11:37:07 +00001735 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001736 SecMap* sm;
1737
njndbf7ca72006-03-31 11:57:59 +00001738 PROF_EVENT(320, "make_aligned_word64_undefined");
sewardj23eb2fd2005-04-22 16:29:19 +00001739
njn1d0825f2006-03-27 11:37:07 +00001740#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001741 MC_(make_mem_undefined)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001742#else
1743 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001744 PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
1745 MC_(make_mem_undefined)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001746 return;
1747 }
1748
njna7c7ebd2006-03-28 12:51:02 +00001749 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001750 sm_off16 = SM_OFF_16(a);
njndbf7ca72006-03-31 11:57:59 +00001751 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001752#endif
njn9b007f62003-04-07 14:40:25 +00001753}
1754
sewardj23eb2fd2005-04-22 16:29:19 +00001755
njn1d0825f2006-03-27 11:37:07 +00001756static INLINE
1757void make_aligned_word64_noaccess ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001758{
njn1d0825f2006-03-27 11:37:07 +00001759 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001760 SecMap* sm;
1761
sewardj23eb2fd2005-04-22 16:29:19 +00001762 PROF_EVENT(330, "make_aligned_word64_noaccess");
1763
njn1d0825f2006-03-27 11:37:07 +00001764#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001765 MC_(make_mem_noaccess)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001766#else
1767 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +00001768 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001769 MC_(make_mem_noaccess)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001770 return;
1771 }
1772
njna7c7ebd2006-03-28 12:51:02 +00001773 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001774 sm_off16 = SM_OFF_16(a);
1775 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
1776#endif
njn9b007f62003-04-07 14:40:25 +00001777}
1778
sewardj23eb2fd2005-04-22 16:29:19 +00001779
njn1d0825f2006-03-27 11:37:07 +00001780/*------------------------------------------------------------*/
1781/*--- Stack pointer adjustment ---*/
1782/*------------------------------------------------------------*/
1783
1784static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
1785{
1786 PROF_EVENT(110, "new_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001787 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001788 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00001789 } else {
njndbf7ca72006-03-31 11:57:59 +00001790 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
njn1d0825f2006-03-27 11:37:07 +00001791 }
1792}
1793
1794static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
1795{
1796 PROF_EVENT(120, "die_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001797 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001798 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001799 } else {
njndbf7ca72006-03-31 11:57:59 +00001800 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
njn1d0825f2006-03-27 11:37:07 +00001801 }
1802}
1803
1804static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
1805{
1806 PROF_EVENT(111, "new_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001807 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001808 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
sewardj05a46732006-10-17 01:28:10 +00001809 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001810 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1811 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001812 } else {
njndbf7ca72006-03-31 11:57:59 +00001813 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
njn1d0825f2006-03-27 11:37:07 +00001814 }
1815}
1816
1817static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
1818{
1819 PROF_EVENT(121, "die_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001820 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001821 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001822 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001823 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
1824 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001825 } else {
njndbf7ca72006-03-31 11:57:59 +00001826 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
njn1d0825f2006-03-27 11:37:07 +00001827 }
1828}
1829
1830static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
1831{
1832 PROF_EVENT(112, "new_mem_stack_12");
sewardj05a46732006-10-17 01:28:10 +00001833 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001834 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1835 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001836 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001837 /* from previous test we don't have 8-alignment at offset +0,
1838 hence must have 8 alignment at offsets +4/-4. Hence safe to
1839 do 4 at +0 and then 8 at +4/. */
njndbf7ca72006-03-31 11:57:59 +00001840 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1841 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001842 } else {
njndbf7ca72006-03-31 11:57:59 +00001843 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
njn1d0825f2006-03-27 11:37:07 +00001844 }
1845}
1846
1847static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
1848{
1849 PROF_EVENT(122, "die_mem_stack_12");
1850 /* Note the -12 in the test */
sewardj43fcfd92006-10-17 23:14:42 +00001851 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
1852 /* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
1853 -4. */
njndbf7ca72006-03-31 11:57:59 +00001854 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1855 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
sewardj05a46732006-10-17 01:28:10 +00001856 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001857 /* We have 4-alignment at +0, but we don't have 8-alignment at
1858 -12. So we must have 8-alignment at -8. Hence do 4 at -12
1859 and then 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001860 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1861 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00001862 } else {
njndbf7ca72006-03-31 11:57:59 +00001863 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
njn1d0825f2006-03-27 11:37:07 +00001864 }
1865}
1866
1867static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
1868{
1869 PROF_EVENT(113, "new_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001870 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001871 /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
njndbf7ca72006-03-31 11:57:59 +00001872 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1873 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001874 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001875 /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
1876 Hence do 4 at +0, 8 at +4, 4 at +12. */
njndbf7ca72006-03-31 11:57:59 +00001877 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1878 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1879 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
njn1d0825f2006-03-27 11:37:07 +00001880 } else {
njndbf7ca72006-03-31 11:57:59 +00001881 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
njn1d0825f2006-03-27 11:37:07 +00001882 }
1883}
1884
1885static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
1886{
1887 PROF_EVENT(123, "die_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001888 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001889 /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001890 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1891 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001892 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001893 /* 8 alignment must be at -12. Do 4 at -16, 8 at -12, 4 at -4. */
njndbf7ca72006-03-31 11:57:59 +00001894 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1895 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1896 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001897 } else {
njndbf7ca72006-03-31 11:57:59 +00001898 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
njn1d0825f2006-03-27 11:37:07 +00001899 }
1900}
1901
1902static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
1903{
1904 PROF_EVENT(114, "new_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001905 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001906 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001907 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1908 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1909 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1910 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
sewardj05a46732006-10-17 01:28:10 +00001911 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001912 /* 8 alignment must be at +4. Hence do 8 at +4,+12,+20 and 4 at
1913 +0,+28. */
njndbf7ca72006-03-31 11:57:59 +00001914 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1915 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1916 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
1917 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
1918 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
njn1d0825f2006-03-27 11:37:07 +00001919 } else {
njndbf7ca72006-03-31 11:57:59 +00001920 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
njn1d0825f2006-03-27 11:37:07 +00001921 }
1922}
1923
1924static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
1925{
1926 PROF_EVENT(124, "die_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001927 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001928 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001929 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1930 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1931 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1932 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
sewardj05a46732006-10-17 01:28:10 +00001933 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001934 /* 8 alignment must be at -4 etc. Hence do 8 at -12,-20,-28 and
1935 4 at -32,-4. */
njndbf7ca72006-03-31 11:57:59 +00001936 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1937 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
1938 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
1939 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1940 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001941 } else {
njndbf7ca72006-03-31 11:57:59 +00001942 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
njn1d0825f2006-03-27 11:37:07 +00001943 }
1944}
1945
1946static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
1947{
1948 PROF_EVENT(115, "new_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001949 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001950 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1951 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1952 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1953 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1954 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1955 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1956 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1957 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1958 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1959 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1960 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1961 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1962 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1963 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
njn1d0825f2006-03-27 11:37:07 +00001964 } else {
njndbf7ca72006-03-31 11:57:59 +00001965 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
njn1d0825f2006-03-27 11:37:07 +00001966 }
1967}
1968
1969static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
1970{
1971 PROF_EVENT(125, "die_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001972 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001973 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1974 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1975 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1976 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1977 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1978 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1979 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1980 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1981 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1982 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1983 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1984 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1985 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1986 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001987 } else {
njndbf7ca72006-03-31 11:57:59 +00001988 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
njn1d0825f2006-03-27 11:37:07 +00001989 }
1990}
1991
1992static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
1993{
1994 PROF_EVENT(116, "new_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00001995 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001996 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1997 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1998 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1999 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2000 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2001 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2002 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2003 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2004 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2005 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2006 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2007 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2008 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2009 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2010 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2011 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
njn1d0825f2006-03-27 11:37:07 +00002012 } else {
njndbf7ca72006-03-31 11:57:59 +00002013 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
njn1d0825f2006-03-27 11:37:07 +00002014 }
2015}
2016
2017static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
2018{
2019 PROF_EVENT(126, "die_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00002020 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002021 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2022 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2023 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2024 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2025 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2026 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2027 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2028 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2029 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2030 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2031 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2032 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2033 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2034 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2035 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2036 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002037 } else {
njndbf7ca72006-03-31 11:57:59 +00002038 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
njn1d0825f2006-03-27 11:37:07 +00002039 }
2040}
2041
2042static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
2043{
2044 PROF_EVENT(117, "new_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002045 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002046 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2047 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2048 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2049 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2050 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2051 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2052 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2053 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2054 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2055 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2056 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2057 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2058 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2059 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2060 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2061 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2062 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2063 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
njn1d0825f2006-03-27 11:37:07 +00002064 } else {
njndbf7ca72006-03-31 11:57:59 +00002065 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
njn1d0825f2006-03-27 11:37:07 +00002066 }
2067}
2068
2069static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
2070{
2071 PROF_EVENT(127, "die_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002072 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002073 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2074 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2075 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2076 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2077 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2078 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2079 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2080 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2081 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2082 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2083 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2084 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2085 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2086 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2087 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2088 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2089 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2090 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002091 } else {
njndbf7ca72006-03-31 11:57:59 +00002092 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
njn1d0825f2006-03-27 11:37:07 +00002093 }
2094}
2095
/* Fast-path handler for the stack pointer moving down by exactly 160
   bytes: mark the newly exposed area (shifted down by the stack
   redzone) as undefined.  Deliberately unrolled into twenty aligned
   64-bit stores for speed; falls back to the generic byte-granular
   routine when the start address is not 8-aligned. */
static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
{
   PROF_EVENT(118, "new_mem_stack_160");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144);
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152);
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
   }
}
2124
/* Fast-path handler for the stack pointer moving up by exactly 160
   bytes: mark the abandoned area below the new SP (shifted by the
   stack redzone) as no-access.  Deliberately unrolled into twenty
   aligned 64-bit stores for speed; falls back to the generic
   byte-granular routine when the address is not 8-aligned. */
static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
{
   PROF_EVENT(128, "die_mem_stack_160");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
   }
}
2153
/* Generic (any-size) handler for the stack growing by 'len' bytes at
   'a': mark the new area, shifted down by the stack redzone, as
   undefined. */
static void mc_new_mem_stack ( Addr a, SizeT len )
{
   PROF_EVENT(115, "new_mem_stack");
   MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
}
2159
/* Generic (any-size) handler for the stack shrinking: mark the
   abandoned 'len' bytes at 'a' (shifted down by the stack redzone)
   as no-access. */
static void mc_die_mem_stack ( Addr a, SizeT len )
{
   PROF_EVENT(125, "die_mem_stack");
   MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
}
njn9b007f62003-04-07 14:40:25 +00002165
sewardj45d94cc2005-04-20 14:44:11 +00002166
njn1d0825f2006-03-27 11:37:07 +00002167/* The AMD64 ABI says:
2168
2169 "The 128-byte area beyond the location pointed to by %rsp is considered
2170 to be reserved and shall not be modified by signal or interrupt
2171 handlers. Therefore, functions may use this area for temporary data
2172 that is not needed across function calls. In particular, leaf functions
2173 may use this area for their entire stack frame, rather than adjusting
2174 the stack pointer in the prologue and epilogue. This area is known as
2175 red zone [sic]."
2176
2177 So after any call or return we need to mark this redzone as containing
2178 undefined values.
2179
2180 Consider this: we're in function f. f calls g. g moves rsp down
2181 modestly (say 16 bytes) and writes stuff all over the red zone, making it
2182 defined. g returns. f is buggy and reads from parts of the red zone
2183 that it didn't write on. But because g filled that area in, f is going
2184 to be picking up defined V bits and so any errors from reading bits of
2185 the red zone it didn't write, will be missed. The only solution I could
2186 think of was to make the red zone undefined when g returns to f.
2187
2188 This is in accordance with the ABI, which makes it clear the redzone
2189 is volatile across function calls.
2190
2191 The problem occurs the other way round too: f could fill the RZ up
2192 with defined values and g could mistakenly read them. So the RZ
2193 also needs to be nuked on function calls.
2194*/
sewardj826ec492005-05-12 18:05:00 +00002195void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
2196{
2197 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +00002198 if (0)
2199 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );
2200
2201# if 0
2202 /* Really slow version */
njndbf7ca72006-03-31 11:57:59 +00002203 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002204# endif
2205
2206# if 0
2207 /* Slow(ish) version, which is fairly easily seen to be correct.
2208 */
2209 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
njndbf7ca72006-03-31 11:57:59 +00002210 make_aligned_word64_undefined(base + 0);
2211 make_aligned_word64_undefined(base + 8);
2212 make_aligned_word64_undefined(base + 16);
2213 make_aligned_word64_undefined(base + 24);
sewardj2a3a1a72005-05-12 23:25:43 +00002214
njndbf7ca72006-03-31 11:57:59 +00002215 make_aligned_word64_undefined(base + 32);
2216 make_aligned_word64_undefined(base + 40);
2217 make_aligned_word64_undefined(base + 48);
2218 make_aligned_word64_undefined(base + 56);
sewardj2a3a1a72005-05-12 23:25:43 +00002219
njndbf7ca72006-03-31 11:57:59 +00002220 make_aligned_word64_undefined(base + 64);
2221 make_aligned_word64_undefined(base + 72);
2222 make_aligned_word64_undefined(base + 80);
2223 make_aligned_word64_undefined(base + 88);
sewardj2a3a1a72005-05-12 23:25:43 +00002224
njndbf7ca72006-03-31 11:57:59 +00002225 make_aligned_word64_undefined(base + 96);
2226 make_aligned_word64_undefined(base + 104);
2227 make_aligned_word64_undefined(base + 112);
2228 make_aligned_word64_undefined(base + 120);
sewardj2a3a1a72005-05-12 23:25:43 +00002229 } else {
njndbf7ca72006-03-31 11:57:59 +00002230 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002231 }
2232# endif
2233
2234 /* Idea is: go fast when
2235 * 8-aligned and length is 128
2236 * the sm is available in the main primary map
njn1d0825f2006-03-27 11:37:07 +00002237 * the address range falls entirely with a single secondary map
2238 If all those conditions hold, just update the V+A bits by writing
2239 directly into the vabits array. (If the sm was distinguished, this
2240 will make a copy and then write to it.)
sewardj2a3a1a72005-05-12 23:25:43 +00002241 */
njn1d0825f2006-03-27 11:37:07 +00002242 if (EXPECTED_TAKEN( len == 128 && VG_IS_8_ALIGNED(base) )) {
2243 /* Now we know the address range is suitably sized and aligned. */
2244 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002245 UWord a_hi = (UWord)(base + 128 - 1);
njn1d0825f2006-03-27 11:37:07 +00002246 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2247 if (a_hi < MAX_PRIMARY_ADDRESS) {
2248 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002249 SecMap* sm = get_secmap_for_writing_low(a_lo);
2250 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2a3a1a72005-05-12 23:25:43 +00002251 /* Now we know that the entire address range falls within a
2252 single secondary map, and that that secondary 'lives' in
2253 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002254 if (EXPECTED_TAKEN(sm == sm_hi)) {
2255 // Finally, we know that the range is entirely within one secmap.
2256 UWord v_off = SM_OFF(a_lo);
2257 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002258 p[ 0] = VA_BITS16_UNDEFINED;
2259 p[ 1] = VA_BITS16_UNDEFINED;
2260 p[ 2] = VA_BITS16_UNDEFINED;
2261 p[ 3] = VA_BITS16_UNDEFINED;
2262 p[ 4] = VA_BITS16_UNDEFINED;
2263 p[ 5] = VA_BITS16_UNDEFINED;
2264 p[ 6] = VA_BITS16_UNDEFINED;
2265 p[ 7] = VA_BITS16_UNDEFINED;
2266 p[ 8] = VA_BITS16_UNDEFINED;
2267 p[ 9] = VA_BITS16_UNDEFINED;
2268 p[10] = VA_BITS16_UNDEFINED;
2269 p[11] = VA_BITS16_UNDEFINED;
2270 p[12] = VA_BITS16_UNDEFINED;
2271 p[13] = VA_BITS16_UNDEFINED;
2272 p[14] = VA_BITS16_UNDEFINED;
2273 p[15] = VA_BITS16_UNDEFINED;
sewardj2a3a1a72005-05-12 23:25:43 +00002274 return;
njn1d0825f2006-03-27 11:37:07 +00002275 }
sewardj2a3a1a72005-05-12 23:25:43 +00002276 }
2277 }
2278
sewardj2e1a6772006-01-18 04:16:27 +00002279 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
sewardj3f5f5562006-06-16 21:39:08 +00002280 if (EXPECTED_TAKEN( len == 288 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00002281 /* Now we know the address range is suitably sized and aligned. */
2282 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002283 UWord a_hi = (UWord)(base + 288 - 1);
njn1d0825f2006-03-27 11:37:07 +00002284 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2285 if (a_hi < MAX_PRIMARY_ADDRESS) {
2286 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002287 SecMap* sm = get_secmap_for_writing_low(a_lo);
2288 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2e1a6772006-01-18 04:16:27 +00002289 /* Now we know that the entire address range falls within a
2290 single secondary map, and that that secondary 'lives' in
2291 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002292 if (EXPECTED_TAKEN(sm == sm_hi)) {
2293 // Finally, we know that the range is entirely within one secmap.
2294 UWord v_off = SM_OFF(a_lo);
2295 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002296 p[ 0] = VA_BITS16_UNDEFINED;
2297 p[ 1] = VA_BITS16_UNDEFINED;
2298 p[ 2] = VA_BITS16_UNDEFINED;
2299 p[ 3] = VA_BITS16_UNDEFINED;
2300 p[ 4] = VA_BITS16_UNDEFINED;
2301 p[ 5] = VA_BITS16_UNDEFINED;
2302 p[ 6] = VA_BITS16_UNDEFINED;
2303 p[ 7] = VA_BITS16_UNDEFINED;
2304 p[ 8] = VA_BITS16_UNDEFINED;
2305 p[ 9] = VA_BITS16_UNDEFINED;
2306 p[10] = VA_BITS16_UNDEFINED;
2307 p[11] = VA_BITS16_UNDEFINED;
2308 p[12] = VA_BITS16_UNDEFINED;
2309 p[13] = VA_BITS16_UNDEFINED;
2310 p[14] = VA_BITS16_UNDEFINED;
2311 p[15] = VA_BITS16_UNDEFINED;
2312 p[16] = VA_BITS16_UNDEFINED;
2313 p[17] = VA_BITS16_UNDEFINED;
2314 p[18] = VA_BITS16_UNDEFINED;
2315 p[19] = VA_BITS16_UNDEFINED;
2316 p[20] = VA_BITS16_UNDEFINED;
2317 p[21] = VA_BITS16_UNDEFINED;
2318 p[22] = VA_BITS16_UNDEFINED;
2319 p[23] = VA_BITS16_UNDEFINED;
2320 p[24] = VA_BITS16_UNDEFINED;
2321 p[25] = VA_BITS16_UNDEFINED;
2322 p[26] = VA_BITS16_UNDEFINED;
2323 p[27] = VA_BITS16_UNDEFINED;
2324 p[28] = VA_BITS16_UNDEFINED;
2325 p[29] = VA_BITS16_UNDEFINED;
2326 p[30] = VA_BITS16_UNDEFINED;
2327 p[31] = VA_BITS16_UNDEFINED;
2328 p[32] = VA_BITS16_UNDEFINED;
2329 p[33] = VA_BITS16_UNDEFINED;
2330 p[34] = VA_BITS16_UNDEFINED;
2331 p[35] = VA_BITS16_UNDEFINED;
sewardj2e1a6772006-01-18 04:16:27 +00002332 return;
njn1d0825f2006-03-27 11:37:07 +00002333 }
sewardj2e1a6772006-01-18 04:16:27 +00002334 }
2335 }
2336
sewardj2a3a1a72005-05-12 23:25:43 +00002337 /* else fall into slow case */
njndbf7ca72006-03-31 11:57:59 +00002338 MC_(make_mem_undefined)(base, len);
sewardj826ec492005-05-12 18:05:00 +00002339}
2340
2341
nethercote8b76fe52004-11-08 19:20:09 +00002342/*------------------------------------------------------------*/
2343/*--- Checking memory ---*/
2344/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002345
/* Outcome of a definedness/addressability scan over a memory range.
   Values start at 5, presumably so a stray use as a plain truth value
   stands out — TODO confirm. */
typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;
2353
2354
njn25e49d8e72002-09-23 09:36:25 +00002355/* Check permissions for address range. If inadequate permissions
2356 exist, *bad_addr is set to the offending address, so the caller can
2357 know what it is. */
2358
/* Returns True if [a .. a+len) is not addressable.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
njndbf7ca72006-03-31 11:57:59 +00002363Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00002364{
nethercote451eae92004-11-02 13:06:32 +00002365 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002366 UWord vabits2;
2367
njndbf7ca72006-03-31 11:57:59 +00002368 PROF_EVENT(60, "check_mem_is_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00002369 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002370 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
njn1d0825f2006-03-27 11:37:07 +00002371 vabits2 = get_vabits2(a);
2372 if (VA_BITS2_NOACCESS != vabits2) {
2373 if (bad_addr != NULL) *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00002374 return False;
2375 }
2376 a++;
2377 }
2378 return True;
2379}
2380
njndbf7ca72006-03-31 11:57:59 +00002381static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002382{
nethercote451eae92004-11-02 13:06:32 +00002383 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002384 UWord vabits2;
2385
njndbf7ca72006-03-31 11:57:59 +00002386 PROF_EVENT(62, "is_mem_addressable");
njn25e49d8e72002-09-23 09:36:25 +00002387 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002388 PROF_EVENT(63, "is_mem_addressable(loop)");
njn1d0825f2006-03-27 11:37:07 +00002389 vabits2 = get_vabits2(a);
2390 if (VA_BITS2_NOACCESS == vabits2) {
njn25e49d8e72002-09-23 09:36:25 +00002391 if (bad_addr != NULL) *bad_addr = a;
2392 return False;
2393 }
2394 a++;
2395 }
2396 return True;
2397}
2398
/* Scan [a, a+len) for bytes that are not both addressable and fully
   defined.  Returns MC_Ok, or MC_AddrErr/MC_ValueErr for the first
   problem byte, whose address is stored via *bad_addr (if non-NULL).
   Note the deliberate fall-through: when a byte is merely undefined
   and --undef-value-errors=no, *bad_addr is set but the scan keeps
   going, so MC_Ok can still be returned. */
static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord vabits2;

   PROF_EVENT(64, "is_mem_defined");
   DEBUG("is_mem_defined\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(65, "is_mem_defined(loop)");
      vabits2 = get_vabits2(a);
      if (VA_BITS2_DEFINED != vabits2) {
         // Error!  Nb: Report addressability errors in preference to
         // definedness errors.  And don't report definedness errors unless
         // --undef-value-errors=yes.
         if (bad_addr != NULL) *bad_addr = a;
         if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
         else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}
2421
2422
2423/* Check a zero-terminated ascii string. Tricky -- don't want to
2424 examine the actual bytes, to find the end, until we're sure it is
2425 safe to do so. */
2426
njndbf7ca72006-03-31 11:57:59 +00002427static Bool mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002428{
njn1d0825f2006-03-27 11:37:07 +00002429 UWord vabits2;
2430
njndbf7ca72006-03-31 11:57:59 +00002431 PROF_EVENT(66, "mc_is_defined_asciiz");
2432 DEBUG("mc_is_defined_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00002433 while (True) {
njndbf7ca72006-03-31 11:57:59 +00002434 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
njn1d0825f2006-03-27 11:37:07 +00002435 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002436 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002437 // Error! Nb: Report addressability errors in preference to
2438 // definedness errors. And don't report definedeness errors unless
2439 // --undef-value-errors=yes.
2440 if (bad_addr != NULL) *bad_addr = a;
2441 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2442 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002443 }
2444 /* Ok, a is safe to read. */
njn1d0825f2006-03-27 11:37:07 +00002445 if (* ((UChar*)a) == 0) {
sewardj45d94cc2005-04-20 14:44:11 +00002446 return MC_Ok;
njn1d0825f2006-03-27 11:37:07 +00002447 }
njn25e49d8e72002-09-23 09:36:25 +00002448 a++;
2449 }
2450}
2451
2452
2453/*------------------------------------------------------------*/
2454/*--- Memory event handlers ---*/
2455/*------------------------------------------------------------*/
2456
njn25e49d8e72002-09-23 09:36:25 +00002457static
njndbf7ca72006-03-31 11:57:59 +00002458void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
2459 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002460{
njn25e49d8e72002-09-23 09:36:25 +00002461 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002462 Bool ok = is_mem_addressable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002463
njn25e49d8e72002-09-23 09:36:25 +00002464 if (!ok) {
2465 switch (part) {
2466 case Vg_CoreSysCall:
njn1d0825f2006-03-27 11:37:07 +00002467 mc_record_param_error ( tid, bad_addr, /*isReg*/False,
nethercote8b76fe52004-11-08 19:20:09 +00002468 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002469 break;
2470
2471 case Vg_CorePThread:
2472 case Vg_CoreSignal:
njn1d0825f2006-03-27 11:37:07 +00002473 mc_record_core_mem_error( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002474 break;
2475
2476 default:
njndbf7ca72006-03-31 11:57:59 +00002477 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002478 }
2479 }
njn25e49d8e72002-09-23 09:36:25 +00002480}
2481
2482static
njndbf7ca72006-03-31 11:57:59 +00002483void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00002484 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002485{
njn25e49d8e72002-09-23 09:36:25 +00002486 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002487 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00002488
nethercote8b76fe52004-11-08 19:20:09 +00002489 if (MC_Ok != res) {
2490 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00002491
njn25e49d8e72002-09-23 09:36:25 +00002492 switch (part) {
2493 case Vg_CoreSysCall:
njn1d0825f2006-03-27 11:37:07 +00002494 mc_record_param_error ( tid, bad_addr, /*isReg*/False,
njndbf7ca72006-03-31 11:57:59 +00002495 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002496 break;
2497
njn1d0825f2006-03-27 11:37:07 +00002498 case Vg_CoreClientReq: // Kludge: make this a CoreMemErr
njn25e49d8e72002-09-23 09:36:25 +00002499 case Vg_CorePThread:
njn1d0825f2006-03-27 11:37:07 +00002500 mc_record_core_mem_error( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002501 break;
2502
2503 /* If we're being asked to jump to a silly address, record an error
2504 message before potentially crashing the entire system. */
2505 case Vg_CoreTranslate:
njn1d0825f2006-03-27 11:37:07 +00002506 mc_record_jump_error( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002507 break;
2508
2509 default:
njndbf7ca72006-03-31 11:57:59 +00002510 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002511 }
2512 }
njn25e49d8e72002-09-23 09:36:25 +00002513}
2514
2515static
njndbf7ca72006-03-31 11:57:59 +00002516void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00002517 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00002518{
nethercote8b76fe52004-11-08 19:20:09 +00002519 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00002520 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00002521
njnca82cc02004-11-22 17:18:48 +00002522 tl_assert(part == Vg_CoreSysCall);
njndbf7ca72006-03-31 11:57:59 +00002523 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00002524 if (MC_Ok != res) {
2525 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
njn1d0825f2006-03-27 11:37:07 +00002526 mc_record_param_error ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002527 }
njn25e49d8e72002-09-23 09:36:25 +00002528}
2529
njn25e49d8e72002-09-23 09:36:25 +00002530static
nethercote451eae92004-11-02 13:06:32 +00002531void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002532{
njndbf7ca72006-03-31 11:57:59 +00002533 /* Ignore the permissions, just make it defined. Seems to work... */
njnba7b4582006-09-21 15:59:30 +00002534 // Because code is defined, initialised variables get put in the data
2535 // segment and are defined, and uninitialised variables get put in the
2536 // bss segment and are auto-zeroed (and so defined).
2537 //
2538 // It's possible that there will be padding between global variables.
2539 // This will also be auto-zeroed, and marked as defined by Memcheck. If
2540 // a program uses it, Memcheck will not complain. This is arguably a
2541 // false negative, but it's a grey area -- the behaviour is defined (the
2542 // padding is zeroed) but it's probably not what the user intended. And
2543 // we can't avoid it.
nethercote451eae92004-11-02 13:06:32 +00002544 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
njndbf7ca72006-03-31 11:57:59 +00002545 a, (ULong)len, rr, ww, xx);
2546 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002547}
2548
/* Newly mmap'd memory: mark it all as defined.  As with startup
   memory, the rr/ww/xx permission flags are ignored. */
static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   MC_(make_mem_defined)(a, len);
}
2554
/* After the core has written [a, a+len) on the client's behalf
   (e.g. a syscall filling a buffer), the contents are defined. */
static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   MC_(make_mem_defined)(a, len);
}
njn25e49d8e72002-09-23 09:36:25 +00002560
sewardj45d94cc2005-04-20 14:44:11 +00002561
njn25e49d8e72002-09-23 09:36:25 +00002562/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002563/*--- Register event handlers ---*/
2564/*------------------------------------------------------------*/
2565
sewardj45d94cc2005-04-20 14:44:11 +00002566/* When some chunk of guest state is written, mark the corresponding
2567 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00002568 chunks of guest state, hence the _SIZE value, which has to be as
2569 big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00002570*/
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   // Scratch buffer of "defined" V bits; must be at least as big as
   // the largest guest-state chunk any caller initialises in one go.
# define MAX_REG_WRITE_SIZE 1408
   UChar area[MAX_REG_WRITE_SIZE];
   tl_assert(size <= MAX_REG_WRITE_SIZE);
   VG_(memset)(area, V_BITS8_DEFINED, size);
   // Copy the "defined" pattern into the shadow of [offset, offset+size).
   VG_(set_shadow_regs_area)( tid, offset, size, area );
# undef MAX_REG_WRITE_SIZE
}
2581
/* As mc_post_reg_write, but for register writes done on behalf of a
   client call; the called function's address 'f' is unused here. */
static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}
2589
sewardj45d94cc2005-04-20 14:44:11 +00002590/* Look at the definedness of the guest's shadow state for
2591 [offset, offset+len). If any part of that is undefined, record
2592 a parameter error.
2593*/
static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
                              OffT offset, SizeT size)
{
   Int i;
   Bool bad;

   // Shadow (V bits) for the guest-state slice under inspection.
   UChar area[16];
   tl_assert(size <= 16);

   VG_(get_shadow_regs_area)( tid, offset, size, area );

   // Any byte not fully defined makes the whole read suspect.
   bad = False;
   for (i = 0; i < size; i++) {
      if (area[i] != V_BITS8_DEFINED) {
         bad = True;
         break;
      }
   }

   if (bad)
      // Address 0 + isReg=True: the error is in a register, not memory.
      mc_record_param_error ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
}
njnd3040452003-05-19 15:04:06 +00002616
njn25e49d8e72002-09-23 09:36:25 +00002617
sewardj6cf40ff2005-04-20 22:31:26 +00002618/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00002619/*--- Error and suppression types ---*/
2620/*------------------------------------------------------------*/
2621
/* The classification of a faulting address. */
typedef
   enum {
      Undescribed,   // as-yet unclassified
      Stack,         // on some thread's stack
      Unknown,       // classification yielded nothing useful
      Freed, Mallocd,   // in a freed / still-allocated heap block
      UserG,         // in a user-defined block
      Mempool,       // in a mempool
      Register,      // in a register;  for Param errors only
   }
   AddrKind;

/* Records info about a faulting address. */
typedef
   struct {                   // Used by:
      AddrKind akind;         //   ALL
      SizeT blksize;          //   Freed, Mallocd
      OffT rwoffset;          //   Freed, Mallocd
      ExeContext* lastchange; //   Freed, Mallocd
      ThreadId stack_tid;     //   Stack
      const Char *desc;       //   UserG
      Bool maybe_gcc;         // True if just below %esp -- could be a gcc bug.
   }
   AddrInfo;

/* Kinds of suppression that may appear in a suppressions file. */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Use of invalid values of given size (MemCheck only)
      Value0Supp, Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Invalid read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;

/* What kind of error it is. */
typedef
   enum { ValueErr,
          CoreMemErr,   // Error in core op (pthread, signals) or client req
          AddrErr,
          ParamErr, UserErr,  /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr,
          OverlapErr,
          LeakErr,
          IllegalMempoolErr,
   }
   MC_ErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {                // Used by:
      AxsKind axskind;     //   AddrErr
      Int size;            //   AddrErr, ValueErr
      AddrInfo addrinfo;   //   {Addr,Free,FreeMismatch,Param,User}Err
      Bool isUnaddr;       //   {CoreMem,Param,User}Err
   }
   MC_Error;
2693
2694/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00002695/*--- Printing errors ---*/
2696/*------------------------------------------------------------*/
2697
/* Print a human-readable (or, under --xml=yes, <auxwhat>-wrapped)
   description of faulting address 'a', based on the classification
   already recorded in *ai. */
static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai )
{
   HChar* xpre  = VG_(clo_xml) ? "  <auxwhat>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</auxwhat>"  : "";

   switch (ai->akind) {
      case Stack:
         VG_(message)(Vg_UserMsg,
                      "%sAddress 0x%llx is on thread %d's stack%s",
                      xpre, (ULong)a, ai->stack_tid, xpost);
         break;
      case Unknown:
         if (ai->maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               "%sAddress 0x%llx is just below the stack ptr.  "
               "To suppress, use: --workaround-gcc296-bugs=yes%s",
               xpre, (ULong)a, xpost
            );
         } else {
            VG_(message)(Vg_UserMsg,
               "%sAddress 0x%llx "
               "is not stack'd, malloc'd or (recently) free'd%s",
               xpre, (ULong)a, xpost);
         }
         break;
      case Freed: case Mallocd: case UserG: case Mempool: {
         SizeT delta;
         const Char* relative;
         const Char* kind;
         // Pick the noun: mempool vs block, overridden by any
         // client-supplied description.
         if (ai->akind == Mempool) {
            kind = "mempool";
         } else {
            kind = "block";
         }
         if (ai->desc != NULL)
            kind = ai->desc;

         // Describe the fault position relative to the block's bounds.
         if (ai->rwoffset < 0) {
            delta    = (SizeT)(- ai->rwoffset);
            relative = "before";
         } else if (ai->rwoffset >= ai->blksize) {
            delta    = ai->rwoffset - ai->blksize;
            relative = "after";
         } else {
            delta    = ai->rwoffset;
            relative = "inside";
         }
         VG_(message)(Vg_UserMsg,
            "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
            xpre,
            a, delta, relative, kind,
            ai->blksize,
            ai->akind==Mallocd ? "alloc'd"
               : ai->akind==Freed ? "free'd"
                                  : "client-defined",
            xpost);
         // Show where the block was last allocated/freed/defined.
         VG_(pp_ExeContext)(ai->lastchange);
         break;
      }
      case Register:
         // print nothing
         tl_assert(0 == a);
         break;
      default:
         VG_(tool_panic)("mc_pp_AddrInfo");
   }
}
2765
njn51d827b2005-05-09 01:02:08 +00002766static void mc_pp_Error ( Error* err )
njn9e63cb62005-05-08 18:34:59 +00002767{
njn1d0825f2006-03-27 11:37:07 +00002768 MC_Error* err_extra = VG_(get_error_extra)(err);
njn9e63cb62005-05-08 18:34:59 +00002769
sewardj71bc3cb2005-05-19 00:25:45 +00002770 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
2771 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
2772
njn9e63cb62005-05-08 18:34:59 +00002773 switch (VG_(get_error_kind)(err)) {
2774 case CoreMemErr: {
2775 Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
sewardj71bc3cb2005-05-19 00:25:45 +00002776 if (VG_(clo_xml))
2777 VG_(message)(Vg_UserMsg, " <kind>CoreMemError</kind>");
2778 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
2779 VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
2780 xpre, VG_(get_error_string)(err), s, xpost);
2781
njn9e63cb62005-05-08 18:34:59 +00002782 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2783 break;
2784
2785 }
2786
2787 case ValueErr:
2788 if (err_extra->size == 0) {
sewardj71bc3cb2005-05-19 00:25:45 +00002789 if (VG_(clo_xml))
2790 VG_(message)(Vg_UserMsg, " <kind>UninitCondition</kind>");
2791 VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
2792 " on uninitialised value(s)%s",
2793 xpre, xpost);
njn9e63cb62005-05-08 18:34:59 +00002794 } else {
sewardj71bc3cb2005-05-19 00:25:45 +00002795 if (VG_(clo_xml))
2796 VG_(message)(Vg_UserMsg, " <kind>UninitValue</kind>");
2797 VG_(message)(Vg_UserMsg,
2798 "%sUse of uninitialised value of size %d%s",
2799 xpre, err_extra->size, xpost);
njn9e63cb62005-05-08 18:34:59 +00002800 }
2801 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2802 break;
2803
2804 case ParamErr: {
2805 Bool isReg = ( Register == err_extra->addrinfo.akind );
2806 Char* s1 = ( isReg ? "contains" : "points to" );
2807 Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
2808 if (isReg) tl_assert(!err_extra->isUnaddr);
2809
sewardj71bc3cb2005-05-19 00:25:45 +00002810 if (VG_(clo_xml))
2811 VG_(message)(Vg_UserMsg, " <kind>SyscallParam</kind>");
2812 VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
2813 xpre, VG_(get_error_string)(err), s1, s2, xpost);
njn9e63cb62005-05-08 18:34:59 +00002814
2815 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn1d0825f2006-03-27 11:37:07 +00002816 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
njn9e63cb62005-05-08 18:34:59 +00002817 break;
2818 }
2819 case UserErr: {
2820 Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );
2821
sewardj71bc3cb2005-05-19 00:25:45 +00002822 if (VG_(clo_xml))
2823 VG_(message)(Vg_UserMsg, " <kind>ClientCheck</kind>");
njn9e63cb62005-05-08 18:34:59 +00002824 VG_(message)(Vg_UserMsg,
sewardj71bc3cb2005-05-19 00:25:45 +00002825 "%s%s byte(s) found during client check request%s",
2826 xpre, s, xpost);
njn9e63cb62005-05-08 18:34:59 +00002827
2828 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn1d0825f2006-03-27 11:37:07 +00002829 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
njn9e63cb62005-05-08 18:34:59 +00002830 break;
2831 }
njn1d0825f2006-03-27 11:37:07 +00002832 case FreeErr:
2833 if (VG_(clo_xml))
2834 VG_(message)(Vg_UserMsg, " <kind>InvalidFree</kind>");
2835 VG_(message)(Vg_UserMsg,
2836 "%sInvalid free() / delete / delete[]%s",
2837 xpre, xpost);
2838 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2839 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
njn9e63cb62005-05-08 18:34:59 +00002840 break;
njn1d0825f2006-03-27 11:37:07 +00002841
2842 case FreeMismatchErr:
2843 if (VG_(clo_xml))
2844 VG_(message)(Vg_UserMsg, " <kind>MismatchedFree</kind>");
2845 VG_(message)(Vg_UserMsg,
2846 "%sMismatched free() / delete / delete []%s",
2847 xpre, xpost);
2848 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2849 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
2850 break;
2851
2852 case AddrErr:
2853 switch (err_extra->axskind) {
2854 case ReadAxs:
2855 if (VG_(clo_xml))
2856 VG_(message)(Vg_UserMsg, " <kind>InvalidRead</kind>");
2857 VG_(message)(Vg_UserMsg,
2858 "%sInvalid read of size %d%s",
2859 xpre, err_extra->size, xpost );
2860 break;
2861 case WriteAxs:
2862 if (VG_(clo_xml))
2863 VG_(message)(Vg_UserMsg, " <kind>InvalidWrite</kind>");
2864 VG_(message)(Vg_UserMsg,
2865 "%sInvalid write of size %d%s",
2866 xpre, err_extra->size, xpost );
2867 break;
2868 case ExecAxs:
2869 if (VG_(clo_xml))
2870 VG_(message)(Vg_UserMsg, " <kind>InvalidJump</kind>");
2871 VG_(message)(Vg_UserMsg,
2872 "%sJump to the invalid address "
2873 "stated on the next line%s",
2874 xpre, xpost);
2875 break;
2876 default:
2877 VG_(tool_panic)("mc_pp_Error(axskind)");
2878 }
2879 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2880 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
2881 break;
2882
2883 case OverlapErr: {
2884 OverlapExtra* ov_extra = (OverlapExtra*)VG_(get_error_extra)(err);
2885 if (VG_(clo_xml))
2886 VG_(message)(Vg_UserMsg, " <kind>Overlap</kind>");
2887 if (ov_extra->len == -1)
2888 VG_(message)(Vg_UserMsg,
2889 "%sSource and destination overlap in %s(%p, %p)%s",
2890 xpre,
2891 VG_(get_error_string)(err),
2892 ov_extra->dst, ov_extra->src,
2893 xpost);
2894 else
2895 VG_(message)(Vg_UserMsg,
2896 "%sSource and destination overlap in %s(%p, %p, %d)%s",
2897 xpre,
2898 VG_(get_error_string)(err),
2899 ov_extra->dst, ov_extra->src, ov_extra->len,
2900 xpost);
2901 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2902 break;
2903 }
2904 case LeakErr: {
2905 MC_(pp_LeakError)(err_extra);
2906 break;
2907 }
2908
2909 case IllegalMempoolErr:
2910 if (VG_(clo_xml))
2911 VG_(message)(Vg_UserMsg, " <kind>InvalidMemPool</kind>");
2912 VG_(message)(Vg_UserMsg, "%sIllegal memory pool address%s",
2913 xpre, xpost);
2914 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2915 mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
2916 break;
2917
2918 default:
2919 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
2920 VG_(get_error_kind)(err));
2921 VG_(tool_panic)("unknown error code in mc_pp_Error)");
njn9e63cb62005-05-08 18:34:59 +00002922 }
2923}
2924
2925/*------------------------------------------------------------*/
2926/*--- Recording errors ---*/
2927/*------------------------------------------------------------*/
2928
njn1d0825f2006-03-27 11:37:07 +00002929/* These many bytes below %ESP are considered addressible if we're
2930 doing the --workaround-gcc296-bugs hack. */
2931#define VG_GCC296_BUG_STACK_SLOP 1024
2932
2933/* Is this address within some small distance below %ESP? Used only
2934 for the --workaround-gcc296-bugs kludge. */
2935static Bool is_just_below_ESP( Addr esp, Addr aa )
2936{
2937 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
2938 return True;
2939 else
2940 return False;
2941}
2942
/* Reset an MC_Error to benign defaults; callers then overwrite only
   the fields relevant to the particular error kind being recorded. */
static void mc_clear_MC_Error ( MC_Error* err_extra )
{
   err_extra->axskind = ReadAxs;
   err_extra->size = 0;
   err_extra->isUnaddr = True;
   err_extra->addrinfo.akind = Unknown;
   err_extra->addrinfo.blksize = 0;
   err_extra->addrinfo.rwoffset = 0;
   err_extra->addrinfo.lastchange = NULL;
   err_extra->addrinfo.stack_tid = VG_INVALID_THREADID;
   err_extra->addrinfo.maybe_gcc = False;
   err_extra->addrinfo.desc = NULL;
}
2956
2957/* This one called from generated code and non-generated code. */
/* Record an invalid-address (A-bit) error of 'size' bytes at 'a',
   unless it falls in an ignored range, is excused by an AIX-specific
   hack, or is just below the stack pointer while the gcc-2.96
   workaround is enabled. */
static void mc_record_address_error ( ThreadId tid, Addr a, Int size,
                                      Bool isWrite )
{
   MC_Error err_extra;
   Bool just_below_esp;

   /* Client-requested ignore ranges are never reported. */
   if (in_ignored_range(a))
      return;

# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   /* AIX zero-page handling. On AIX, reads from page zero are,
      bizarrely enough, legitimate. Writes to page zero aren't,
      though. Since memcheck can't distinguish reads from writes, the
      best we can do is to 'act normal' and mark the A bits in the
      normal way as noaccess, but then hide any reads from that page
      that get reported here. */
   /* NOTE(review): 'a >= 0' is vacuous if Addr is an unsigned type;
      harmless, but worth confirming the intent. */
   if ((!isWrite) && a >= 0 && a+size <= 4096)
      return;

   /* Appalling AIX hack. It suppresses reads done by glink
      fragments. Getting rid of this would require figuring out
      somehow where the referenced data areas are (and their
      sizes). */
   if ((!isWrite) && size == sizeof(Word)) {
      UInt i1, i2;
      UInt* pc = (UInt*)VG_(get_IP)(tid);
      if (sizeof(Word) == 4) {
         i1 = 0x800c0000; /* lwz r0,0(r12) */
         i2 = 0x804c0004; /* lwz r2,4(r12) */
      } else {
         i1 = 0xe80c0000; /* ld r0,0(r12) */
         i2 = 0xe84c0008; /* ld r2,8(r12) */
      }
      /* Excuse the access if the PC sits on either of the two glink
         instructions. */
      if (pc[0] == i1 && pc[1] == i2) return;
      if (pc[0] == i2 && pc[-1] == i1) return;
   }
# endif

   just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   mc_clear_MC_Error( &err_extra );
   err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
   err_extra.size = size;
   err_extra.addrinfo.akind = Undescribed;
   err_extra.addrinfo.maybe_gcc = just_below_esp;
   VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
}
3010
3011/* These ones are called from non-generated code */
3012
3013/* This is for memory errors in pthread functions, as opposed to pthread API
3014 errors which are found by the core. */
3015static void mc_record_core_mem_error ( ThreadId tid, Bool isUnaddr, Char* msg )
3016{
3017 MC_Error err_extra;
3018
3019 mc_clear_MC_Error( &err_extra );
3020 err_extra.isUnaddr = isUnaddr;
3021 VG_(maybe_record_error)( tid, CoreMemErr, /*addr*/0, msg, &err_extra );
3022}
3023
3024// Three kinds of param errors:
3025// - register arg contains undefined bytes
3026// - memory arg is unaddressable
3027// - memory arg contains undefined bytes
3028// 'isReg' and 'isUnaddr' dictate which of these it is.
3029static void mc_record_param_error ( ThreadId tid, Addr a, Bool isReg,
3030 Bool isUnaddr, Char* msg )
3031{
3032 MC_Error err_extra;
3033
sewardj1cf56cf2006-05-22 13:59:42 +00003034 if (!isUnaddr) tl_assert(MC_(clo_undef_value_errors));
njn1d0825f2006-03-27 11:37:07 +00003035 tl_assert(VG_INVALID_THREADID != tid);
3036 if (isUnaddr) tl_assert(!isReg); // unaddressable register is impossible
3037 mc_clear_MC_Error( &err_extra );
3038 err_extra.addrinfo.akind = ( isReg ? Register : Undescribed );
3039 err_extra.isUnaddr = isUnaddr;
3040 VG_(maybe_record_error)( tid, ParamErr, a, msg, &err_extra );
3041}
3042
3043static void mc_record_jump_error ( ThreadId tid, Addr a )
3044{
3045 MC_Error err_extra;
3046
3047 tl_assert(VG_INVALID_THREADID != tid);
3048 mc_clear_MC_Error( &err_extra );
3049 err_extra.axskind = ExecAxs;
3050 err_extra.size = 1; // size only used for suppressions
3051 err_extra.addrinfo.akind = Undescribed;
3052 VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
3053}
3054
3055void MC_(record_free_error) ( ThreadId tid, Addr a )
3056{
3057 MC_Error err_extra;
3058
3059 tl_assert(VG_INVALID_THREADID != tid);
3060 mc_clear_MC_Error( &err_extra );
3061 err_extra.addrinfo.akind = Undescribed;
3062 VG_(maybe_record_error)( tid, FreeErr, a, /*s*/NULL, &err_extra );
3063}
3064
3065void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
3066{
3067 MC_Error err_extra;
3068
3069 tl_assert(VG_INVALID_THREADID != tid);
3070 mc_clear_MC_Error( &err_extra );
3071 err_extra.addrinfo.akind = Undescribed;
3072 VG_(maybe_record_error)( tid, IllegalMempoolErr, a, /*s*/NULL, &err_extra );
3073}
3074
3075void MC_(record_freemismatch_error) ( ThreadId tid, Addr a, MC_Chunk* mc )
3076{
3077 MC_Error err_extra;
3078 AddrInfo* ai;
3079
3080 tl_assert(VG_INVALID_THREADID != tid);
3081 mc_clear_MC_Error( &err_extra );
3082 ai = &err_extra.addrinfo;
3083 ai->akind = Mallocd; // Nb: not 'Freed'
3084 ai->blksize = mc->size;
3085 ai->rwoffset = (Int)a - (Int)mc->data;
3086 ai->lastchange = mc->where;
3087 VG_(maybe_record_error)( tid, FreeMismatchErr, a, /*s*/NULL, &err_extra );
3088}
3089
3090static void mc_record_overlap_error ( ThreadId tid,
3091 Char* function, OverlapExtra* ov_extra )
3092{
3093 VG_(maybe_record_error)(
3094 tid, OverlapErr, /*addr*/0, /*s*/function, ov_extra );
3095}
3096
3097Bool MC_(record_leak_error) ( ThreadId tid, /*LeakExtra*/void* leak_extra,
3098 ExeContext* where, Bool print_record )
3099{
3100 return
3101 VG_(unique_error) ( tid, LeakErr, /*Addr*/0, /*s*/NULL,
3102 /*extra*/leak_extra, where, print_record,
3103 /*allow_GDB_attach*/False, /*count_error*/False );
3104}
3105
3106
njn02bc4b82005-05-15 17:28:26 +00003107/* Creates a copy of the 'extra' part, updates the copy with address info if
njn9e63cb62005-05-08 18:34:59 +00003108 necessary, and returns the copy. */
3109/* This one called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00003110static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00003111{
njn1d0825f2006-03-27 11:37:07 +00003112 MC_Error err_extra;
njn9e63cb62005-05-08 18:34:59 +00003113
njn1d0825f2006-03-27 11:37:07 +00003114 tl_assert(MC_(clo_undef_value_errors));
3115 mc_clear_MC_Error( &err_extra );
njn9e63cb62005-05-08 18:34:59 +00003116 err_extra.size = size;
3117 err_extra.isUnaddr = False;
3118 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
3119}
3120
3121/* This called from non-generated code */
3122
njn96364822005-05-08 19:04:53 +00003123static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
3124 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00003125{
njn1d0825f2006-03-27 11:37:07 +00003126 MC_Error err_extra;
njn9e63cb62005-05-08 18:34:59 +00003127
3128 tl_assert(VG_INVALID_THREADID != tid);
njn1d0825f2006-03-27 11:37:07 +00003129 mc_clear_MC_Error( &err_extra );
njn9e63cb62005-05-08 18:34:59 +00003130 err_extra.addrinfo.akind = Undescribed;
3131 err_extra.isUnaddr = isUnaddr;
3132 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
3133}
3134
njn1d0825f2006-03-27 11:37:07 +00003135__attribute__ ((unused))
3136static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
3137{
3138 if (ai1->akind != Undescribed
3139 && ai2->akind != Undescribed
3140 && ai1->akind != ai2->akind)
3141 return False;
3142 if (ai1->akind == Freed || ai1->akind == Mallocd) {
3143 if (ai1->blksize != ai2->blksize)
3144 return False;
3145 if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
3146 return False;
3147 }
3148 return True;
3149}
3150
3151/* Compare error contexts, to detect duplicates. Note that if they
3152 are otherwise the same, the faulting addrs and associated rwoffsets
3153 are allowed to be different. */
static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
{
   MC_Error* e1_extra = VG_(get_error_extra)(e1);
   MC_Error* e2_extra = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case CoreMemErr: {
         /* Equal if both have the same unaddr/uninit flavour and the
            same (pointer-identical or textually equal) message. */
         Char *e1s, *e2s;
         if (e1_extra->isUnaddr != e2_extra->isUnaddr) return False;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;
         if (0 == VG_(strcmp)(e1s, e2s)) return True;
         return False;
      }

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes. Currently, the 2nd of those to be
      // detected won't be reported. That is (nearly?) always the memory
      // error, which is good.
      case ParamErr:
         if (0 != VG_(strcmp)(VG_(get_error_string)(e1),
                              VG_(get_error_string)(e2))) return False;
         // fall through
      case UserErr:
         if (e1_extra->isUnaddr != e2_extra->isUnaddr) return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors. Not even AddrErr
            below does that. So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* Duplicates keyed on access size only; see comment above. */
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      case ValueErr:
         if (e1_extra->size != e2_extra->size) return False;
         return True;

      case OverlapErr:
         return True;

      case LeakErr:
         VG_(tool_panic)("Shouldn't get LeakErr in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      case IllegalMempoolErr:
         return True;

      default:
         VG_(printf)("Error:\n unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
3228
3229/* Function used when searching MC_Chunk lists */
static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
{
   /* True iff 'a' lies within the chunk's payload or the assumed
      redzones of MC_MALLOC_REDZONE_SZB bytes around it. */
   // Nb: this is not quite right! It assumes that the heap block has
   // a redzone of size MC_MALLOC_REDZONE_SZB. That's true for malloc'd
   // blocks, but not necessarily true for custom-alloc'd blocks. So
   // in some cases this could result in an incorrect description (eg.
   // saying "12 bytes after block A" when really it's within block B.
   // Fixing would require adding redzone size to MC_Chunks, though.
   return VG_(addr_is_in_block)( a, mc->data, mc->size,
                                 MC_MALLOC_REDZONE_SZB );
}
3241
3242// Forward declaration
3243static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
3244
3245/* Describe an address as best you can, for error messages,
3246 putting the result in ai. */
3247static void describe_addr ( Addr a, AddrInfo* ai )
3248{
3249 MC_Chunk* mc;
3250 ThreadId tid;
3251 Addr stack_min, stack_max;
3252
3253 /* Perhaps it's a user-def'd block? */
3254 if (client_perm_maybe_describe( a, ai ))
3255 return;
3256
3257 /* Perhaps it's on a thread's stack? */
3258 VG_(thread_stack_reset_iter)();
3259 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
3260 if (stack_min <= a && a <= stack_max) {
3261 ai->akind = Stack;
3262 ai->stack_tid = tid;
3263 return;
3264 }
3265 }
3266 /* Search for a recently freed block which might bracket it. */
3267 mc = MC_(get_freed_list_head)();
3268 while (mc) {
3269 if (addr_is_in_MC_Chunk(mc, a)) {
3270 ai->akind = Freed;
3271 ai->blksize = mc->size;
3272 ai->rwoffset = (Int)a - (Int)mc->data;
3273 ai->lastchange = mc->where;
3274 return;
3275 }
3276 mc = mc->next;
3277 }
3278 /* Search for a currently malloc'd block which might bracket it. */
3279 VG_(HT_ResetIter)(MC_(malloc_list));
3280 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
3281 if (addr_is_in_MC_Chunk(mc, a)) {
3282 ai->akind = Mallocd;
3283 ai->blksize = mc->size;
3284 ai->rwoffset = (Int)(a) - (Int)mc->data;
3285 ai->lastchange = mc->where;
3286 return;
3287 }
3288 }
3289 /* Clueless ... */
3290 ai->akind = Unknown;
3291 return;
3292}
3293
3294/* Updates the copy with address info if necessary (but not for all errors). */
/* Returns the number of bytes of 'extra' that VG_(maybe_record_error)
   should copy when storing this error. */
static UInt mc_update_extra( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   // These two don't have addresses associated with them, and so don't
   // need any updating.
   case CoreMemErr:
   case ValueErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(Unknown == extra->addrinfo.akind);
      return sizeof(MC_Error);
   }

   // ParamErrs sometimes involve a memory address; call describe_addr() in
   // this case.
   case ParamErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(Undescribed == extra->addrinfo.akind ||
                Register == extra->addrinfo.akind);
      if (Undescribed == extra->addrinfo.akind)
         describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
      return sizeof(MC_Error);
   }

   // These four always involve a memory address.
   case AddrErr:
   case UserErr:
   case FreeErr:
   case IllegalMempoolErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(Undescribed == extra->addrinfo.akind);
      describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
      return sizeof(MC_Error);
   }

   // FreeMismatchErrs have already had their address described; this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected. However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case FreeMismatchErr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      tl_assert(extra && Mallocd == extra->addrinfo.akind);
      (void)client_perm_maybe_describe( VG_(get_error_address)(err),
                                        &(extra->addrinfo) );
      return sizeof(MC_Error);
   }

   // No memory address involved with these ones. Nb: for LeakErrs the
   // returned size does not matter -- LeakErrs are always shown with
   // VG_(unique_error)() so they're not copied.
   case LeakErr: return 0;
   case OverlapErr: return sizeof(OverlapExtra);

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
3350
njn9e63cb62005-05-08 18:34:59 +00003351/*------------------------------------------------------------*/
3352/*--- Suppressions ---*/
3353/*------------------------------------------------------------*/
3354
njn51d827b2005-05-09 01:02:08 +00003355static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00003356{
3357 SuppKind skind;
3358
njn1d0825f2006-03-27 11:37:07 +00003359 if (VG_STREQ(name, "Param")) skind = ParamSupp;
3360 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
3361 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
3362 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
3363 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
3364 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
3365 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
3366 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
3367 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
3368 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
3369 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
njn9e63cb62005-05-08 18:34:59 +00003370 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
3371 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
3372 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
3373 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
3374 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
3375 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
3376 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
3377 else
3378 return False;
3379
3380 VG_(set_supp_kind)(su, skind);
3381 return True;
3382}
3383
njn1d0825f2006-03-27 11:37:07 +00003384static
3385Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
3386{
3387 Bool eof;
3388
3389 if (VG_(get_supp_kind)(su) == ParamSupp) {
3390 eof = VG_(get_line) ( fd, buf, nBuf );
3391 if (eof) return False;
3392 VG_(set_supp_string)(su, VG_(strdup)(buf));
3393 }
3394 return True;
3395}
3396
3397static Bool mc_error_matches_suppression(Error* err, Supp* su)
3398{
3399 Int su_size;
3400 MC_Error* err_extra = VG_(get_error_extra)(err);
3401 ErrorKind ekind = VG_(get_error_kind )(err);
3402
3403 switch (VG_(get_supp_kind)(su)) {
3404 case ParamSupp:
3405 return (ekind == ParamErr
3406 && VG_STREQ(VG_(get_error_string)(err),
3407 VG_(get_supp_string)(su)));
3408
3409 case CoreMemSupp:
3410 return (ekind == CoreMemErr
3411 && VG_STREQ(VG_(get_error_string)(err),
3412 VG_(get_supp_string)(su)));
3413
3414 case Value0Supp: su_size = 0; goto value_case;
3415 case Value1Supp: su_size = 1; goto value_case;
3416 case Value2Supp: su_size = 2; goto value_case;
3417 case Value4Supp: su_size = 4; goto value_case;
3418 case Value8Supp: su_size = 8; goto value_case;
3419 case Value16Supp:su_size =16; goto value_case;
3420 value_case:
3421 return (ekind == ValueErr && err_extra->size == su_size);
3422
3423 case Addr1Supp: su_size = 1; goto addr_case;
3424 case Addr2Supp: su_size = 2; goto addr_case;
3425 case Addr4Supp: su_size = 4; goto addr_case;
3426 case Addr8Supp: su_size = 8; goto addr_case;
3427 case Addr16Supp:su_size =16; goto addr_case;
3428 addr_case:
3429 return (ekind == AddrErr && err_extra->size == su_size);
3430
3431 case FreeSupp:
3432 return (ekind == FreeErr || ekind == FreeMismatchErr);
3433
3434 case OverlapSupp:
3435 return (ekind = OverlapErr);
3436
3437 case LeakSupp:
3438 return (ekind == LeakErr);
3439
3440 case MempoolSupp:
3441 return (ekind == IllegalMempoolErr);
3442
3443 default:
3444 VG_(printf)("Error:\n"
3445 " unknown suppression type %d\n",
3446 VG_(get_supp_kind)(su));
3447 VG_(tool_panic)("unknown suppression type in "
3448 "MC_(error_matches_suppression)");
3449 }
3450}
3451
3452static Char* mc_get_error_name ( Error* err )
3453{
3454 Char* s;
3455 switch (VG_(get_error_kind)(err)) {
3456 case ParamErr: return "Param";
3457 case UserErr: return NULL; /* Can't suppress User errors */
3458 case FreeMismatchErr: return "Free";
3459 case IllegalMempoolErr: return "Mempool";
3460 case FreeErr: return "Free";
3461 case AddrErr:
3462 switch ( ((MC_Error*)VG_(get_error_extra)(err))->size ) {
3463 case 1: return "Addr1";
3464 case 2: return "Addr2";
3465 case 4: return "Addr4";
3466 case 8: return "Addr8";
3467 case 16: return "Addr16";
3468 default: VG_(tool_panic)("unexpected size for Addr");
3469 }
3470
3471 case ValueErr:
3472 switch ( ((MC_Error*)VG_(get_error_extra)(err))->size ) {
3473 case 0: return "Cond";
3474 case 1: return "Value1";
3475 case 2: return "Value2";
3476 case 4: return "Value4";
3477 case 8: return "Value8";
3478 case 16: return "Value16";
3479 default: VG_(tool_panic)("unexpected size for Value");
3480 }
3481 case CoreMemErr: return "CoreMem";
3482 case OverlapErr: return "Overlap";
3483 case LeakErr: return "Leak";
3484 default: VG_(tool_panic)("get_error_name: unexpected type");
3485 }
3486 VG_(printf)(s);
3487}
3488
3489static void mc_print_extra_suppression_info ( Error* err )
3490{
3491 if (ParamErr == VG_(get_error_kind)(err)) {
3492 VG_(printf)(" %s\n", VG_(get_error_string)(err));
3493 }
3494}
3495
njn9e63cb62005-05-08 18:34:59 +00003496/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00003497/*--- Functions called directly from generated code: ---*/
3498/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00003499/*------------------------------------------------------------*/
3500
njn1d0825f2006-03-27 11:37:07 +00003501/* Types: LOADV32, LOADV16, LOADV8 are:
sewardj6cf40ff2005-04-20 22:31:26 +00003502 UWord fn ( Addr a )
3503 so they return 32-bits on 32-bit machines and 64-bits on
3504 64-bit machines. Addr has the same size as a host word.
3505
njn1d0825f2006-03-27 11:37:07 +00003506 LOADV64 is always ULong fn ( Addr a )
sewardj6cf40ff2005-04-20 22:31:26 +00003507
njn1d0825f2006-03-27 11:37:07 +00003508 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
3509 are a UWord, and for STOREV64 they are a ULong.
sewardj6cf40ff2005-04-20 22:31:26 +00003510*/
3511
njn1d0825f2006-03-27 11:37:07 +00003512/* If any part of '_a' indicated by the mask is 1, either
njn45e81252006-03-28 12:35:08 +00003513 '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
njn1d0825f2006-03-27 11:37:07 +00003514 covered by the primary map. */
njn45e81252006-03-28 12:35:08 +00003515#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
njn1d0825f2006-03-27 11:37:07 +00003516#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
3517
3518
sewardj95448072004-11-22 20:19:51 +00003519/* ------------------------ Size = 8 ------------------------ */
3520
njn1d0825f2006-03-27 11:37:07 +00003521static INLINE
3522ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
3523{
3524 UWord sm_off16, vabits16;
3525 SecMap* sm;
3526
3527 PROF_EVENT(200, "mc_LOADV64");
3528
3529#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003530 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003531#else
njn45e81252006-03-28 12:35:08 +00003532 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003533 PROF_EVENT(201, "mc_LOADV64-slow1");
njn45e81252006-03-28 12:35:08 +00003534 return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
sewardjf9d81612005-04-23 23:25:49 +00003535 }
3536
njna7c7ebd2006-03-28 12:51:02 +00003537 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003538 sm_off16 = SM_OFF_16(a);
3539 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3540
3541 // Handle common case quickly: a is suitably aligned, is mapped, and
3542 // addressible.
3543 // Convert V bits from compact memory form to expanded register form.
njndbf7ca72006-03-31 11:57:59 +00003544 if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003545 return V_BITS64_DEFINED;
njndbf7ca72006-03-31 11:57:59 +00003546 } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003547 return V_BITS64_UNDEFINED;
3548 } else {
njndbf7ca72006-03-31 11:57:59 +00003549 /* Slow case: the 8 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003550 PROF_EVENT(202, "mc_LOADV64-slow2");
njn45e81252006-03-28 12:35:08 +00003551 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003552 }
3553#endif
3554}
3555
/* Endianness-specific entry points called from generated code. */
VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
{
   return mc_LOADV64(a, True);
}
VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
{
   return mc_LOADV64(a, False);
}
sewardjf9d81612005-04-23 23:25:49 +00003564
sewardjf9d81612005-04-23 23:25:49 +00003565
njn1d0825f2006-03-27 11:37:07 +00003566static INLINE
njn4cf530b2006-04-06 13:33:48 +00003567void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003568{
3569 UWord sm_off16, vabits16;
3570 SecMap* sm;
3571
3572 PROF_EVENT(210, "mc_STOREV64");
3573
3574#ifndef PERF_FAST_STOREV
3575 // XXX: this slow case seems to be marginally faster than the fast case!
3576 // Investigate further.
njn4cf530b2006-04-06 13:33:48 +00003577 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003578#else
njn45e81252006-03-28 12:35:08 +00003579 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003580 PROF_EVENT(211, "mc_STOREV64-slow1");
njn4cf530b2006-04-06 13:33:48 +00003581 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003582 return;
sewardjf9d81612005-04-23 23:25:49 +00003583 }
3584
njna7c7ebd2006-03-28 12:51:02 +00003585 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003586 sm_off16 = SM_OFF_16(a);
3587 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3588
3589 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003590 (VA_BITS16_DEFINED == vabits16 ||
3591 VA_BITS16_UNDEFINED == vabits16) ))
njn1d0825f2006-03-27 11:37:07 +00003592 {
3593 /* Handle common case quickly: a is suitably aligned, */
3594 /* is mapped, and is addressible. */
3595 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003596 if (V_BITS64_DEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003597 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003598 } else if (V_BITS64_UNDEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003599 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003600 } else {
3601 /* Slow but general case -- writing partially defined bytes. */
3602 PROF_EVENT(212, "mc_STOREV64-slow2");
njn4cf530b2006-04-06 13:33:48 +00003603 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003604 }
3605 } else {
3606 /* Slow but general case. */
3607 PROF_EVENT(213, "mc_STOREV64-slow3");
njn4cf530b2006-04-06 13:33:48 +00003608 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003609 }
3610#endif
3611}
3612
njn4cf530b2006-04-06 13:33:48 +00003613VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003614{
njn4cf530b2006-04-06 13:33:48 +00003615 mc_STOREV64(a, vbits64, True);
njn1d0825f2006-03-27 11:37:07 +00003616}
njn4cf530b2006-04-06 13:33:48 +00003617VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003618{
njn4cf530b2006-04-06 13:33:48 +00003619 mc_STOREV64(a, vbits64, False);
njn1d0825f2006-03-27 11:37:07 +00003620}
sewardj95448072004-11-22 20:19:51 +00003621
sewardj95448072004-11-22 20:19:51 +00003622
3623/* ------------------------ Size = 4 ------------------------ */
3624
njn1d0825f2006-03-27 11:37:07 +00003625static INLINE
3626UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
3627{
3628 UWord sm_off, vabits8;
3629 SecMap* sm;
3630
3631 PROF_EVENT(220, "mc_LOADV32");
3632
3633#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003634 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003635#else
njn45e81252006-03-28 12:35:08 +00003636 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003637 PROF_EVENT(221, "mc_LOADV32-slow1");
njn45e81252006-03-28 12:35:08 +00003638 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003639 }
3640
njna7c7ebd2006-03-28 12:51:02 +00003641 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003642 sm_off = SM_OFF(a);
3643 vabits8 = sm->vabits8[sm_off];
3644
3645 // Handle common case quickly: a is suitably aligned, is mapped, and the
3646 // entire word32 it lives in is addressible.
3647 // Convert V bits from compact memory form to expanded register form.
3648 // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
3649 // Almost certainly not necessary, but be paranoid.
njndbf7ca72006-03-31 11:57:59 +00003650 if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003651 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
njndbf7ca72006-03-31 11:57:59 +00003652 } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003653 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
3654 } else {
njndbf7ca72006-03-31 11:57:59 +00003655 /* Slow case: the 4 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003656 PROF_EVENT(222, "mc_LOADV32-slow2");
njn45e81252006-03-28 12:35:08 +00003657 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003658 }
3659#endif
3660}
3661
/* Generated-code entry points: 32-bit V-bit loads, big-endian and
   little-endian variants.  Thin dispatchers to mc_LOADV32. */
VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
{
   return mc_LOADV32(a, True);
}
VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
{
   return mc_LOADV32(a, False);
}
sewardjc1a2cda2005-04-21 17:34:00 +00003670
sewardjc1a2cda2005-04-21 17:34:00 +00003671
/* Store 32 bits of V bits ('vbits32', expanded register form) for the
   data at address 'a'.  Fast path handles aligned, low addresses whose
   shadow byte is wholly defined/undefined; all other cases fall back
   to mc_STOREVn_slow.  NOTE(review): the secmap is fetched with the
   "for_reading" accessor even though this is a store -- the direct
   writes below are guarded by !is_distinguished_sm(sm), so a
   read-only distinguished map is never written; appears deliberate. */
static INLINE
void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(230, "mc_STOREV32");

#ifndef PERF_FAST_STOREV
   /* Fast path disabled at build time: always take the general case. */
   mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
#else
   /* Unaligned or above the primary-map range: general case. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
      PROF_EVENT(231, "mc_STOREV32-slow1");
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      return;
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];

//---------------------------------------------------------------------------
#if 1
   // Cleverness: sometimes we don't have to write the shadow memory at
   // all, if we can tell that what we want to write is the same as what is
   // already there.
   if (V_BITS32_DEFINED == vbits32) {
      if (vabits8 == (UInt)VA_BITS8_DEFINED) {
         return;   // already all-defined: nothing to do
      } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
         sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
      } else {
         // not defined/undefined, or distinguished and changing state
         PROF_EVENT(232, "mc_STOREV32-slow2");
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      }
   } else if (V_BITS32_UNDEFINED == vbits32) {
      if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
         return;   // already all-undefined: nothing to do
      } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
         sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
      } else {
         // not defined/undefined, or distinguished and changing state
         PROF_EVENT(233, "mc_STOREV32-slow3");
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      }
   } else {
      // Partially defined word
      PROF_EVENT(234, "mc_STOREV32-slow4");
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
   }
//---------------------------------------------------------------------------
#else
   /* Alternative (disabled) implementation: unconditionally rewrite
      the shadow byte instead of skipping redundant writes. */
   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                       (VA_BITS8_DEFINED   == vabits8 ||
                        VA_BITS8_UNDEFINED == vabits8) ))
   {
      /* Handle common case quickly: a is suitably aligned, */
      /* is mapped, and is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS32_DEFINED == vbits32) {
         sm->vabits8[sm_off] = VA_BITS8_DEFINED;
      } else if (V_BITS32_UNDEFINED == vbits32) {
         sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(232, "mc_STOREV32-slow2");
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(233, "mc_STOREV32-slow3");
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
   }
#endif
//---------------------------------------------------------------------------
#endif
}
3750
/* Generated-code entry points: 32-bit V-bit stores, big-endian and
   little-endian variants.  Thin dispatchers to mc_STOREV32. */
VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
{
   mc_STOREV32(a, vbits32, True);
}
VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
{
   mc_STOREV32(a, vbits32, False);
}
njn25e49d8e72002-09-23 09:36:25 +00003759
njn25e49d8e72002-09-23 09:36:25 +00003760
sewardj95448072004-11-22 20:19:51 +00003761/* ------------------------ Size = 2 ------------------------ */
3762
/* Load 16 bits of V bits for address 'a', in expanded register form.
   Fast path first tests the whole shadow byte (which covers 4 data
   bytes); if that is mixed, it narrows to the 4-bit group covering
   the two bytes actually addressed, and only then falls back to
   mc_LOADVn_slow. */
static INLINE
UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(240, "mc_LOADV16");

#ifndef PERF_FAST_LOADV
   /* Fast path disabled at build time: always take the general case. */
   return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
#else
   /* Unaligned or above the primary-map range: general case. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
      PROF_EVENT(241, "mc_LOADV16-slow1");
      return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   // Handle common case quickly: a is suitably aligned, is mapped, and is
   // addressible.
   // Convert V bits from compact memory form to expanded register form
   // XXX: set the high 16/48 bits of retval to 1 for 64-bit paranoia?
   if      (vabits8 == VA_BITS8_DEFINED  ) { return V_BITS16_DEFINED;   }
   else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
   else {
      // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
      // the two sub-bytes.
      UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
      if      (vabits4 == VA_BITS4_DEFINED  ) { return V_BITS16_DEFINED;   }
      else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
      else {
         /* Slow case: the two bytes are not all-defined or all-undefined. */
         PROF_EVENT(242, "mc_LOADV16-slow2");
         return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
      }
   }
#endif
}
3802
/* Generated-code entry points: 16-bit V-bit loads, big-endian and
   little-endian variants.  Thin dispatchers to mc_LOADV16. */
VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
{
   return mc_LOADV16(a, True);
}
VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
{
   return mc_LOADV16(a, False);
}
sewardjc1a2cda2005-04-21 17:34:00 +00003811
sewardjc1a2cda2005-04-21 17:34:00 +00003812
/* Store 16 bits of V bits ('vbits16', expanded register form) for the
   data at address 'a'.  Fast path writes the 4-bit group covering the
   two bytes via insert_vabits4_into_vabits8, provided the secmap is
   writable and the shadow byte is wholly defined/undefined; otherwise
   falls back to mc_STOREVn_slow. */
static INLINE
void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(250, "mc_STOREV16");

#ifndef PERF_FAST_STOREV
   /* Fast path disabled at build time: always take the general case. */
   mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
#else
   /* Unaligned or above the primary-map range: general case. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
      PROF_EVENT(251, "mc_STOREV16-slow1");
      mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
      return;
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                       (VA_BITS8_DEFINED   == vabits8 ||
                        VA_BITS8_UNDEFINED == vabits8) ))
   {
      /* Handle common case quickly: a is suitably aligned, */
      /* is mapped, and is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS16_DEFINED == vbits16) {
         insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
                                      &(sm->vabits8[sm_off]) );
      } else if (V_BITS16_UNDEFINED == vbits16) {
         insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
                                      &(sm->vabits8[sm_off]) );
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(252, "mc_STOREV16-slow2");
         mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(253, "mc_STOREV16-slow3");
      mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
   }
#endif
}
njn25e49d8e72002-09-23 09:36:25 +00003858
/* Generated-code entry points: 16-bit V-bit stores, big-endian and
   little-endian variants.  Thin dispatchers to mc_STOREV16. */
VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
{
   mc_STOREV16(a, vbits16, True);
}
VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
{
   mc_STOREV16(a, vbits16, False);
}
sewardj5d28efc2005-04-21 22:16:29 +00003867
njn25e49d8e72002-09-23 09:36:25 +00003868
sewardj95448072004-11-22 20:19:51 +00003869/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00003870/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00003871
/* Load 8 bits of V bits for address 'a', in expanded register form.
   Endianness is irrelevant for a single byte, so there is no be/le
   split and mc_LOADVn_slow's endian flag is passed as False.  Fast
   path checks the whole shadow byte, then the 2-bit group for this
   one byte, before falling back to the general case. */
VG_REGPARM(1)
UWord MC_(helperc_LOADV8) ( Addr a )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(260, "mc_LOADV8");

#ifndef PERF_FAST_LOADV
   /* Fast path disabled at build time: always take the general case. */
   return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
#else
   /* Address above the primary-map range: general case. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
      PROF_EVENT(261, "mc_LOADV8-slow1");
      return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   // Convert V bits from compact memory form to expanded register form
   // Handle common case quickly: a is mapped, and the entire
   // word32 it lives in is addressible.
   // XXX: set the high 24/56 bits of retval to 1 for 64-bit paranoia?
   if      (vabits8 == VA_BITS8_DEFINED  ) { return V_BITS8_DEFINED;   }
   else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
   else {
      // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
      // the single byte.
      UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
      if      (vabits2 == VA_BITS2_DEFINED  ) { return V_BITS8_DEFINED;   }
      else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
      else {
         /* Slow case: the byte is not all-defined or all-undefined. */
         PROF_EVENT(262, "mc_LOADV8-slow2");
         return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
      }
   }
#endif
}
3911
sewardjc1a2cda2005-04-21 17:34:00 +00003912
/* Store 8 bits of V bits ('vbits8', expanded register form) for the
   byte at address 'a'.  Endianness is irrelevant for size 1.  Fast
   path takes effect when the secmap is writable and either the whole
   shadow byte is defined/undefined or at least this byte's 2-bit
   group is not NOACCESS; otherwise falls back to mc_STOREVn_slow. */
VG_REGPARM(2)
void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(270, "mc_STOREV8");

#ifndef PERF_FAST_STOREV
   /* Fast path disabled at build time: always take the general case. */
   mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
#else
   /* Address above the primary-map range: general case. */
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
      PROF_EVENT(271, "mc_STOREV8-slow1");
      mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
      return;
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   if (EXPECTED_TAKEN
         ( !is_distinguished_sm(sm) &&
           ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
          || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
           )
         )
      )
   {
      /* Handle common case quickly: a is mapped, the entire word32 it
         lives in is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS8_DEFINED == vbits8) {
         insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
                                      &(sm->vabits8[sm_off]) );
      } else if (V_BITS8_UNDEFINED == vbits8) {
         insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
                                      &(sm->vabits8[sm_off]) );
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(272, "mc_STOREV8-slow2");
         mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(273, "mc_STOREV8-slow3");
      mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
   }
#endif
}
3962
3963
sewardjc859fbf2005-04-22 21:10:28 +00003964/*------------------------------------------------------------*/
3965/*--- Functions called directly from generated code: ---*/
3966/*--- Value-check failure handlers. ---*/
3967/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003968
/* Value-check failure handlers, called directly from generated code
   when an undefined value is used.  Each records a value error for
   the running thread; the integer argument is the operand size in
   bytes (0 appears to mean "size not applicable" -- see
   mc_record_value_error for the exact semantics; TODO confirm). */
void MC_(helperc_value_check0_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 0 );
}

void MC_(helperc_value_check1_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 1 );
}

void MC_(helperc_value_check4_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 4 );
}

void MC_(helperc_value_check8_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 8 );
}

/* Variant taking the size as a runtime argument. */
VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
{
   mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
}
3993
njn25e49d8e72002-09-23 09:36:25 +00003994
sewardjc2c12c22006-03-08 13:20:09 +00003995/*------------------------------------------------------------*/
3996/*--- Metadata get/set functions, for client requests. ---*/
3997/*------------------------------------------------------------*/
3998
njn1d0825f2006-03-27 11:37:07 +00003999// Nb: this expands the V+A bits out into register-form V bits, even though
4000// they're in memory. This is for backward compatibility, and because it's
4001// probably what the user wants.
4002
4003/* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
sewardjc2c12c22006-03-08 13:20:09 +00004004 error [no longer used], 3 == addressing error. */
static Int mc_get_or_set_vbits_for_client (
   ThreadId tid,
   Addr a,       /* client data range */
   Addr vbits,   /* client buffer of expanded V bits, one byte per data byte */
   SizeT szB,    /* length of both ranges, in bytes */
   Bool setting /* True <=> set vbits,  False <=> get vbits */
)
{
   SizeT i;
   Bool  ok;
   UChar vbits8;

   /* Check that arrays are addressible before doing any getting/setting.
      The 4th arg of mc_record_address_error is the isWrite flag: when
      setting, 'a' is written and 'vbits' is read; when getting, the
      opposite -- hence the mirrored ternaries. */
   for (i = 0; i < szB; i++) {
      if (VA_BITS2_NOACCESS == get_vabits2(a + i)) {
         mc_record_address_error( tid, a + i, 1, setting ? True : False );
         return 3;
      }
      if (VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
         mc_record_address_error( tid, vbits + i, 1, setting ? False : True );
         return 3;
      }
   }

   /* Do the copy */
   if (setting) {
      /* The vbits the client supplies must themselves be defined. */
      // It's actually a tool ClientReq, but Vg_CoreClientReq is the closest
      // thing we have.
      check_mem_is_defined(Vg_CoreClientReq, tid, "SET_VBITS(vbits)",
                           vbits, szB);

      /* setting: copy client-supplied V bits into shadow memory,
         one byte at a time */
      for (i = 0; i < szB; i++) {
         ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
         tl_assert(ok);   // addressibility was verified above
      }
   } else {
      /* getting: copy shadow V bits out into the client's buffer */
      for (i = 0; i < szB; i++) {
         ok = get_vbits8(a + i, &vbits8);
         tl_assert(ok);   // addressibility was verified above
// XXX: used to do this, but it's a pain
//         if (V_BITS8_DEFINED != vbits8)
//            mc_record_value_error(tid, 1);
         ((UChar*)vbits)[i] = vbits8;
      }
      // The bytes in vbits[] have now been set, so mark them as such.
      MC_(make_mem_defined)(vbits, szB);
   }

   return 1;   /* 1 == OK (2 = alignment error is no longer used) */
}
sewardj05fe85e2005-04-27 22:46:36 +00004058
4059
4060/*------------------------------------------------------------*/
4061/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
4062/*------------------------------------------------------------*/
4063
4064/* For the memory leak detector, say whether an entire 64k chunk of
4065 address space is possibly in use, or not. If in doubt return
4066 True.
4067*/
4068static
4069Bool mc_is_within_valid_secondary ( Addr a )
4070{
4071 SecMap* sm = maybe_get_secmap_for ( a );
sewardj05a46732006-10-17 01:28:10 +00004072 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
4073 || in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004074 /* Definitely not in use. */
4075 return False;
4076 } else {
4077 return True;
4078 }
4079}
4080
4081
4082/* For the memory leak detector, say whether or not a given word
4083 address is to be regarded as valid. */
4084static
4085Bool mc_is_valid_aligned_word ( Addr a )
4086{
4087 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
4088 if (sizeof(UWord) == 4) {
4089 tl_assert(VG_IS_4_ALIGNED(a));
4090 } else {
4091 tl_assert(VG_IS_8_ALIGNED(a));
4092 }
sewardj05a46732006-10-17 01:28:10 +00004093 if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok
4094 && !in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004095 return True;
4096 } else {
4097 return False;
4098 }
4099}
sewardja4495682002-10-21 07:29:59 +00004100
4101
nethercote996901a2004-08-03 13:29:09 +00004102/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00004103 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00004104 tool. */
/* Leak detector for this tool.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   tool: the two predicates above tell it which address ranges and
   words Memcheck considers live. */
static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
{
   MC_(do_detect_memory_leaks) (
      tid,
      mode,
      mc_is_within_valid_secondary,
      mc_is_valid_aligned_word
   );
}
4114
4115
sewardjc859fbf2005-04-22 21:10:28 +00004116/*------------------------------------------------------------*/
4117/*--- Initialisation ---*/
4118/*------------------------------------------------------------*/
4119
/* One-time initialisation of the shadow-memory machinery: fills the
   three distinguished secondary maps, points every primary-map entry
   at the no-access one, sets up the auxiliary primary maps, and
   creates the secondary V bit table. */
static void init_shadow_memory ( void )
{
   Int     i;
   SecMap* sm;

   /* Sanity-check the encoding this module relies on. */
   tl_assert(V_BIT_UNDEFINED   == 1);
   tl_assert(V_BIT_DEFINED     == 0);
   tl_assert(V_BITS8_UNDEFINED == 0xFF);
   tl_assert(V_BITS8_DEFINED   == 0);

   /* Build the 3 distinguished secondaries */
   sm = &sm_distinguished[SM_DIST_NOACCESS];
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;

   sm = &sm_distinguished[SM_DIST_UNDEFINED];
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;

   sm = &sm_distinguished[SM_DIST_DEFINED];
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;

   /* Set up the primary map. */
   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < N_PRIMARY_MAP; i++)
      primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];

   /* Auxiliary primary maps */
   init_auxmap_L1_L2();

   /* auxmap_size = auxmap_used = 0;
      no ... these are statically initialised */

   /* Secondary V bit table */
   secVBitTable = createSecVBitTable();
}
4155
4156
4157/*------------------------------------------------------------*/
4158/*--- Sanity check machinery (permanently engaged) ---*/
4159/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004160
njn51d827b2005-05-09 01:02:08 +00004161static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004162{
jseward9800fd32004-01-04 23:08:04 +00004163 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00004164 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00004165 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00004166 return True;
njn25e49d8e72002-09-23 09:36:25 +00004167}
4168
njn51d827b2005-05-09 01:02:08 +00004169static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004170{
sewardj05a46732006-10-17 01:28:10 +00004171 Int i;
4172 Word n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00004173 SecMap* sm;
sewardj05a46732006-10-17 01:28:10 +00004174 HChar* errmsg;
sewardj23eb2fd2005-04-22 16:29:19 +00004175 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00004176
sewardj05a46732006-10-17 01:28:10 +00004177 if (0) VG_(printf)("expensive sanity check\n");
4178 if (0) return True;
4179
sewardj23eb2fd2005-04-22 16:29:19 +00004180 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00004181 PROF_EVENT(491, "expensive_sanity_check");
4182
njn1d0825f2006-03-27 11:37:07 +00004183 /* Check that the 3 distinguished SMs are still as they should be. */
njn25e49d8e72002-09-23 09:36:25 +00004184
njndbf7ca72006-03-31 11:57:59 +00004185 /* Check noaccess DSM. */
sewardj45d94cc2005-04-20 14:44:11 +00004186 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004187 for (i = 0; i < SM_CHUNKS; i++)
4188 if (sm->vabits8[i] != VA_BITS8_NOACCESS)
sewardj23eb2fd2005-04-22 16:29:19 +00004189 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00004190
njndbf7ca72006-03-31 11:57:59 +00004191 /* Check undefined DSM. */
4192 sm = &sm_distinguished[SM_DIST_UNDEFINED];
njn1d0825f2006-03-27 11:37:07 +00004193 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004194 if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004195 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004196
njndbf7ca72006-03-31 11:57:59 +00004197 /* Check defined DSM. */
4198 sm = &sm_distinguished[SM_DIST_DEFINED];
njn1d0825f2006-03-27 11:37:07 +00004199 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004200 if (sm->vabits8[i] != VA_BITS8_DEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004201 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004202
sewardj23eb2fd2005-04-22 16:29:19 +00004203 if (bad) {
4204 VG_(printf)("memcheck expensive sanity: "
4205 "distinguished_secondaries have changed\n");
4206 return False;
4207 }
4208
njn1d0825f2006-03-27 11:37:07 +00004209 /* If we're not checking for undefined value errors, the secondary V bit
4210 * table should be empty. */
4211 if (!MC_(clo_undef_value_errors)) {
4212 if (0 != VG_(OSet_Size)(secVBitTable))
4213 return False;
4214 }
4215
sewardj05a46732006-10-17 01:28:10 +00004216 /* check the auxiliary maps, very thoroughly */
4217 n_secmaps_found = 0;
4218 errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
4219 if (errmsg) {
4220 VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
sewardj23eb2fd2005-04-22 16:29:19 +00004221 return False;
4222 }
4223
sewardj05a46732006-10-17 01:28:10 +00004224 /* n_secmaps_found is now the number referred to by the auxiliary
4225 primary map. Now add on the ones referred to by the main
4226 primary map. */
sewardj23eb2fd2005-04-22 16:29:19 +00004227 for (i = 0; i < N_PRIMARY_MAP; i++) {
sewardj05a46732006-10-17 01:28:10 +00004228 if (primary_map[i] == NULL) {
sewardj23eb2fd2005-04-22 16:29:19 +00004229 bad = True;
4230 } else {
sewardj05a46732006-10-17 01:28:10 +00004231 if (!is_distinguished_sm(primary_map[i]))
sewardj23eb2fd2005-04-22 16:29:19 +00004232 n_secmaps_found++;
4233 }
4234 }
4235
sewardj05a46732006-10-17 01:28:10 +00004236 /* check that the number of secmaps issued matches the number that
4237 are reachable (iow, no secmap leaks) */
njn1d0825f2006-03-27 11:37:07 +00004238 if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
sewardj23eb2fd2005-04-22 16:29:19 +00004239 bad = True;
4240
4241 if (bad) {
4242 VG_(printf)("memcheck expensive sanity: "
4243 "apparent secmap leakage\n");
4244 return False;
4245 }
4246
sewardj23eb2fd2005-04-22 16:29:19 +00004247 if (bad) {
4248 VG_(printf)("memcheck expensive sanity: "
4249 "auxmap covers wrong address space\n");
4250 return False;
4251 }
4252
4253 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00004254
4255 return True;
4256}
sewardj45d94cc2005-04-20 14:44:11 +00004257
njn25e49d8e72002-09-23 09:36:25 +00004258/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00004259/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00004260/*------------------------------------------------------------*/
4261
/* Command-line option values with their defaults; parsed by
   mc_process_cmd_line_options() below. */
Bool MC_(clo_partial_loads_ok)       = False;     // --partial-loads-ok
Int  MC_(clo_freelist_vol)           = 5000000;   // --freelist-vol (bytes)
LeakCheckMode MC_(clo_leak_check)    = LC_Summary; // --leak-check
VgRes MC_(clo_leak_resolution)       = Vg_LowRes;  // --leak-resolution
Bool MC_(clo_show_reachable)         = False;     // --show-reachable
Bool MC_(clo_workaround_gcc296_bugs) = False;     // --workaround-gcc296-bugs
Bool MC_(clo_undef_value_errors)     = True;      // --undef-value-errors
4269
/* Parse one Memcheck-specific command line option.  Returns True if
   'arg' was recognised and the matching MC_(clo_*) variable updated;
   returns False for a malformed value.  Anything not recognised here
   is handed on to the malloc-replacement option parser. */
static Bool mc_process_cmd_line_options(Char* arg)
{
   /* The VG_*_CLO macros expand to 'if' heads, hence the slightly
      odd-looking 'else' chaining below. */
   VG_BOOL_CLO(arg, "--partial-loads-ok", MC_(clo_partial_loads_ok))
   else VG_BOOL_CLO(arg, "--show-reachable", MC_(clo_show_reachable))
   else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))

   else VG_BOOL_CLO(arg, "--undef-value-errors", MC_(clo_undef_value_errors))

   else VG_BNUM_CLO(arg, "--freelist-vol", MC_(clo_freelist_vol), 0, 1000000000)

   else if (VG_CLO_STREQ(arg, "--leak-check=no"))
      MC_(clo_leak_check) = LC_Off;
   else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
      MC_(clo_leak_check) = LC_Summary;
   else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
            VG_CLO_STREQ(arg, "--leak-check=full"))
      MC_(clo_leak_check) = LC_Full;

   else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
      MC_(clo_leak_resolution) = Vg_LowRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
      MC_(clo_leak_resolution) = Vg_MedRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
      MC_(clo_leak_resolution) = Vg_HighRes;

   /* --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS,...]: parse the list into
      'ignoreRanges', then sanity-check each range: end must be beyond
      start and the range must not be suspiciously large. */
   else if (VG_CLO_STREQN(16,arg,"--ignore-ranges=")) {
      Int i;
      UChar* txt = (UChar*)(arg+16);   /* step past "--ignore-ranges=" (16 chars) */
      Bool ok = parse_ignore_ranges(txt);
      if (!ok)
         return False;
      tl_assert(ignoreRanges.used >= 0);
      tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
      for (i = 0; i < ignoreRanges.used; i++) {
         Addr s = ignoreRanges.start[i];
         Addr e = ignoreRanges.end[i];
         Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
         if (e <= s) {
            VG_(message)(Vg_DebugMsg,
               "ERROR: --ignore-ranges: end <= start in range:");
            VG_(message)(Vg_DebugMsg,
               " 0x%lx-0x%lx", s, e);
            return False;
         }
         if (e - s > limit) {
            VG_(message)(Vg_DebugMsg,
               "ERROR: --ignore-ranges: suspiciously large range:");
            VG_(message)(Vg_DebugMsg,
               " 0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
            return False;
         }
      }
   }

   else
      /* Not ours: maybe it belongs to the malloc replacement. */
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4329
/* Print Memcheck's own command-line options for --help.  The option
   names and the bracketed defaults must stay in step with
   mc_process_cmd_line_options() and the MC_(clo_*) initialisers. */
static void mc_print_usage(void)
{
   VG_(printf)(
" --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
" --undef-value-errors=no|yes check for undefined value errors [yes]\n"
" --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
" --freelist-vol=<number> volume of freed blocks queue [5000000]\n"
" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
" --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS] assume given addresses are OK\n"
   );
   /* The malloc replacement has options of its own; let it list them. */
   VG_(replacement_malloc_print_usage)();
}
4344
/* Print debug-only options for --help-debug.  Memcheck adds none of
   its own; only the malloc-replacement machinery contributes here. */
static void mc_print_debug_usage(void)
{
   VG_(replacement_malloc_print_debug_usage)();
}
4349
sewardjf3418c02005-11-08 14:10:24 +00004350
nethercote8b76fe52004-11-08 19:20:09 +00004351/*------------------------------------------------------------*/
4352/*--- Client requests ---*/
4353/*------------------------------------------------------------*/
4354
4355/* Client block management:
4356
4357 This is managed as an expanding array of client block descriptors.
4358 Indices of live descriptors are issued to the client, so it can ask
4359 to free them later. Therefore we cannot slide live entries down
4360 over dead ones. Instead we must use free/inuse flags and scan for
4361 an empty slot at allocation time. This in turn means allocation is
4362 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00004363
sewardjedc75ab2005-03-15 23:30:32 +00004364 An unused block has start == size == 0
4365*/
nethercote8b76fe52004-11-08 19:20:09 +00004366
/* Descriptor for one client-described block (VALGRIND_CREATE_BLOCK). */
typedef
   struct {
      Addr start;          /* first address of the block; 0 if slot unused */
      SizeT size;          /* length in bytes; 0 if slot unused */
      ExeContext* where;   /* context captured when the block was described */
      Char* desc;          /* client-supplied description, strdup'd */
   }
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt cgb_size = 0;        /* capacity of cgbs[] */
static UInt cgb_used = 0;        /* number of slots ever handed out (live + discarded) */
static CGenBlock* cgbs = NULL;   /* the descriptor array itself */

/* Stats for this subsystem. */
static UInt cgb_used_MAX = 0; /* Max in use. */
static UInt cgb_allocs = 0; /* Number of allocs. */
static UInt cgb_discards = 0; /* Number of discards. */
static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00004386
4387
4388static
njn695c16e2005-03-27 03:40:28 +00004389Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00004390{
4391 UInt i, sz_new;
4392 CGenBlock* cgbs_new;
4393
njn695c16e2005-03-27 03:40:28 +00004394 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00004395
njn695c16e2005-03-27 03:40:28 +00004396 for (i = 0; i < cgb_used; i++) {
4397 cgb_search++;
4398 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004399 return i;
4400 }
4401
4402 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00004403 if (cgb_used < cgb_size) {
4404 cgb_used++;
4405 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004406 }
4407
4408 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00004409 tl_assert(cgb_used == cgb_size);
4410 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00004411
4412 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00004413 for (i = 0; i < cgb_used; i++)
4414 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00004415
njn695c16e2005-03-27 03:40:28 +00004416 if (cgbs != NULL)
4417 VG_(free)( cgbs );
4418 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00004419
njn695c16e2005-03-27 03:40:28 +00004420 cgb_size = sz_new;
4421 cgb_used++;
4422 if (cgb_used > cgb_used_MAX)
4423 cgb_used_MAX = cgb_used;
4424 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004425}
4426
4427
/* Emit lifetime statistics for the client-block subsystem (see the
   cgb_* counters above). */
static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
   );
}
4435
/* Try to describe address 'a' in terms of client-described blocks.
   If 'a' lies inside some cgbs[] entry, fill in *ai and return True;
   otherwise return False.  A hit is further refined: if the block's
   start address is registered as a mempool, we first look for the
   individual mempool chunk containing 'a'. */
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
   UInt i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      /* Skip discarded slots (marked by start == size == 0). */
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         /* OK - maybe it's a mempool, too? */
         MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
                                         (UWord)cgbs[i].start);
         if (mp != NULL) {
            if (mp->chunks != NULL) {
               /* Scan the pool's chunks for the one containing 'a'. */
               MC_Chunk* mc;
               VG_(HT_ResetIter)(mp->chunks);
               while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
                  if (addr_is_in_MC_Chunk(mc, a)) {
                     ai->akind = UserG;
                     ai->blksize = mc->size;
                     /* NOTE(review): offsets are computed via (Int)
                        casts of addresses; on 64-bit this truncates --
                        presumably tolerable for offsets, but confirm. */
                     ai->rwoffset = (Int)(a) - (Int)mc->data;
                     ai->lastchange = mc->where;
                     return True;
                  }
               }
            }
            /* In the pool but in no chunk: describe the pool itself. */
            ai->akind = Mempool;
            ai->blksize = cgbs[i].size;
            ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
            ai->lastchange = cgbs[i].where;
            return True;
         }
         /* Plain client-described block (not a mempool). */
         ai->akind = UserG;
         ai->blksize = cgbs[i].size;
         ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
         ai->lastchange = cgbs[i].where;
         ai->desc = cgbs[i].desc;
         return True;
      }
   }
   return False;
}
4480
/* Dispatch client requests aimed at Memcheck (see memcheck.h) plus
   the core-defined heap/mempool description requests.  Returns True
   iff the request was recognised and handled; *ret carries the value
   handed back to the client.  arg[0] is the request code, arg[1..]
   its arguments. */
static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
{
   Int i;
   Bool ok;
   Addr bad_addr;

   /* Fast reject: not a 'MC' tool request and not one of the generic
      heap/mempool requests we also service. */
   if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
       && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
       && VG_USERREQ__FREELIKE_BLOCK != arg[0]
       && VG_USERREQ__CREATE_MEMPOOL != arg[0]
       && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
       && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
       && VG_USERREQ__MEMPOOL_FREE != arg[0]
       && VG_USERREQ__MEMPOOL_TRIM != arg[0]
       && VG_USERREQ__MOVE_MEMPOOL != arg[0]
       && VG_USERREQ__MEMPOOL_CHANGE != arg[0]
       && VG_USERREQ__MEMPOOL_EXISTS != arg[0])
      return False;

   switch (arg[0]) {
      /* Is [arg[1], arg[1]+arg[2]) addressable?  On failure, report a
         user error and hand back the first bad address. */
      case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
         ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
         if (!ok)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
                                   /*isUnaddr*/True );
         *ret = ok ? (UWord)NULL : bad_addr;
         break;

      /* Is [arg[1], arg[1]+arg[2]) both addressable and defined? */
      case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
         MC_ReadResult res;
         res = is_mem_defined ( arg[1], arg[2], &bad_addr );
         if (MC_AddrErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/True );
         else if (MC_ValueErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/False );
         *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
         break;
      }

      /* Run a leak check now; arg[1] != 0 asks for the summary form. */
      case VG_USERREQ__DO_LEAK_CHECK:
         mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
         *ret = 0; /* return value is meaningless */
         break;

      /* Shadow-state manipulation over [arg[1], arg[1]+arg[2]). */
      case VG_USERREQ__MAKE_MEM_NOACCESS:
         MC_(make_mem_noaccess) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_UNDEFINED:
         MC_(make_mem_undefined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED:
         MC_(make_mem_defined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
         make_mem_defined_if_addressable ( arg[1], arg[2] );
         *ret = -1;
         break;

      /* Attach a description (arg[3]) to the memory range
         [arg[1], arg[1]+arg[2]); *ret is the handle for DISCARD. */
      case VG_USERREQ__CREATE_BLOCK: /* describe a block */
         if (arg[1] != 0 && arg[2] != 0) {
            i = alloc_client_block();
            /* VG_(printf)("allocated %d %p\n", i, cgbs); */
            cgbs[i].start = arg[1];
            cgbs[i].size = arg[2];
            cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
            cgbs[i].where = VG_(record_ExeContext) ( tid );

            *ret = i;
         } else
            *ret = -1;
         break;

      /* Drop a CREATE_BLOCK description; arg[2] is the handle.
         *ret = 1 signals a bogus handle, 0 success. */
      case VG_USERREQ__DISCARD: /* discard */
         if (cgbs == NULL
             || arg[2] >= cgb_used ||
             (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
            *ret = 1;
         } else {
            /* NOTE(review): arg[2] is a UWord, so 'arg[2] >= 0' below
               is vacuously true. */
            tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
            cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
            VG_(free)(cgbs[arg[2]].desc);
            cgb_discards++;
            *ret = 0;
         }
         break;

      case VG_USERREQ__GET_VBITS:
         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
            error. */
         /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], False /* get them */ );
         break;

      case VG_USERREQ__SET_VBITS:
         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
            error. */
         /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], True /* set them */ );
         break;

      case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
         UWord** argp = (UWord**)arg;
         // MC_(bytes_leaked) et al were set by the last leak check (or zero
         // if no prior leak checks performed).
         *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
         *argp[2] = MC_(bytes_dubious);
         *argp[3] = MC_(bytes_reachable);
         *argp[4] = MC_(bytes_suppressed);
         // there is no argp[5]
         //*argp[5] = MC_(bytes_indirect);
         // XXX need to make *argp[1-4] defined
         *ret = 0;
         return True;
      }

      /* Treat a client-managed allocation like a heap block. */
      case VG_USERREQ__MALLOCLIKE_BLOCK: {
         Addr p = (Addr)arg[1];
         SizeT sizeB = arg[2];
         UInt rzB = arg[3];
         Bool is_zeroed = (Bool)arg[4];

         MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed,
                          MC_AllocCustom, MC_(malloc_list) );
         return True;
      }

      /* ... and the matching free. */
      case VG_USERREQ__FREELIKE_BLOCK: {
         Addr p = (Addr)arg[1];
         UInt rzB = arg[2];

         MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
         return True;
      }

      /* Overlap error reported from within a replacement function
         (e.g. memcpy with overlapping src/dst). */
      case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
         Char* s = (Char*) arg[1];
         OverlapExtra* extra = (OverlapExtra*)arg[2];
         mc_record_overlap_error(tid, s, extra);
         return True;
      }

      /* Mempool lifecycle and membership requests; all delegate to
         the MC_(mempool_*) implementations. */
      case VG_USERREQ__CREATE_MEMPOOL: {
         Addr pool = (Addr)arg[1];
         UInt rzB = arg[2];
         Bool is_zeroed = (Bool)arg[3];

         MC_(create_mempool) ( pool, rzB, is_zeroed );
         return True;
      }

      case VG_USERREQ__DESTROY_MEMPOOL: {
         Addr pool = (Addr)arg[1];

         MC_(destroy_mempool) ( pool );
         return True;
      }

      case VG_USERREQ__MEMPOOL_ALLOC: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];
         UInt size = arg[3];

         MC_(mempool_alloc) ( tid, pool, addr, size );
         return True;
      }

      case VG_USERREQ__MEMPOOL_FREE: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];

         MC_(mempool_free) ( pool, addr );
         return True;
      }

      case VG_USERREQ__MEMPOOL_TRIM: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];
         UInt size = arg[3];

         MC_(mempool_trim) ( pool, addr, size );
         return True;
      }

      case VG_USERREQ__MOVE_MEMPOOL: {
         Addr poolA = (Addr)arg[1];
         Addr poolB = (Addr)arg[2];

         MC_(move_mempool) ( poolA, poolB );
         return True;
      }

      case VG_USERREQ__MEMPOOL_CHANGE: {
         Addr pool = (Addr)arg[1];
         Addr addrA = (Addr)arg[2];
         Addr addrB = (Addr)arg[3];
         UInt size = arg[4];

         MC_(mempool_change) ( pool, addrA, addrB, size );
         return True;
      }

      case VG_USERREQ__MEMPOOL_EXISTS: {
         Addr pool = (Addr)arg[1];

         *ret = (UWord) MC_(mempool_exists) ( pool );
         return True;
      }


      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown memcheck client request code %llx",
                      (ULong)arg[0]);
         return False;
   }
   return True;
}
njn25e49d8e72002-09-23 09:36:25 +00004706
4707/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004708/*--- Crude profiling machinery. ---*/
4709/*------------------------------------------------------------*/
4710
4711// We track a number of interesting events (using PROF_EVENT)
4712// if MC_PROFILE_MEMORY is defined.
4713
#ifdef MC_PROFILE_MEMORY

UInt MC_(event_ctr)[N_PROF_EVENTS];
HChar* MC_(event_ctr_name)[N_PROF_EVENTS];

/* Reset every event counter to zero and forget the names. */
static void init_prof_mem ( void )
{
   Int ev;
   for (ev = 0; ev < N_PROF_EVENTS; ev++) {
      MC_(event_ctr)[ev] = 0;
      MC_(event_ctr_name)[ev] = NULL;
   }
}

/* Print every event counter that fired, emitting a blank line before
   each group of ten events that contains at least one hit. */
static void done_prof_mem ( void )
{
   Int ev;
   Bool blank_done = False;   /* a blank line was just printed */
   for (ev = 0; ev < N_PROF_EVENTS; ev++) {
      if ((ev % 10) == 0 && !blank_done) {
         VG_(printf)("\n");
         blank_done = True;
      }
      if (MC_(event_ctr)[ev] > 0) {
         blank_done = False;
         VG_(printf)( "prof mem event %3d: %9d %s\n",
                      ev, MC_(event_ctr)[ev],
                      MC_(event_ctr_name)[ev]
                         ? MC_(event_ctr_name)[ev] : "unnamed");
      }
   }
}

#else

/* Profiling disabled: the hooks collapse to no-ops. */
static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#endif
4753
4754/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00004755/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00004756/*------------------------------------------------------------*/
4757
njn51d827b2005-05-09 01:02:08 +00004758static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00004759{
sewardj71bc3cb2005-05-19 00:25:45 +00004760 /* If we've been asked to emit XML, mash around various other
4761 options so as to constrain the output somewhat. */
4762 if (VG_(clo_xml)) {
4763 /* Extract as much info as possible from the leak checker. */
njn1d0825f2006-03-27 11:37:07 +00004764 /* MC_(clo_show_reachable) = True; */
4765 MC_(clo_leak_check) = LC_Full;
sewardj71bc3cb2005-05-19 00:25:45 +00004766 }
njn5c004e42002-11-18 11:04:50 +00004767}
4768
njn1d0825f2006-03-27 11:37:07 +00004769static void print_SM_info(char* type, int n_SMs)
4770{
4771 VG_(message)(Vg_DebugMsg,
4772 " memcheck: SMs: %s = %d (%dk, %dM)",
4773 type,
4774 n_SMs,
4775 n_SMs * sizeof(SecMap) / 1024,
4776 n_SMs * sizeof(SecMap) / (1024 * 1024) );
4777}
4778
/* Tool finalisation: print heap stats, optionally run the end-of-run
   leak check, and (at -v -v) dump shadow-memory statistics.
   'exitcode' is unused here. */
static void mc_fini ( Int exitcode )
{
   MC_(print_malloc_stats)();

   /* At default verbosity (and not in XML mode), nudge the user
      towards the more informative reruns. */
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      if (MC_(clo_leak_check) == LC_Off)
         VG_(message)(Vg_UserMsg,
            "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   if (MC_(clo_leak_check) != LC_Off)
      mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));

   done_prof_mem();

   if (VG_(clo_verbosity) > 1) {
      SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;

      VG_(message)(Vg_DebugMsg,
         " memcheck: sanity checks: %d cheap, %d expensive",
         n_sanity_cheap, n_sanity_expensive );
      /* NOTE(review): the *64 (k) and /16 (M) factors imply 64KB per
         auxmap L2 node -- confirm against the auxmap implementation. */
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
         n_auxmap_L2_nodes,
         n_auxmap_L2_nodes * 64,
         n_auxmap_L2_nodes / 16 );
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
         n_auxmap_L1_searches, n_auxmap_L1_cmps,
         (10ULL * n_auxmap_L1_cmps)
            / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
      );
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps_L2: %lld searches, %lld nodes",
         n_auxmap_L2_searches, n_auxmap_L2_nodes
      );

      print_SM_info("n_issued ", n_issued_SMs);
      print_SM_info("n_deissued ", n_deissued_SMs);
      print_SM_info("max_noaccess ", max_noaccess_SMs);
      print_SM_info("max_undefined", max_undefined_SMs);
      print_SM_info("max_defined ", max_defined_SMs);
      print_SM_info("max_non_DSM ", max_non_DSM_SMs);

      // Three DSMs, plus the non-DSM ones
      max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
      // The 3*sizeof(Word) bytes is the AVL node metadata size.
      // The 4*sizeof(Word) bytes is the malloc metadata size.
      // Hardwiring these sizes in sucks, but I don't see how else to do it.
      max_secVBit_szB = max_secVBit_nodes *
            (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
      max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;

      VG_(message)(Vg_DebugMsg,
         " memcheck: max sec V bit nodes: %d (%dk, %dM)",
         max_secVBit_nodes, max_secVBit_szB / 1024,
         max_secVBit_szB / (1024 * 1024));
      VG_(message)(Vg_DebugMsg,
         " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
         sec_vbits_new_nodes + sec_vbits_updates,
         sec_vbits_new_nodes, sec_vbits_updates );
      VG_(message)(Vg_DebugMsg,
         " memcheck: max shadow mem size: %dk, %dM",
         max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
   }

   if (0) {
      VG_(message)(Vg_DebugMsg,
        "------ Valgrind's client block stats follow ---------------" );
      show_client_block_stats();
   }
}
4853
/* Tool registration, run before command-line processing: declare the
   tool's identity, register every core callback Memcheck needs, set
   up shadow memory and the heap/mempool hash tables, and assert the
   word-size invariants the fast-path load/store code depends on. */
static void mc_pre_clo_init(void)
{
   VG_(details_name) ("Memcheck");
   VG_(details_version) (NULL);
   VG_(details_description) ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2006, and GNU GPL'd, by Julian Seward et al.");
   VG_(details_bug_reports_to) (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 556 );

   /* Core entry points: post-CLO init, the instrumenter, and fini. */
   VG_(basic_tool_funcs) (mc_post_clo_init,
                          MC_(instrument),
                          mc_fini);

   VG_(needs_core_errors) ();
   VG_(needs_tool_errors) (mc_eq_Error,
                           mc_pp_Error,
                           mc_update_extra,
                           mc_recognised_suppression,
                           mc_read_extra_suppression_info,
                           mc_error_matches_suppression,
                           mc_get_error_name,
                           mc_print_extra_suppression_info);
   VG_(needs_libc_freeres) ();
   VG_(needs_command_line_options)(mc_process_cmd_line_options,
                                   mc_print_usage,
                                   mc_print_debug_usage);
   VG_(needs_client_requests) (mc_handle_client_request);
   VG_(needs_sanity_checks) (mc_cheap_sanity_check,
                             mc_expensive_sanity_check);
   VG_(needs_malloc_replacement) (MC_(malloc),
                                  MC_(__builtin_new),
                                  MC_(__builtin_vec_new),
                                  MC_(memalign),
                                  MC_(calloc),
                                  MC_(free),
                                  MC_(__builtin_delete),
                                  MC_(__builtin_vec_delete),
                                  MC_(realloc),
                                  MC_MALLOC_REDZONE_SZB );
   VG_(needs_xml_output) ();

   /* Memory-state tracking: new memory arrives either defined
      (startup, mmap) or undefined (stack/brk growth). */
   VG_(track_new_mem_startup) ( mc_new_mem_startup );
   VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
   VG_(track_new_mem_brk) ( MC_(make_mem_undefined) );
   VG_(track_new_mem_mmap) ( mc_new_mem_mmap );

   VG_(track_copy_mem_remap) ( MC_(copy_address_range_state) );

   // Nb: we don't do anything with mprotect. This means that V bits are
   // preserved if a program, for example, marks some memory as inaccessible
   // and then later marks it as accessible again.
   //
   // If an access violation occurs (eg. writing to read-only memory) we let
   // it fault and print an informative termination message. This doesn't
   // happen if the program catches the signal, though, which is bad. If we
   // had two A bits (for readability and writability) that were completely
   // distinct from V bits, then we could handle all this properly.
   VG_(track_change_mem_mprotect) ( NULL );

   /* Memory going away becomes inaccessible. */
   VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
   VG_(track_die_mem_brk) ( MC_(make_mem_noaccess) );
   VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );

   /* Specialised fast-path handlers for common stack adjustments. */
#ifdef PERF_FAST_STACK
   VG_(track_new_mem_stack_4) ( mc_new_mem_stack_4 );
   VG_(track_new_mem_stack_8) ( mc_new_mem_stack_8 );
   VG_(track_new_mem_stack_12) ( mc_new_mem_stack_12 );
   VG_(track_new_mem_stack_16) ( mc_new_mem_stack_16 );
   VG_(track_new_mem_stack_32) ( mc_new_mem_stack_32 );
   VG_(track_new_mem_stack_112) ( mc_new_mem_stack_112 );
   VG_(track_new_mem_stack_128) ( mc_new_mem_stack_128 );
   VG_(track_new_mem_stack_144) ( mc_new_mem_stack_144 );
   VG_(track_new_mem_stack_160) ( mc_new_mem_stack_160 );
#endif
   VG_(track_new_mem_stack) ( mc_new_mem_stack );

#ifdef PERF_FAST_STACK
   VG_(track_die_mem_stack_4) ( mc_die_mem_stack_4 );
   VG_(track_die_mem_stack_8) ( mc_die_mem_stack_8 );
   VG_(track_die_mem_stack_12) ( mc_die_mem_stack_12 );
   VG_(track_die_mem_stack_16) ( mc_die_mem_stack_16 );
   VG_(track_die_mem_stack_32) ( mc_die_mem_stack_32 );
   VG_(track_die_mem_stack_112) ( mc_die_mem_stack_112 );
   VG_(track_die_mem_stack_128) ( mc_die_mem_stack_128 );
   VG_(track_die_mem_stack_144) ( mc_die_mem_stack_144 );
   VG_(track_die_mem_stack_160) ( mc_die_mem_stack_160 );
#endif
   VG_(track_die_mem_stack) ( mc_die_mem_stack );

   VG_(track_ban_mem_stack) ( MC_(make_mem_noaccess) );

   /* Pre/post hooks for memory accesses made on the client's behalf. */
   VG_(track_pre_mem_read) ( check_mem_is_defined );
   VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
   VG_(track_pre_mem_write) ( check_mem_is_addressable );
   VG_(track_post_mem_write) ( mc_post_mem_write );

   /* Register-read checking only matters for undef-value tracking. */
   if (MC_(clo_undef_value_errors))
      VG_(track_pre_reg_read) ( mc_pre_reg_read );

   VG_(track_post_reg_write) ( mc_post_reg_write );
   VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );

   init_shadow_memory();
   MC_(malloc_list) = VG_(HT_construct)( 80021 ); // prime, big
   MC_(mempool_list) = VG_(HT_construct)( 1009 ); // prime, not so big
   init_prof_mem();

   tl_assert( mc_expensive_sanity_check() );

   // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
   tl_assert(sizeof(UWord) == sizeof(Addr));
   // Call me paranoid. I don't care.
   tl_assert(sizeof(void*) == sizeof(Addr));

   // BYTES_PER_SEC_VBIT_NODE must be a power of two.
   tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
}
4972
sewardj45f4e7c2005-09-27 19:20:21 +00004973VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00004974
njn25e49d8e72002-09-23 09:36:25 +00004975/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004976/*--- end ---*/
njn25e49d8e72002-09-23 09:36:25 +00004977/*--------------------------------------------------------------------*/