/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2008 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_oset.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */

#ifdef HAVE_BUILTIN_EXPECT
#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
#else
#define EXPECTED_TAKEN(cond)     (cond)
#define EXPECTED_NOT_TAKEN(cond) (cond)
#endif

/* Set to 1 to do a little more sanity checking */
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Fast-case knobs                                      ---*/
/*------------------------------------------------------------*/

// Comment these out to disable the fast cases (don't just set them to zero).

#define PERF_FAST_LOADV    1
#define PERF_FAST_STOREV   1

#define PERF_FAST_SARP     1

#define PERF_FAST_STACK    1
#define PERF_FAST_STACK2   1

/*------------------------------------------------------------*/
/*--- V bits and A bits                                    ---*/
/*------------------------------------------------------------*/

/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
   thinks the corresponding value bit is defined.  And every memory byte
   has an A bit, which tracks whether Memcheck thinks the program can access
   it safely.  So every N-bit register is shadowed with N V bits, and every
   memory byte is shadowed with 8 V bits and one A bit.

   In the implementation, we use two forms of compression (compressed V bits
   and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
   for memory.

   Memcheck also tracks extra information about each heap block that is
   allocated, for detecting memory leaks and other purposes.
*/

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map (a.k.a. shadow
   memory), which records the state of all memory in the process.

   On 32-bit machines the memory map is organised as follows.
   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by two bits (details are below).  So
   each second-level map contains 16384 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of size
   64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since many of the 64kB chunks will
   have the same status for every bit -- ie. noaccess (for unused
   address space) or entirely addressable and defined (for code segments) --
   there are three distinguished secondary maps, which indicate 'noaccess',
   'undefined' and 'defined'.  For these uniform 64kB chunks, the primary
   map entry points to the relevant distinguished map.  In practice,
   typically more than half of the addressable memory is represented with
   the 'undefined' or 'defined' distinguished secondary map, so it gives a
   good saving.  It also lets us set the V+A bits of large address regions
   quickly in set_address_range_perms().

   On 64-bit machines it's more complicated.  If we followed the same basic
   scheme we'd have a four-level table which would require too many memory
   accesses.  So instead the top-level map table has 2^19 entries (indexed
   using bits 16..34 of the address); this covers the bottom 32GB.  Any
   accesses above 32GB are handled with a slow, sparse auxiliary table.
   Valgrind's address space manager tries very hard to keep things below
   this 32GB barrier so that performance doesn't suffer too much.

   Note that this file has a lot of different functions for reading and
   writing shadow memory.  Only a couple are strictly necessary (eg.
   get_vabits2 and set_vabits2); most are just specialised for specific
   common cases to improve performance.

   Aside: the V+A bits are less precise than they could be -- we have no way
   of marking memory as read-only.  It would be great if we could add an
   extra state VA_BITSn_READONLY.  But then we'd have 5 different states,
   which requires 2.3 bits to hold, and there's no way to do that elegantly
   -- we'd have to double up to 4 bits of metadata per byte, which doesn't
   seem worth it.
*/
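
/* To make the two-level arrangement concrete, here is a minimal sketch,
   for illustration only (sketch_get_vabits2 is a hypothetical name and is
   not used anywhere in this file), of how the 2-bit state of one byte
   would be fetched on a 32-bit build, in terms of the primary_map, SecMap
   and SM_OFF definitions that appear below.  The real lookup paths also
   handle the auxiliary map, the distinguished secondaries and
   copy-on-write, which this sketch ignores.

      UChar sketch_get_vabits2 ( Addr a )
      {
         SecMap* sm      = primary_map[a >> 16];     // top 16 bits pick the secondary map
         UWord   sm_off  = SM_OFF(a);                // (a & 0xffff) >> 2: chunk within it
         UChar   vabits8 = sm->vabits8[sm_off];      // 8 bits covering 4 bytes of memory
         return (vabits8 >> ((a & 3) << 1)) & 0x3;   // 2 bits for this particular byte
      }
*/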

/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
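
/* For concreteness (these numbers follow directly from the two definitions
   above): on a 32-bit build N_PRIMARY_MAP is 2^16, so the primary map
   covers 65536 * 65536 bytes, ie. the whole 4GB address space, and
   MAX_PRIMARY_ADDRESS is 0xFFFFFFFF.  On a 64-bit build N_PRIMARY_MAP is
   2^19, the primary map covers 65536 * 2^19 = 2^35 bytes (32GB), and
   MAX_PRIMARY_ADDRESS is 0x7FFFFFFFF; anything above that is handled by
   the auxiliary primary map described below. */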


/* --------------- Secondary maps --------------- */

// Each byte of memory conceptually has an A bit, which indicates its
// addressability, and 8 V bits, which indicate its definedness.
//
// But because very few bytes are partially defined, we can use a nice
// compression scheme to reduce the size of shadow memory.  Each byte of
// memory has 2 bits which indicate its state (ie. V+A bits):
//
//   00:  noaccess    (unaddressable but treated as fully defined)
//   01:  undefined   (addressable and fully undefined)
//   10:  defined     (addressable and fully defined)
//   11:  partdefined (addressable and partially defined)
//
// In the "partdefined" case, we use a secondary table to store the V bits.
// Each entry in the secondary-V-bits table maps a byte address to its 8 V
// bits.
//
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
// four bytes (32 bits) of memory are in each chunk.  Hence the name
// "vabits8".  This lets us get the V+A bits for four bytes at a time
// easily (without having to do any shifting and/or masking), and that is a
// very common operation.  (Note that although each vabits8 chunk
// is 8 bits in size, it represents 32 bits of memory.)
//
// The representation is "inverse" little-endian... each 4 bytes of
// memory is represented by a 1 byte value, where:
//
// - the status of byte (a+0) is held in bits [1..0]
// - the status of byte (a+1) is held in bits [3..2]
// - the status of byte (a+2) is held in bits [5..4]
// - the status of byte (a+3) is held in bits [7..6]
//
// It's "inverse" because endianness normally describes a mapping from
// value bits to memory addresses; in this case the mapping is inverted.
// Ie. instead of particular value bits being held in certain addresses, in
// this case certain addresses are represented by particular value bits.
// See insert_vabits2_into_vabits8() for an example.
//
// But note that we don't compress the V bits stored in registers; they
// need to be explicit to make the shadow operations possible.  Therefore
// when moving values between registers and memory we need to convert
// between the expanded in-register format and the compressed in-memory
// format.  This isn't so difficult, it just requires careful attention in
// a few places.

// These represent eight bits of memory.
#define VA_BITS2_NOACCESS     0x0      // 00b
#define VA_BITS2_UNDEFINED    0x1      // 01b
#define VA_BITS2_DEFINED      0x2      // 10b
#define VA_BITS2_PARTDEFINED  0x3      // 11b
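
// A worked example of the packing just described (illustration only,
// derived from the bit assignments above): if byte (a+0) is defined (10),
// (a+1) is undefined (01), (a+2) is defined (10) and (a+3) is noaccess
// (00), then the vabits8 chunk for the 4-aligned address a holds
// 00_10_01_10b, ie. 0x26 -- bits [1..0] are 10 for (a+0), up through bits
// [7..6] being 00 for (a+3).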

// These represent 16 bits of memory.
#define VA_BITS4_NOACCESS     0x0      // 00_00b
#define VA_BITS4_UNDEFINED    0x5      // 01_01b
#define VA_BITS4_DEFINED      0xa      // 10_10b

// These represent 32 bits of memory.
#define VA_BITS8_NOACCESS     0x00     // 00_00_00_00b
#define VA_BITS8_UNDEFINED    0x55     // 01_01_01_01b
#define VA_BITS8_DEFINED      0xaa     // 10_10_10_10b

// These represent 64 bits of memory.
#define VA_BITS16_NOACCESS    0x0000   // 00_00_00_00b x 2
#define VA_BITS16_UNDEFINED   0x5555   // 01_01_01_01b x 2
#define VA_BITS16_DEFINED     0xaaaa   // 10_10_10_10b x 2


#define SM_CHUNKS             16384
#define SM_OFF(aaa)           (((aaa) & 0xffff) >> 2)
#define SM_OFF_16(aaa)        (((aaa) & 0xffff) >> 3)

// Paranoia:  it's critical for performance that the requested inlining
// occurs.  So try extra hard.
#define INLINE    inline __attribute__((always_inline))

static INLINE Addr start_of_this_sm ( Addr a ) {
   return (a & (~SM_MASK));
}
static INLINE Bool is_start_of_sm ( Addr a ) {
   return (start_of_this_sm(a) == a);
}

typedef
   struct {
      UChar vabits8[SM_CHUNKS];
   }
   SecMap;

// 3 distinguished secondary maps, one for no-access, one for
// accessible but undefined, and one for accessible and defined.
// Distinguished secondaries may never be modified.
#define SM_DIST_NOACCESS   0
#define SM_DIST_UNDEFINED  1
#define SM_DIST_DEFINED    2

static SecMap sm_distinguished[3];

static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

// Forward declaration
static void update_SM_counts(SecMap* oldSM, SecMap* newSM);

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
          || dist_sm == &sm_distinguished[1]
          || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
   if (new_sm == NULL)
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
                                   sizeof(SecMap) );
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   update_SM_counts(dist_sm, new_sm);
   return new_sm;
}

/* --------------- Stats --------------- */

static Int   n_issued_SMs      = 0;
static Int   n_deissued_SMs    = 0;
static Int   n_noaccess_SMs    = N_PRIMARY_MAP; // start with many noaccess DSMs
static Int   n_undefined_SMs   = 0;
static Int   n_defined_SMs     = 0;
static Int   n_non_DSM_SMs     = 0;
static Int   max_noaccess_SMs  = 0;
static Int   max_undefined_SMs = 0;
static Int   max_defined_SMs   = 0;
static Int   max_non_DSM_SMs   = 0;

/* # searches initiated in auxmap_L1, and # base cmps required */
static ULong n_auxmap_L1_searches  = 0;
static ULong n_auxmap_L1_cmps      = 0;
/* # of searches that missed in auxmap_L1 and therefore had to
   be handed to auxmap_L2.  And the number of nodes inserted. */
static ULong n_auxmap_L2_searches  = 0;
static ULong n_auxmap_L2_nodes     = 0;

static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;

static Int   n_secVBit_nodes   = 0;
static Int   max_secVBit_nodes = 0;

static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
{
   if      (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
   else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
   else if (oldSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs  --;
   else                                                  { n_non_DSM_SMs  --;
                                                           n_deissued_SMs ++; }

   if      (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
   else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
   else if (newSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs  ++;
   else                                                  { n_non_DSM_SMs  ++;
                                                           n_issued_SMs   ++; }

   if (n_noaccess_SMs  > max_noaccess_SMs ) max_noaccess_SMs  = n_noaccess_SMs;
   if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
   if (n_defined_SMs   > max_defined_SMs  ) max_defined_SMs   = n_defined_SMs;
   if (n_non_DSM_SMs   > max_non_DSM_SMs  ) max_non_DSM_SMs   = n_non_DSM_SMs;
}

/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.  DO NOT CHANGE THIS
   LAYOUT: the first word has to be the key for OSet fast lookups.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* Tunable parameter: How big is the L1 queue? */
#define N_AUXMAP_L1 24

/* Tunable parameter: How far along the L1 queue to insert
   entries resulting from L2 lookups? */
#define AUXMAP_L1_INSERT_IX 12

static struct {
          Addr       base;
          AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
       }
       auxmap_L1[N_AUXMAP_L1];

static OSet* auxmap_L2 = NULL;

static void init_auxmap_L1_L2 ( void )
{
   Int i;
   for (i = 0; i < N_AUXMAP_L1; i++) {
      auxmap_L1[i].base = 0;
      auxmap_L1[i].ent  = NULL;
   }

   tl_assert(0 == offsetof(AuxMapEnt,base));
   tl_assert(sizeof(Addr) == sizeof(void*));
   auxmap_L2 = VG_(OSetGen_Create)( /*keyOff*/ offsetof(AuxMapEnt,base),
                                    /*fastCmp*/ NULL,
                                    VG_(malloc), VG_(free) );
}

/* Check representation invariants; if OK return NULL; else a
   descriptive bit of text.  Also return the number of
   non-distinguished secondary maps referred to from the auxiliary
   primary maps. */

static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
{
   Word i, j;
   /* On a 32-bit platform, the L2 and L1 tables should
      both remain empty forever.

      On a 64-bit platform:
      In the L2 table:
         all .base & 0xFFFF == 0
         all .base > MAX_PRIMARY_ADDRESS
      In the L1 table:
         all .base & 0xFFFF == 0
         all (.base > MAX_PRIMARY_ADDRESS
              .base & 0xFFFF == 0
              and .ent points to an AuxMapEnt with the same .base)
             or
             (.base == 0 and .ent == NULL)
   */
   *n_secmaps_found = 0;
   if (sizeof(void*) == 4) {
      /* 32-bit platform */
      if (VG_(OSetGen_Size)(auxmap_L2) != 0)
         return "32-bit: auxmap_L2 is non-empty";
      for (i = 0; i < N_AUXMAP_L1; i++)
         if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
            return "32-bit: auxmap_L1 is non-empty";
   } else {
      /* 64-bit platform */
      UWord elems_seen = 0;
      AuxMapEnt *elem, *res;
      AuxMapEnt key;
      /* L2 table */
      VG_(OSetGen_ResetIter)(auxmap_L2);
      while ( (elem = VG_(OSetGen_Next)(auxmap_L2)) ) {
         elems_seen++;
         if (0 != (elem->base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
         if (elem->base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
         if (elem->sm == NULL)
            return "64-bit: .sm in _L2 is NULL";
         if (!is_distinguished_sm(elem->sm))
            (*n_secmaps_found)++;
      }
      if (elems_seen != n_auxmap_L2_nodes)
         return "64-bit: disagreement on number of elems in _L2";
      /* Check L1-L2 correspondence */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
            continue;
         if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
         if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
         if (auxmap_L1[i].ent == NULL)
            return "64-bit: .ent is NULL in auxmap_L1";
         if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
            return "64-bit: _L1 and _L2 bases are inconsistent";
         /* Look it up in auxmap_L2. */
         key.base = auxmap_L1[i].base;
         key.sm   = 0;
         res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
         if (res == NULL)
            return "64-bit: _L1 .base not found in _L2";
         if (res != auxmap_L1[i].ent)
            return "64-bit: _L1 .ent disagrees with _L2 entry";
      }
      /* Check L1 contains no duplicates */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0)
            continue;
         for (j = i+1; j < N_AUXMAP_L1; j++) {
            if (auxmap_L1[j].base == 0)
               continue;
            if (auxmap_L1[j].base == auxmap_L1[i].base)
               return "64-bit: duplicate _L1 .base entries";
         }
      }
   }
   return NULL; /* ok */
}

static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
{
   Word i;
   tl_assert(ent);
   tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
   for (i = N_AUXMAP_L1-1; i > rank; i--)
      auxmap_L1[i] = auxmap_L1[i-1];
   auxmap_L1[rank].base = ent->base;
   auxmap_L1[rank].ent  = ent;
}

static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   AuxMapEnt  key;
   AuxMapEnt* res;
   Word       i;

   tl_assert(a > MAX_PRIMARY_ADDRESS);
   a &= ~(Addr)0xFFFF;

   /* First search the front-cache, which is a self-organising
      list containing the most popular entries. */

   if (EXPECTED_TAKEN(auxmap_L1[0].base == a))
      return auxmap_L1[0].ent;
   if (EXPECTED_TAKEN(auxmap_L1[1].base == a)) {
      Addr       t_base = auxmap_L1[0].base;
      AuxMapEnt* t_ent  = auxmap_L1[0].ent;
      auxmap_L1[0].base = auxmap_L1[1].base;
      auxmap_L1[0].ent  = auxmap_L1[1].ent;
      auxmap_L1[1].base = t_base;
      auxmap_L1[1].ent  = t_ent;
      return auxmap_L1[0].ent;
   }

   n_auxmap_L1_searches++;

   for (i = 0; i < N_AUXMAP_L1; i++) {
      if (auxmap_L1[i].base == a) {
         break;
      }
   }
   tl_assert(i >= 0 && i <= N_AUXMAP_L1);

   n_auxmap_L1_cmps += (ULong)(i+1);

   if (i < N_AUXMAP_L1) {
      if (i > 0) {
         Addr       t_base = auxmap_L1[i-1].base;
         AuxMapEnt* t_ent  = auxmap_L1[i-1].ent;
         auxmap_L1[i-1].base = auxmap_L1[i-0].base;
         auxmap_L1[i-1].ent  = auxmap_L1[i-0].ent;
         auxmap_L1[i-0].base = t_base;
         auxmap_L1[i-0].ent  = t_ent;
         i--;
      }
      return auxmap_L1[i].ent;
   }

   n_auxmap_L2_searches++;

   /* First see if we already have it. */
   key.base = a;
   key.sm   = 0;

   res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
   if (res)
      insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
   return res;
}
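
/* (A note on the L1 behaviour above, for clarity: a hit at L1 position i
   swaps entries i and i-1, so frequently-used entries gradually migrate
   towards the front, while an entry found only via the L2 OSet is
   re-inserted at position AUXMAP_L1_INSERT_IX (12), pushing the entries
   below it down one slot and dropping the last one from the L1 cache --
   the L2 table itself still holds every entry.) */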

static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt *nyu, *res;

   /* First see if we already have it. */
   res = maybe_find_in_auxmap( a );
   if (EXPECTED_TAKEN(res))
      return res;

   /* Ok, there's no entry in the secondary map, so we'll have
      to allocate one. */
   a &= ~(Addr)0xFFFF;

   nyu = (AuxMapEnt*) VG_(OSetGen_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
   tl_assert(nyu);
   nyu->base = a;
   nyu->sm   = &sm_distinguished[SM_DIST_NOACCESS];
   VG_(OSetGen_Insert)( auxmap_L2, nyu );
   insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
   n_auxmap_L2_nodes++;
   return nyu;
}

/* --------------- SecMap fundamentals --------------- */

// In all these, 'low' means it's definitely in the main primary map,
// 'high' means it's definitely in the auxiliary table.

static INLINE SecMap** get_secmap_low_ptr ( Addr a )
{
   UWord pm_off = a >> 16;
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(pm_off < N_PRIMARY_MAP);
#  endif
   return &primary_map[ pm_off ];
}

static INLINE SecMap** get_secmap_high_ptr ( Addr a )
{
   AuxMapEnt* am = find_or_alloc_in_auxmap(a);
   return &am->sm;
}

static SecMap** get_secmap_ptr ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_low_ptr(a)
          : get_secmap_high_ptr(a));
}

static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
{
   return *get_secmap_low_ptr(a);
}

static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
{
   return *get_secmap_high_ptr(a);
}

static INLINE SecMap* get_secmap_for_writing_low(Addr a)
{
   SecMap** p = get_secmap_low_ptr(a);
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
{
   SecMap** p = get_secmap_high_ptr(a);
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static INLINE SecMap* get_secmap_for_reading ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_reading_low (a)
          : get_secmap_for_reading_high(a) );
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_for_writing ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_writing_low (a)
          : get_secmap_for_writing_high(a) );
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      return get_secmap_for_reading_low(a);
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}

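/* (A note on the copy-on-write behaviour above, for clarity: a read via
   get_secmap_for_reading may legitimately return one of the three
   distinguished secondaries, but the first write into a 64kB chunk whose
   primary (or auxmap) entry still points at a distinguished secondary goes
   through copy_for_writing, which allocates a private SecMap, copies the
   distinguished contents into it, and installs it in place of the
   distinguished one.) */
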
/* --------------- Fundamental functions --------------- */

static INLINE
void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
{
   UInt shift =  (a & 3)  << 1;        // shift by 0, 2, 4, or 6
   *vabits8  &= ~(0x3     << shift);   // mask out the two old bits
   *vabits8  |=  (vabits2 << shift);   // mask in the two new bits
}

static INLINE
void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
   shift     =  (a & 2)   << 1;        // shift by 0 or 4
   *vabits8 &= ~(0xf      << shift);   // mask out the four old bits
   *vabits8 |=  (vabits4  << shift);   // mask in the four new bits
}

static INLINE
UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift = (a & 3) << 1;          // shift by 0, 2, 4, or 6
   vabits8 >>= shift;                  // shift the two bits to the bottom
   return 0x3 & vabits8;               // mask out the rest
}

static INLINE
UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
   shift = (a & 2) << 1;               // shift by 0 or 4
   vabits8 >>= shift;                  // shift the four bits to the bottom
   return 0xf & vabits8;               // mask out the rest
}

// Note that these four are only used in slow cases.  The fast cases do
// clever things like combine the auxmap check (in
// get_secmap_{read,writ}able) with alignment checks.

// *** WARNING! ***
// Any time this function is called, if it is possible that vabits2
// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
// sec-V-bits table must also be set!
static INLINE
void set_vabits2 ( Addr a, UChar vabits2 )
{
   SecMap* sm     = get_secmap_for_writing(a);
   UWord   sm_off = SM_OFF(a);
   insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
}

static INLINE
UChar get_vabits2 ( Addr a )
{
   SecMap* sm      = get_secmap_for_reading(a);
   UWord   sm_off  = SM_OFF(a);
   UChar   vabits8 = sm->vabits8[sm_off];
   return extract_vabits2_from_vabits8(a, vabits8);
}

// *** WARNING! ***
// Any time this function is called, if it is possible that any of the
// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
// corresponding entry(s) in the sec-V-bits table must also be set!
static INLINE
UChar get_vabits8_for_aligned_word32 ( Addr a )
{
   SecMap* sm      = get_secmap_for_reading(a);
   UWord   sm_off  = SM_OFF(a);
   UChar   vabits8 = sm->vabits8[sm_off];
   return vabits8;
}

static INLINE
void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
{
   SecMap* sm     = get_secmap_for_writing(a);
   UWord   sm_off = SM_OFF(a);
   sm->vabits8[sm_off] = vabits8;
}


// Forward declarations
static UWord get_sec_vbits8(Addr a);
static void  set_sec_vbits8(Addr a, UWord vbits8);

// Returns False if there was an addressability error.
static INLINE
Bool set_vbits8 ( Addr a, UChar vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);
   if ( VA_BITS2_NOACCESS != vabits2 ) {
      // Addressable.  Convert in-register format to in-memory format.
      // Also remove any existing sec V bit entry for the byte if no
      // longer necessary.
      if      ( V_BITS8_DEFINED   == vbits8 ) { vabits2 = VA_BITS2_DEFINED;   }
      else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
      else                                    { vabits2 = VA_BITS2_PARTDEFINED;
                                                set_sec_vbits8(a, vbits8);  }
      set_vabits2(a, vabits2);

   } else {
      // Unaddressable!  Do nothing -- when writing to unaddressable
      // memory it acts as a black hole, and the V bits can never be seen
      // again.  So we don't have to write them at all.
      ok = False;
   }
   return ok;
}

// Returns False if there was an addressability error.  In that case, we put
// all defined bits into vbits8.
static INLINE
Bool get_vbits8 ( Addr a, UChar* vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);

   // Convert the in-memory format to in-register format.
   if      ( VA_BITS2_DEFINED   == vabits2 ) { *vbits8 = V_BITS8_DEFINED;   }
   else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
   else if ( VA_BITS2_NOACCESS  == vabits2 ) {
      *vbits8 = V_BITS8_DEFINED;    // Make V bits defined!
      ok = False;
   } else {
      tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
      *vbits8 = get_sec_vbits8(a);
   }
   return ok;
}
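
/* (Illustration only, following directly from set_vbits8/get_vbits8 above:
   storing the partially-defined value vbits8 == 0x0F to an addressable
   byte sets its vabits2 to VA_BITS2_PARTDEFINED and records 0x0F against
   that address in the sec-V-bits table; a later get_vbits8 on the same
   byte fetches the 0x0F back from that table.) */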


/* --------------- Secondary V bit table ------------ */

// This table holds the full V bit pattern for partially-defined bytes
// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
// memory.
//
// Note: the nodes in this table can become stale.  Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
// node will not necessarily be removed.  This is because checking for
// whether removal is necessary would slow down the fast paths.
//
// To avoid the stale nodes building up too much, we periodically (once the
// table reaches a certain size) garbage collect (GC) the table by
// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
// are stale and haven't been touched for a certain number of collections.
// If more than a certain proportion of nodes survived, we increase the
// table size so that GCs occur less often.
//
// (So this is a bit different to a traditional GC, where you definitely want
// to remove any dead nodes.  It's more like we have a resizable cache and
// we're trying to find the right balance of how many elements to evict and
// how big to make the cache.)
//
// This policy is designed to avoid bad table bloat in the worst case where
// a program creates huge numbers of stale PDBs -- we would get this bloat
// if we had no GC -- while handling well the case where a node becomes
// stale but shortly afterwards is rewritten with a PDB and so becomes
// non-stale again (which happens quite often, eg. in perf/bz2).  If we just
// removed all stale nodes as soon as possible, we would only end up
// re-adding a lot of them again later.  The "sufficiently stale" approach
// avoids this.  (If a program has many live PDBs, performance will just
// suck, there's no way around that.)

static OSet* secVBitTable;

// Stats
static ULong sec_vbits_new_nodes = 0;
static ULong sec_vbits_updates   = 0;

// This must be a power of two;  this is checked in mc_pre_clo_init().
// The size chosen here is a trade-off:  if the nodes are bigger (ie. cover
// a larger address range) they take more space but we can get multiple
// partially-defined bytes in one if they are close to each other, reducing
// the total number of nodes.  In practice sometimes they are clustered (eg.
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
// row), but often not.  So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE     16

// We make the table bigger if more than this many nodes survive a GC.
#define MAX_SURVIVOR_PROPORTION  0.5

// Each time we make the table bigger, we increase it by this much.
#define TABLE_GROWTH_FACTOR      2

// This defines "sufficiently stale" -- any node that hasn't been touched in
// this many GCs will be removed.
#define MAX_STALE_AGE            2

// We GC the table when it gets this many nodes in it, ie. it's effectively
// the table size.  It can change.
static Int  secVBitLimit = 1024;
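
// For example (just restating the constants above): with the initial
// secVBitLimit of 1024, a GC is triggered when the table reaches 1024
// nodes; if more than 1024 * MAX_SURVIVOR_PROPORTION = 512 nodes survive
// that GC, the limit is multiplied by TABLE_GROWTH_FACTOR to become 2048,
// so the next GC happens at 2048 nodes, and so on.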

// The number of GCs done, used to age sec-V-bit nodes for eviction.
// Because it's unsigned, wrapping doesn't matter -- the right answer will
// come out anyway.
static UInt GCs_done = 0;

typedef
   struct {
      Addr  a;
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];
      UInt  last_touched;
   }
   SecVBitNode;

static OSet* createSecVBitTable(void)
{
   return VG_(OSetGen_Create)( offsetof(SecVBitNode, a),
                               NULL, // use fast comparisons
                               VG_(malloc), VG_(free) );
}

static void gcSecVBitTable(void)
{
   OSet*        secVBitTable2;
   SecVBitNode* n;
   Int          i, n_nodes = 0, n_survivors = 0;

   GCs_done++;

   // Create the new table.
   secVBitTable2 = createSecVBitTable();

   // Traverse the table, moving fresh nodes into the new table.
   VG_(OSetGen_ResetIter)(secVBitTable);
   while ( (n = VG_(OSetGen_Next)(secVBitTable)) ) {
      Bool keep = False;
      if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
         // Keep node if it's been touched recently enough (regardless of
         // freshness/staleness).
         keep = True;
      } else {
         // Keep node if any of its bytes are non-stale.  Using
         // get_vabits2() for the lookup is not very efficient, but I don't
         // think it matters.
         for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
            if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
               keep = True;      // Found a non-stale byte, so keep
               break;
            }
         }
      }

      if ( keep ) {
         // Insert a copy of the node into the new table.
         SecVBitNode* n2 =
            VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
         *n2 = *n;
         VG_(OSetGen_Insert)(secVBitTable2, n2);
      }
   }

   // Get the before and after sizes.
   n_nodes     = VG_(OSetGen_Size)(secVBitTable);
   n_survivors = VG_(OSetGen_Size)(secVBitTable2);

   // Destroy the old table, and put the new one in its place.
   VG_(OSetGen_Destroy)(secVBitTable);
   secVBitTable = secVBitTable2;

   if (VG_(clo_verbosity) > 1) {
      Char percbuf[6];
      VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
      VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
                   n_nodes, n_survivors, percbuf);
   }

   // Increase table size if necessary.
   if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
      secVBitLimit *= TABLE_GROWTH_FACTOR;
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
                      secVBitLimit);
   }
}

static UWord get_sec_vbits8(Addr a)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          amod     = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   UChar        vbits8;
   tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   vbits8 = n->vbits8[amod];
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   return vbits8;
}

static void set_sec_vbits8(Addr a, UWord vbits8)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          i, amod  = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   if (n) {
      n->vbits8[amod] = vbits8;     // update
      n->last_touched = GCs_done;
      sec_vbits_updates++;
   } else {
      // New node:  assign the specific byte, make the rest invalid (they
      // should never be read as-is, but be cautious).
      n = VG_(OSetGen_AllocNode)(secVBitTable, sizeof(SecVBitNode));
      n->a            = aAligned;
      for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
         n->vbits8[i] = V_BITS8_UNDEFINED;
      }
      n->vbits8[amod] = vbits8;
      n->last_touched = GCs_done;

      // Do a table GC if necessary.  Nb: do this before inserting the new
      // node, to avoid erroneously GC'ing the new node.
      if (secVBitLimit == VG_(OSetGen_Size)(secVBitTable)) {
         gcSecVBitTable();
      }

      // Insert the new node.
      VG_(OSetGen_Insert)(secVBitTable, n);
      sec_vbits_new_nodes++;

      n_secVBit_nodes = VG_(OSetGen_Size)(secVBitTable);
      if (n_secVBit_nodes > max_secVBit_nodes)
         max_secVBit_nodes = n_secVBit_nodes;
   }
}
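
/* (Illustration of the node granularity used above: the V bits for a byte
   at address a live in the SecVBitNode whose key is
   VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE), in slot a % BYTES_PER_SEC_VBIT_NODE;
   so with 16-byte nodes, addresses 0x5000..0x500F all share one node.) */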

/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th least significant byte
   in a wordszB-sized word, given the specified endianness.  (So byteno 0
   refers to the least significant byte of the value.) */
static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}
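
/* (Worked example, illustration only: for an 8-byte word,
   byte_offset_w(8, False, 0) == 0, since a little-endian host keeps the
   least significant byte at the lowest address, while
   byte_offset_w(8, True, 0) == 7, since a big-endian host keeps the least
   significant byte at the highest address.) */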


/* --------------- Ignored address ranges --------------- */

#define M_IGNORE_RANGES 4

typedef
   struct {
      Int  used;
      Addr start[M_IGNORE_RANGES];
      Addr end[M_IGNORE_RANGES];
   }
   IgnoreRanges;

static IgnoreRanges ignoreRanges;

static INLINE Bool in_ignored_range ( Addr a )
{
   Int i;
   if (EXPECTED_TAKEN(ignoreRanges.used == 0))
      return False;
   for (i = 0; i < ignoreRanges.used; i++) {
      if (a >= ignoreRanges.start[i] && a < ignoreRanges.end[i])
         return True;
   }
   return False;
}


/* Parse a 32- or 64-bit hex number, including leading 0x, from string
   starting at *ppc, putting result in *result, and return True.  Or
   fail, in which case *ppc and *result are undefined, and return
   False. */

static Bool isHex ( UChar c )
{
   return ((c >= '0' && c <= '9')
           || (c >= 'a' && c <= 'f')
           || (c >= 'A' && c <= 'F'));
}

static UInt fromHex ( UChar c )
{
   if (c >= '0' && c <= '9')
      return (UInt)c - (UInt)'0';
   if (c >= 'a' && c <= 'f')
      return 10 + (UInt)c - (UInt)'a';
   if (c >= 'A' && c <= 'F')
      return 10 + (UInt)c - (UInt)'A';
   /*NOTREACHED*/
   tl_assert(0);
   return 0;
}

static Bool parse_Addr ( UChar** ppc, Addr* result )
{
   Int used, limit = 2 * sizeof(Addr);
   if (**ppc != '0')
      return False;
   (*ppc)++;
   if (**ppc != 'x')
      return False;
   (*ppc)++;
   *result = 0;
   used = 0;
   while (isHex(**ppc)) {
      UInt d = fromHex(**ppc);
      tl_assert(d < 16);
      *result = ((*result) << 4) | fromHex(**ppc);
      (*ppc)++;
      used++;
      if (used > limit) return False;
   }
   if (used == 0)
      return False;
   return True;
}

/* Parse two such numbers separated by a dash, or fail. */

static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
{
   Bool ok = parse_Addr(ppc, result1);
   if (!ok)
      return False;
   if (**ppc != '-')
      return False;
   (*ppc)++;
   ok = parse_Addr(ppc, result2);
   if (!ok)
      return False;
   return True;
}

/* Parse a set of ranges separated by commas into 'ignoreRanges', or
   fail. */

static Bool parse_ignore_ranges ( UChar* str0 )
{
   Addr start, end;
   Bool ok;
   UChar*  str = str0;
   UChar** ppc = &str;
   ignoreRanges.used = 0;
   while (1) {
      ok = parse_range(ppc, &start, &end);
      if (!ok)
         return False;
      if (ignoreRanges.used >= M_IGNORE_RANGES)
         return False;
      ignoreRanges.start[ignoreRanges.used] = start;
      ignoreRanges.end[ignoreRanges.used]   = end;
      ignoreRanges.used++;
      if (**ppc == 0)
         return True;
      if (**ppc != ',')
         return False;
      (*ppc)++;
   }
   /*NOTREACHED*/
   return False;
}
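
/* (Illustration only, showing the syntax the parser above accepts: a
   string such as "0x11000000-0x11100000,0x22000000-0x22220000" parses
   successfully, leaving ignoreRanges.used == 2 with those two [start,end)
   pairs; a missing "0x", a malformed range, or more than M_IGNORE_RANGES
   ranges makes parse_ignore_ranges return False.) */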


/* --------------- Load/store slow cases. --------------- */

// Forward declarations
static void mc_record_address_error  ( ThreadId tid, Addr a,
                                       Int size, Bool isWrite );
static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* s );
static void mc_record_regparam_error ( ThreadId tid, Char* msg );
static void mc_record_memparam_error ( ThreadId tid, Addr a,
                                       Bool isAddrErr, Char* msg );
static void mc_record_jump_error     ( ThreadId tid, Addr a );

static
#ifndef PERF_FAST_LOADV
INLINE
#endif
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
   /* Make up a 64-bit result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vbits64     = V_BITS64_UNDEFINED;
   SizeT szB         = nBits / 8;
   SSizeT i          = szB-1;    // Must be signed
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  partial_load_exemption_applies;
   UChar vbits8;
   Bool  ok;

   PROF_EVENT(30, "mc_LOADVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from LOADV64 and LOADV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED))
         return V_BITS64_DEFINED;
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED))
         return V_BITS64_UNDEFINED;
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
      /* else fall into slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   for (i = szB-1; i >= 0; i--) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a + byte_offset_w(szB, bigendian, i);
      ok = get_vbits8(ai, &vbits8);
      if (!ok) n_addrs_bad++;
      vbits64 <<= 8;
      vbits64 |= vbits8;
   }

   /* This is a hack which avoids producing errors for code which
      insists on stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressable place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
                                  && VG_IS_WORD_ALIGNED(a)
                                  && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, False );

   return vbits64;
}


static
#ifndef PERF_FAST_STOREV
INLINE
#endif
void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
{
   SizeT szB = nBits / 8;
   SizeT i, n_addrs_bad = 0;
   UChar vbits8;
   Addr  ai;
   Bool  ok;

   PROF_EVENT(35, "mc_STOREVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from STOREV64 and STOREV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                          (VA_BITS16_DEFINED   == vabits16 ||
                           VA_BITS16_UNDEFINED == vabits16) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS64_DEFINED == vbytes)) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
            return;
         } else if (V_BITS64_UNDEFINED == vbytes) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
                          (VA_BITS8_DEFINED   == vabits8 ||
                           VA_BITS8_UNDEFINED == vabits8) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
            sm->vabits8[sm_off] = VA_BITS8_DEFINED;
            return;
         } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
            sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai     = a + byte_offset_w(szB, bigendian, i);
      vbits8 = vbytes & 0xff;
      ok     = set_vbits8(ai, vbits8);
      if (!ok) n_addrs_bad++;
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
}


njn25e49d8e72002-09-23 09:36:25 +00001324/*------------------------------------------------------------*/
1325/*--- Setting permissions over address ranges. ---*/
1326/*------------------------------------------------------------*/
1327
njn1d0825f2006-03-27 11:37:07 +00001328static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
1329 UWord dsm_num )
sewardj23eb2fd2005-04-22 16:29:19 +00001330{
njn1d0825f2006-03-27 11:37:07 +00001331 UWord sm_off, sm_off16;
1332 UWord vabits2 = vabits16 & 0x3;
1333 SizeT lenA, lenB, len_to_next_secmap;
1334 Addr aNext;
sewardjae986ca2005-10-12 12:53:20 +00001335 SecMap* sm;
njn1d0825f2006-03-27 11:37:07 +00001336 SecMap** sm_ptr;
sewardjae986ca2005-10-12 12:53:20 +00001337 SecMap* example_dsm;
1338
sewardj23eb2fd2005-04-22 16:29:19 +00001339 PROF_EVENT(150, "set_address_range_perms");
1340
njn1d0825f2006-03-27 11:37:07 +00001341 /* Check the V+A bits make sense. */
njndbf7ca72006-03-31 11:57:59 +00001342 tl_assert(VA_BITS16_NOACCESS == vabits16 ||
1343 VA_BITS16_UNDEFINED == vabits16 ||
1344 VA_BITS16_DEFINED == vabits16);
sewardj23eb2fd2005-04-22 16:29:19 +00001345
njn1d0825f2006-03-27 11:37:07 +00001346 // This code should never write PDBs; ensure this. (See comment above
1347 // set_vabits2().)
njndbf7ca72006-03-31 11:57:59 +00001348 tl_assert(VA_BITS2_PARTDEFINED != vabits2);
njn1d0825f2006-03-27 11:37:07 +00001349
1350 if (lenT == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001351 return;
1352
njn1d0825f2006-03-27 11:37:07 +00001353 if (lenT > 100 * 1000 * 1000) {
1354 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
1355 Char* s = "unknown???";
njndbf7ca72006-03-31 11:57:59 +00001356 if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
1357 if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
1358 if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
njn1d0825f2006-03-27 11:37:07 +00001359 VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
1360 "large range %lu (%s)", lenT, s);
sewardj23eb2fd2005-04-22 16:29:19 +00001361 }
1362 }
1363
njn1d0825f2006-03-27 11:37:07 +00001364#ifndef PERF_FAST_SARP
sewardj23eb2fd2005-04-22 16:29:19 +00001365 /*------------------ debug-only case ------------------ */
njn1d0825f2006-03-27 11:37:07 +00001366 {
1367 // Endianness doesn't matter here because all bytes are being set to
1368 // the same value.
1369 // Nb: We don't have to worry about updating the sec-V-bits table
1370 // after these set_vabits2() calls because this code never writes
njndbf7ca72006-03-31 11:57:59 +00001371 // VA_BITS2_PARTDEFINED values.
njn1d0825f2006-03-27 11:37:07 +00001372 SizeT i;
1373 for (i = 0; i < lenT; i++) {
1374 set_vabits2(a + i, vabits2);
1375 }
1376 return;
njn25e49d8e72002-09-23 09:36:25 +00001377 }
njn1d0825f2006-03-27 11:37:07 +00001378#endif
sewardj23eb2fd2005-04-22 16:29:19 +00001379
1380 /*------------------ standard handling ------------------ */
sewardj23eb2fd2005-04-22 16:29:19 +00001381
njn1d0825f2006-03-27 11:37:07 +00001382 /* Get the distinguished secondary that we might want
sewardj23eb2fd2005-04-22 16:29:19 +00001383 to use (part of the space-compression scheme). */
njn1d0825f2006-03-27 11:37:07 +00001384 example_dsm = &sm_distinguished[dsm_num];
1385
1386 // We have to handle ranges covering various combinations of partial and
1387 // whole sec-maps. Here is how parts 1, 2 and 3 are used in each case.
1388 // Cases marked with a '*' are common.
1389 //
1390 // TYPE PARTS USED
1391 // ---- ----------
1392 // * one partial sec-map (p) 1
1393 // - one whole sec-map (P) 2
1394 //
1395 // * two partial sec-maps (pp) 1,3
1396 // - one partial, one whole sec-map (pP) 1,2
1397 // - one whole, one partial sec-map (Pp) 2,3
1398 // - two whole sec-maps (PP) 2,2
1399 //
1400 // * one partial, one whole, one partial (pPp) 1,2,3
1401 // - one partial, two whole (pPP) 1,2,2
1402 // - two whole, one partial (PPp) 2,2,3
1403 // - three whole (PPP) 2,2,2
1404 //
1405 // * one partial, N-2 whole, one partial (pP...Pp) 1,2...2,3
1406 // - one partial, N-1 whole (pP...PP) 1,2...2,2
1407 // - N-1 whole, one partial (PP...Pp) 2,2...2,3
1408 // - N whole (PP...PP) 2,2...2,2
1409
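   // Worked example (assuming 64KB sec-maps, as per the 64KB steps in
   // Part 2 below): a = 0x1FFF0, lenT = 0x30000 gives lenA = 0x10 (the
   // tail of the sec-map starting at 0x10000) and lenB = 0x2FFF0.
   // Part 2 then retires the two whole sec-maps at 0x20000 and 0x30000,
   // and Part 3 finishes the remaining 0xFFF0 bytes in the sec-map at
   // 0x40000 -- a 'pPPp' case, i.e. parts 1,2,2,3.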
1410 // Break up total length (lenT) into two parts: length in the first
1411 // sec-map (lenA), and the rest (lenB); lenT == lenA + lenB.
1412 aNext = start_of_this_sm(a) + SM_SIZE;
1413 len_to_next_secmap = aNext - a;
1414 if ( lenT <= len_to_next_secmap ) {
1415 // Range entirely within one sec-map. Covers almost all cases.
1416 PROF_EVENT(151, "set_address_range_perms-single-secmap");
1417 lenA = lenT;
1418 lenB = 0;
1419 } else if (is_start_of_sm(a)) {
1420 // Range spans at least one whole sec-map, and starts at the beginning
1421 // of a sec-map; skip to Part 2.
1422 PROF_EVENT(152, "set_address_range_perms-startof-secmap");
1423 lenA = 0;
1424 lenB = lenT;
1425 goto part2;
sewardj23eb2fd2005-04-22 16:29:19 +00001426 } else {
njn1d0825f2006-03-27 11:37:07 +00001427 // Range spans two or more sec-maps, first one is partial.
1428 PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
1429 lenA = len_to_next_secmap;
1430 lenB = lenT - lenA;
1431 }
1432
1433 //------------------------------------------------------------------------
1434 // Part 1: Deal with the first sec_map. Most of the time the range will be
1435 // entirely within a sec_map and this part alone will suffice. Also,
1436 // doing it this way lets us avoid repeatedly testing for the crossing of
1437 // a sec-map boundary within these loops.
1438 //------------------------------------------------------------------------
1439
1440 // If it's distinguished, make it undistinguished if necessary.
1441 sm_ptr = get_secmap_ptr(a);
1442 if (is_distinguished_sm(*sm_ptr)) {
1443 if (*sm_ptr == example_dsm) {
1444 // Sec-map already has the V+A bits that we want, so skip.
1445 PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
1446 a = aNext;
1447 lenA = 0;
sewardj23eb2fd2005-04-22 16:29:19 +00001448 } else {
njn1d0825f2006-03-27 11:37:07 +00001449 PROF_EVENT(155, "set_address_range_perms-dist-sm1");
1450 *sm_ptr = copy_for_writing(*sm_ptr);
sewardj23eb2fd2005-04-22 16:29:19 +00001451 }
1452 }
njn1d0825f2006-03-27 11:37:07 +00001453 sm = *sm_ptr;
sewardj23eb2fd2005-04-22 16:29:19 +00001454
njn1d0825f2006-03-27 11:37:07 +00001455 // 1 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001456 while (True) {
sewardj23eb2fd2005-04-22 16:29:19 +00001457 if (VG_IS_8_ALIGNED(a)) break;
njn1d0825f2006-03-27 11:37:07 +00001458 if (lenA < 1) break;
1459 PROF_EVENT(156, "set_address_range_perms-loop1a");
1460 sm_off = SM_OFF(a);
1461 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1462 a += 1;
1463 lenA -= 1;
1464 }
1465 // 8-aligned, 8 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001466 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001467 if (lenA < 8) break;
1468 PROF_EVENT(157, "set_address_range_perms-loop8a");
1469 sm_off16 = SM_OFF_16(a);
1470 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1471 a += 8;
1472 lenA -= 8;
1473 }
1474 // 1 byte steps
1475 while (True) {
1476 if (lenA < 1) break;
1477 PROF_EVENT(158, "set_address_range_perms-loop1b");
1478 sm_off = SM_OFF(a);
1479 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1480 a += 1;
1481 lenA -= 1;
sewardj23eb2fd2005-04-22 16:29:19 +00001482 }
1483
njn1d0825f2006-03-27 11:37:07 +00001484 // We've finished the first sec-map. Is that it?
1485 if (lenB == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001486 return;
1487
njn1d0825f2006-03-27 11:37:07 +00001488 //------------------------------------------------------------------------
1489 // Part 2: Fast-set entire sec-maps at a time.
1490 //------------------------------------------------------------------------
1491 part2:
1492 // 64KB-aligned, 64KB steps.
1493 // Nb: we can reach here with lenB < SM_SIZE
sewardj23eb2fd2005-04-22 16:29:19 +00001494 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001495 if (lenB < SM_SIZE) break;
1496 tl_assert(is_start_of_sm(a));
1497 PROF_EVENT(159, "set_address_range_perms-loop64K");
1498 sm_ptr = get_secmap_ptr(a);
1499 if (!is_distinguished_sm(*sm_ptr)) {
1500 PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
1501 // Free the non-distinguished sec-map that we're replacing. This
1502 // case happens moderately often, enough to be worthwhile.
1503 VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
1504 }
1505 update_SM_counts(*sm_ptr, example_dsm);
1506 // Make the sec-map entry point to the example DSM
1507 *sm_ptr = example_dsm;
1508 lenB -= SM_SIZE;
1509 a += SM_SIZE;
1510 }
sewardj23eb2fd2005-04-22 16:29:19 +00001511
njn1d0825f2006-03-27 11:37:07 +00001512 // We've finished the whole sec-maps. Is that it?
1513 if (lenB == 0)
1514 return;
1515
1516 //------------------------------------------------------------------------
1517 // Part 3: Finish off the final partial sec-map, if necessary.
1518 //------------------------------------------------------------------------
1519
1520 tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);
1521
1522 // If it's distinguished, make it undistinguished if necessary.
1523 sm_ptr = get_secmap_ptr(a);
1524 if (is_distinguished_sm(*sm_ptr)) {
1525 if (*sm_ptr == example_dsm) {
1526 // Sec-map already has the V+A bits that we want, so stop.
1527 PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
1528 return;
1529 } else {
1530 PROF_EVENT(162, "set_address_range_perms-dist-sm2");
1531 *sm_ptr = copy_for_writing(*sm_ptr);
1532 }
1533 }
1534 sm = *sm_ptr;
1535
1536 // 8-aligned, 8 byte steps
1537 while (True) {
1538 if (lenB < 8) break;
1539 PROF_EVENT(163, "set_address_range_perms-loop8b");
1540 sm_off16 = SM_OFF_16(a);
1541 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1542 a += 8;
1543 lenB -= 8;
1544 }
1545 // 1 byte steps
1546 while (True) {
1547 if (lenB < 1) return;
1548 PROF_EVENT(164, "set_address_range_perms-loop1c");
1549 sm_off = SM_OFF(a);
1550 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1551 a += 1;
1552 lenB -= 1;
1553 }
sewardj23eb2fd2005-04-22 16:29:19 +00001554}
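
/* Note on Part 2 above: once a whole sec-map becomes uniformly noaccess,
   undefined or defined, its primary-map entry is simply pointed at the
   shared distinguished sec-map for that state and any private copy is
   unmapped, so setting permissions on a large range costs time
   proportional to the number of sec-maps touched rather than to the
   number of bytes. */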
sewardj45d94cc2005-04-20 14:44:11 +00001555
sewardjc859fbf2005-04-22 21:10:28 +00001556
1557/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +00001558
njndbf7ca72006-03-31 11:57:59 +00001559void MC_(make_mem_noaccess) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001560{
njndbf7ca72006-03-31 11:57:59 +00001561 PROF_EVENT(40, "MC_(make_mem_noaccess)");
1562 DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
njn1d0825f2006-03-27 11:37:07 +00001563 set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
njn25e49d8e72002-09-23 09:36:25 +00001564}
1565
njndbf7ca72006-03-31 11:57:59 +00001566void MC_(make_mem_undefined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001567{
njndbf7ca72006-03-31 11:57:59 +00001568 PROF_EVENT(41, "MC_(make_mem_undefined)");
1569 DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
1570 set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001571}
1572
njndbf7ca72006-03-31 11:57:59 +00001573void MC_(make_mem_defined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001574{
njndbf7ca72006-03-31 11:57:59 +00001575 PROF_EVENT(42, "MC_(make_mem_defined)");
1576 DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
1577 set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001578}
1579
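/* These three are the usual entry points for wholesale permission
   changes -- for example, a freshly malloc'd payload is typically made
   undefined and freed or unmapped memory made noaccess.  Each passes the
   matching SM_DIST_* index so that whole sec-maps can be shared via the
   distinguished-sec-map scheme described above. */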
sewardjfb1e9ad2006-03-10 13:41:58 +00001580/* For each byte in [a,a+len), if the byte is addressable, make it be
1581 defined, but if it isn't addressable, leave it alone. In other
njndbf7ca72006-03-31 11:57:59 +00001582 words a version of MC_(make_mem_defined) that doesn't mess with
sewardjfb1e9ad2006-03-10 13:41:58 +00001583 addressability. Low-performance implementation. */
njndbf7ca72006-03-31 11:57:59 +00001584static void make_mem_defined_if_addressable ( Addr a, SizeT len )
sewardjfb1e9ad2006-03-10 13:41:58 +00001585{
1586 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001587 UChar vabits2;
njndbf7ca72006-03-31 11:57:59 +00001588 DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
sewardjfb1e9ad2006-03-10 13:41:58 +00001589 for (i = 0; i < len; i++) {
njn1d0825f2006-03-27 11:37:07 +00001590 vabits2 = get_vabits2( a+i );
1591 if (EXPECTED_TAKEN(VA_BITS2_NOACCESS != vabits2)) {
njndbf7ca72006-03-31 11:57:59 +00001592 set_vabits2(a+i, VA_BITS2_DEFINED);
njn1d0825f2006-03-27 11:37:07 +00001593 }
sewardjfb1e9ad2006-03-10 13:41:58 +00001594 }
1595}
1596
njn9b007f62003-04-07 14:40:25 +00001597
sewardj45f4e7c2005-09-27 19:20:21 +00001598/* --- Block-copy permissions (needed for implementing realloc() and
1599 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +00001600
njn1d0825f2006-03-27 11:37:07 +00001601void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
sewardjc859fbf2005-04-22 21:10:28 +00001602{
sewardj45f4e7c2005-09-27 19:20:21 +00001603 SizeT i, j;
sewardjf2184912006-05-03 22:13:57 +00001604 UChar vabits2, vabits8;
1605 Bool aligned, nooverlap;
sewardjc859fbf2005-04-22 21:10:28 +00001606
njn1d0825f2006-03-27 11:37:07 +00001607 DEBUG("MC_(copy_address_range_state)\n");
1608 PROF_EVENT(50, "MC_(copy_address_range_state)");
sewardj45f4e7c2005-09-27 19:20:21 +00001609
sewardjf2184912006-05-03 22:13:57 +00001610 if (len == 0 || src == dst)
sewardj45f4e7c2005-09-27 19:20:21 +00001611 return;
1612
sewardjf2184912006-05-03 22:13:57 +00001613 aligned = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
1614 nooverlap = src+len <= dst || dst+len <= src;
sewardj45f4e7c2005-09-27 19:20:21 +00001615
sewardjf2184912006-05-03 22:13:57 +00001616 if (nooverlap && aligned) {
1617
1618 /* Vectorised fast case, when no overlap and suitably aligned */
1619 /* vector loop */
1620 i = 0;
1621 while (len >= 4) {
1622 vabits8 = get_vabits8_for_aligned_word32( src+i );
1623 set_vabits8_for_aligned_word32( dst+i, vabits8 );
1624 if (EXPECTED_TAKEN(VA_BITS8_DEFINED == vabits8
1625 || VA_BITS8_UNDEFINED == vabits8
1626 || VA_BITS8_NOACCESS == vabits8)) {
1627 /* do nothing */
1628 } else {
1629 /* have to copy secondary map info */
1630 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
1631 set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
1632 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
1633 set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
1634 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
1635 set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
1636 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
1637 set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
1638 }
1639 i += 4;
1640 len -= 4;
1641 }
1642 /* fixup loop */
1643 while (len >= 1) {
njn1d0825f2006-03-27 11:37:07 +00001644 vabits2 = get_vabits2( src+i );
1645 set_vabits2( dst+i, vabits2 );
njndbf7ca72006-03-31 11:57:59 +00001646 if (VA_BITS2_PARTDEFINED == vabits2) {
njn1d0825f2006-03-27 11:37:07 +00001647 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1648 }
sewardjf2184912006-05-03 22:13:57 +00001649 i++;
1650 len--;
1651 }
1652
1653 } else {
1654
1655 /* We have to do things the slow way */
1656 if (src < dst) {
1657 for (i = 0, j = len-1; i < len; i++, j--) {
1658 PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
1659 vabits2 = get_vabits2( src+j );
1660 set_vabits2( dst+j, vabits2 );
1661 if (VA_BITS2_PARTDEFINED == vabits2) {
1662 set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
1663 }
1664 }
1665 }
1666
1667 if (src > dst) {
1668 for (i = 0; i < len; i++) {
1669 PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
1670 vabits2 = get_vabits2( src+i );
1671 set_vabits2( dst+i, vabits2 );
1672 if (VA_BITS2_PARTDEFINED == vabits2) {
1673 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1674 }
1675 }
sewardj45f4e7c2005-09-27 19:20:21 +00001676 }
sewardjc859fbf2005-04-22 21:10:28 +00001677 }
sewardjf2184912006-05-03 22:13:57 +00001678
sewardjc859fbf2005-04-22 21:10:28 +00001679}
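/* Note: the slow path above copies high-to-low when src < dst and
   low-to-high when src > dst, memmove-style, so overlapping ranges are
   handled correctly; the vectorised path is only used when the two
   ranges are disjoint and both 4-aligned. */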
1680
1681
1682/* --- Fast case permission setters, for dealing with stacks. --- */
1683
njn1d0825f2006-03-27 11:37:07 +00001684static INLINE
njndbf7ca72006-03-31 11:57:59 +00001685void make_aligned_word32_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001686{
njn1d0825f2006-03-27 11:37:07 +00001687 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001688 SecMap* sm;
1689
njndbf7ca72006-03-31 11:57:59 +00001690 PROF_EVENT(300, "make_aligned_word32_undefined");
sewardj5d28efc2005-04-21 22:16:29 +00001691
njn1d0825f2006-03-27 11:37:07 +00001692#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001693 MC_(make_mem_undefined)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001694#else
1695 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001696 PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
1697 MC_(make_mem_undefined)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001698 return;
1699 }
1700
njna7c7ebd2006-03-28 12:51:02 +00001701 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001702 sm_off = SM_OFF(a);
njndbf7ca72006-03-31 11:57:59 +00001703 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001704#endif
njn9b007f62003-04-07 14:40:25 +00001705}
1706
sewardj5d28efc2005-04-21 22:16:29 +00001707
njn1d0825f2006-03-27 11:37:07 +00001708static INLINE
1709void make_aligned_word32_noaccess ( Addr a )
sewardj5d28efc2005-04-21 22:16:29 +00001710{
njn1d0825f2006-03-27 11:37:07 +00001711 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001712 SecMap* sm;
1713
sewardj5d28efc2005-04-21 22:16:29 +00001714 PROF_EVENT(310, "make_aligned_word32_noaccess");
1715
njn1d0825f2006-03-27 11:37:07 +00001716#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001717 MC_(make_mem_noaccess)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001718#else
1719 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj5d28efc2005-04-21 22:16:29 +00001720 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001721 MC_(make_mem_noaccess)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001722 return;
1723 }
1724
njna7c7ebd2006-03-28 12:51:02 +00001725 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001726 sm_off = SM_OFF(a);
1727 sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
1728#endif
sewardj5d28efc2005-04-21 22:16:29 +00001729}
1730
1731
njn9b007f62003-04-07 14:40:25 +00001732/* Nb: by "aligned" here we mean 8-byte aligned */
njn1d0825f2006-03-27 11:37:07 +00001733static INLINE
njndbf7ca72006-03-31 11:57:59 +00001734void make_aligned_word64_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001735{
njn1d0825f2006-03-27 11:37:07 +00001736 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001737 SecMap* sm;
1738
njndbf7ca72006-03-31 11:57:59 +00001739 PROF_EVENT(320, "make_aligned_word64_undefined");
sewardj23eb2fd2005-04-22 16:29:19 +00001740
njn1d0825f2006-03-27 11:37:07 +00001741#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001742 MC_(make_mem_undefined)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001743#else
1744 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001745 PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
1746 MC_(make_mem_undefined)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001747 return;
1748 }
1749
njna7c7ebd2006-03-28 12:51:02 +00001750 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001751 sm_off16 = SM_OFF_16(a);
njndbf7ca72006-03-31 11:57:59 +00001752 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001753#endif
njn9b007f62003-04-07 14:40:25 +00001754}
1755
sewardj23eb2fd2005-04-22 16:29:19 +00001756
njn1d0825f2006-03-27 11:37:07 +00001757static INLINE
1758void make_aligned_word64_noaccess ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001759{
njn1d0825f2006-03-27 11:37:07 +00001760 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001761 SecMap* sm;
1762
sewardj23eb2fd2005-04-22 16:29:19 +00001763 PROF_EVENT(330, "make_aligned_word64_noaccess");
1764
njn1d0825f2006-03-27 11:37:07 +00001765#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001766 MC_(make_mem_noaccess)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001767#else
1768 if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +00001769 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001770 MC_(make_mem_noaccess)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001771 return;
1772 }
1773
njna7c7ebd2006-03-28 12:51:02 +00001774 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001775 sm_off16 = SM_OFF_16(a);
1776 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
1777#endif
njn9b007f62003-04-07 14:40:25 +00001778}
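/* In the compact encoding, each byte of memory is described by 2 V+A
   bits, so an aligned 32-bit word maps onto exactly one byte of the
   vabits8 array and an aligned 64-bit word onto one aligned 16-bit
   chunk of it.  That is why the helpers above can set a whole word's
   state with a single UChar or UShort store. */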
1779
sewardj23eb2fd2005-04-22 16:29:19 +00001780
njn1d0825f2006-03-27 11:37:07 +00001781/*------------------------------------------------------------*/
1782/*--- Stack pointer adjustment ---*/
1783/*------------------------------------------------------------*/
1784
1785static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
1786{
1787 PROF_EVENT(110, "new_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001788 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001789 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00001790 } else {
njndbf7ca72006-03-31 11:57:59 +00001791 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
njn1d0825f2006-03-27 11:37:07 +00001792 }
1793}
1794
1795static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
1796{
1797 PROF_EVENT(120, "die_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001798 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001799 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001800 } else {
njndbf7ca72006-03-31 11:57:59 +00001801 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
njn1d0825f2006-03-27 11:37:07 +00001802 }
1803}
1804
1805static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
1806{
1807 PROF_EVENT(111, "new_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001808 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001809 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
sewardj05a46732006-10-17 01:28:10 +00001810 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001811 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1812 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001813 } else {
njndbf7ca72006-03-31 11:57:59 +00001814 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
njn1d0825f2006-03-27 11:37:07 +00001815 }
1816}
1817
1818static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
1819{
1820 PROF_EVENT(121, "die_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001821 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001822 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001823 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001824 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
1825 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001826 } else {
njndbf7ca72006-03-31 11:57:59 +00001827 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
njn1d0825f2006-03-27 11:37:07 +00001828 }
1829}
1830
1831static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
1832{
1833 PROF_EVENT(112, "new_mem_stack_12");
sewardj05a46732006-10-17 01:28:10 +00001834 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001835 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1836 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001837 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001838 /* from previous test we don't have 8-alignment at offset +0,
1839 hence must have 8 alignment at offsets +4/-4. Hence safe to
1840 do 4 at +0 and then 8 at +4. */
njndbf7ca72006-03-31 11:57:59 +00001841 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1842 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001843 } else {
njndbf7ca72006-03-31 11:57:59 +00001844 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
njn1d0825f2006-03-27 11:37:07 +00001845 }
1846}
1847
1848static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
1849{
1850 PROF_EVENT(122, "die_mem_stack_12");
1851 /* Note the -12 in the test */
sewardj43fcfd92006-10-17 23:14:42 +00001852 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
1853 /* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
1854 -4. */
njndbf7ca72006-03-31 11:57:59 +00001855 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1856 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
sewardj05a46732006-10-17 01:28:10 +00001857 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001858 /* We have 4-alignment at +0, but we don't have 8-alignment at
1859 -12. So we must have 8-alignment at -8. Hence do 4 at -12
1860 and then 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001861 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1862 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00001863 } else {
njndbf7ca72006-03-31 11:57:59 +00001864 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
njn1d0825f2006-03-27 11:37:07 +00001865 }
1866}
1867
1868static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
1869{
1870 PROF_EVENT(113, "new_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001871 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001872 /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
njndbf7ca72006-03-31 11:57:59 +00001873 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1874 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001875 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001876 /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
1877 Hence do 4 at +0, 8 at +4, 4 at +12. */
njndbf7ca72006-03-31 11:57:59 +00001878 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1879 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1880 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
njn1d0825f2006-03-27 11:37:07 +00001881 } else {
njndbf7ca72006-03-31 11:57:59 +00001882 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
njn1d0825f2006-03-27 11:37:07 +00001883 }
1884}
1885
1886static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
1887{
1888 PROF_EVENT(123, "die_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001889 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001890 /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001891 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1892 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001893 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001894 /* 8 alignment must be at -12. Do 4 at -16, 8 at -12, 4 at -4. */
njndbf7ca72006-03-31 11:57:59 +00001895 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1896 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1897 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001898 } else {
njndbf7ca72006-03-31 11:57:59 +00001899 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
njn1d0825f2006-03-27 11:37:07 +00001900 }
1901}
1902
1903static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
1904{
1905 PROF_EVENT(114, "new_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001906 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001907 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001908 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1909 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1910 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1911 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
sewardj05a46732006-10-17 01:28:10 +00001912 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001913 /* 8 alignment must be at +4. Hence do 8 at +4,+12,+20 and 4 at
1914 +0,+28. */
njndbf7ca72006-03-31 11:57:59 +00001915 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1916 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1917 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
1918 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
1919 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
njn1d0825f2006-03-27 11:37:07 +00001920 } else {
njndbf7ca72006-03-31 11:57:59 +00001921 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
njn1d0825f2006-03-27 11:37:07 +00001922 }
1923}
1924
1925static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
1926{
1927 PROF_EVENT(124, "die_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001928 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001929 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001930 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1931 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1932 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1933 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
sewardj05a46732006-10-17 01:28:10 +00001934 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001935 /* 8 alignment must be at -4 etc. Hence do 8 at -12,-20,-28 and
1936 4 at -32,-4. */
njndbf7ca72006-03-31 11:57:59 +00001937 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1938 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
1939 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
1940 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1941 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001942 } else {
njndbf7ca72006-03-31 11:57:59 +00001943 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
njn1d0825f2006-03-27 11:37:07 +00001944 }
1945}
1946
1947static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
1948{
1949 PROF_EVENT(115, "new_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001950 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001951 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1952 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1953 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1954 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1955 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1956 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1957 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1958 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1959 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1960 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1961 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1962 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1963 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1964 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
njn1d0825f2006-03-27 11:37:07 +00001965 } else {
njndbf7ca72006-03-31 11:57:59 +00001966 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
njn1d0825f2006-03-27 11:37:07 +00001967 }
1968}
1969
1970static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
1971{
1972 PROF_EVENT(125, "die_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001973 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001974 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1975 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1976 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1977 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1978 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1979 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1980 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1981 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1982 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1983 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1984 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1985 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1986 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1987 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001988 } else {
njndbf7ca72006-03-31 11:57:59 +00001989 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
njn1d0825f2006-03-27 11:37:07 +00001990 }
1991}
1992
1993static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
1994{
1995 PROF_EVENT(116, "new_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00001996 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001997 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1998 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1999 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2000 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2001 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2002 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2003 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2004 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2005 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2006 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2007 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2008 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2009 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2010 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2011 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2012 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
njn1d0825f2006-03-27 11:37:07 +00002013 } else {
njndbf7ca72006-03-31 11:57:59 +00002014 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
njn1d0825f2006-03-27 11:37:07 +00002015 }
2016}
2017
2018static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
2019{
2020 PROF_EVENT(126, "die_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00002021 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002022 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2023 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2024 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2025 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2026 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2027 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2028 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2029 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2030 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2031 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2032 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2033 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2034 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2035 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2036 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2037 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002038 } else {
njndbf7ca72006-03-31 11:57:59 +00002039 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
njn1d0825f2006-03-27 11:37:07 +00002040 }
2041}
2042
2043static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
2044{
2045 PROF_EVENT(117, "new_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002046 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002047 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2048 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2049 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2050 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2051 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2052 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2053 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2054 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2055 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2056 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2057 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2058 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2059 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2060 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2061 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2062 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2063 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2064 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
njn1d0825f2006-03-27 11:37:07 +00002065 } else {
njndbf7ca72006-03-31 11:57:59 +00002066 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
njn1d0825f2006-03-27 11:37:07 +00002067 }
2068}
2069
2070static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
2071{
2072 PROF_EVENT(127, "die_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002073 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002074 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2075 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2076 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2077 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2078 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2079 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2080 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2081 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2082 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2083 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2084 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2085 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2086 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2087 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2088 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2089 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2090 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2091 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002092 } else {
njndbf7ca72006-03-31 11:57:59 +00002093 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
njn1d0825f2006-03-27 11:37:07 +00002094 }
2095}
2096
2097static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
2098{
2099 PROF_EVENT(118, "new_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002100 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002101 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2102 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2103 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2104 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2105 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2106 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2107 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2108 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2109 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2110 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2111 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2112 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2113 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2114 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2115 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2116 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2117 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2118 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
2119 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144);
2120 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152);
njn1d0825f2006-03-27 11:37:07 +00002121 } else {
njndbf7ca72006-03-31 11:57:59 +00002122 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
njn1d0825f2006-03-27 11:37:07 +00002123 }
2124}
2125
2126static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
2127{
2128 PROF_EVENT(128, "die_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002129 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002130 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
2131 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
2132 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2133 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2134 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2135 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2136 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2137 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2138 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2139 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2140 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2141 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2142 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2143 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2144 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2145 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2146 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2147 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2148 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2149 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002150 } else {
njndbf7ca72006-03-31 11:57:59 +00002151 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
njn1d0825f2006-03-27 11:37:07 +00002152 }
2153}
2154
2155static void mc_new_mem_stack ( Addr a, SizeT len )
2156{
2157 PROF_EVENT(115, "new_mem_stack");
njndbf7ca72006-03-31 11:57:59 +00002158 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002159}
2160
2161static void mc_die_mem_stack ( Addr a, SizeT len )
2162{
2163 PROF_EVENT(125, "die_mem_stack");
njndbf7ca72006-03-31 11:57:59 +00002164 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002165}
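
/* The size-specific handlers above cover stack-pointer adjustments of
   sizes that are (presumably) the common ones in practice; unrolling
   them into aligned word32/word64 stores avoids going through the
   general set_address_range_perms() machinery on every stack
   adjustment, while other sizes fall back to the generic
   mc_new_mem_stack/mc_die_mem_stack. */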
njn9b007f62003-04-07 14:40:25 +00002166
sewardj45d94cc2005-04-20 14:44:11 +00002167
njn1d0825f2006-03-27 11:37:07 +00002168/* The AMD64 ABI says:
2169
2170 "The 128-byte area beyond the location pointed to by %rsp is considered
2171 to be reserved and shall not be modified by signal or interrupt
2172 handlers. Therefore, functions may use this area for temporary data
2173 that is not needed across function calls. In particular, leaf functions
2174 may use this area for their entire stack frame, rather than adjusting
2175 the stack pointer in the prologue and epilogue. This area is known as
2176 red zone [sic]."
2177
2178 So after any call or return we need to mark this redzone as containing
2179 undefined values.
2180
2181 Consider this: we're in function f. f calls g. g moves rsp down
2182 modestly (say 16 bytes) and writes stuff all over the red zone, making it
2183 defined. g returns. f is buggy and reads from parts of the red zone
2184 that it didn't write on. But because g filled that area in, f is going
2185 to be picking up defined V bits and so any errors from reading bits of
2186 the red zone it didn't write will be missed. The only solution I could
2187 think of was to make the red zone undefined when g returns to f.
2188
2189 This is in accordance with the ABI, which makes it clear the redzone
2190 is volatile across function calls.
2191
2192 The problem occurs the other way round too: f could fill the RZ up
2193 with defined values and g could mistakenly read them. So the RZ
2194 also needs to be nuked on function calls.
2195*/
sewardj826ec492005-05-12 18:05:00 +00002196void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
2197{
2198 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +00002199 if (0)
njn8a7b41b2007-09-23 00:51:24 +00002200 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %lu\n", base, len );
sewardj2a3a1a72005-05-12 23:25:43 +00002201
2202# if 0
2203 /* Really slow version */
njndbf7ca72006-03-31 11:57:59 +00002204 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002205# endif
2206
2207# if 0
2208 /* Slow(ish) version, which is fairly easily seen to be correct.
2209 */
2210 if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
njndbf7ca72006-03-31 11:57:59 +00002211 make_aligned_word64_undefined(base + 0);
2212 make_aligned_word64_undefined(base + 8);
2213 make_aligned_word64_undefined(base + 16);
2214 make_aligned_word64_undefined(base + 24);
sewardj2a3a1a72005-05-12 23:25:43 +00002215
njndbf7ca72006-03-31 11:57:59 +00002216 make_aligned_word64_undefined(base + 32);
2217 make_aligned_word64_undefined(base + 40);
2218 make_aligned_word64_undefined(base + 48);
2219 make_aligned_word64_undefined(base + 56);
sewardj2a3a1a72005-05-12 23:25:43 +00002220
njndbf7ca72006-03-31 11:57:59 +00002221 make_aligned_word64_undefined(base + 64);
2222 make_aligned_word64_undefined(base + 72);
2223 make_aligned_word64_undefined(base + 80);
2224 make_aligned_word64_undefined(base + 88);
sewardj2a3a1a72005-05-12 23:25:43 +00002225
njndbf7ca72006-03-31 11:57:59 +00002226 make_aligned_word64_undefined(base + 96);
2227 make_aligned_word64_undefined(base + 104);
2228 make_aligned_word64_undefined(base + 112);
2229 make_aligned_word64_undefined(base + 120);
sewardj2a3a1a72005-05-12 23:25:43 +00002230 } else {
njndbf7ca72006-03-31 11:57:59 +00002231 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002232 }
2233# endif
2234
2235 /* Idea is: go fast when
2236 * 8-aligned and length is 128
2237 * the sm is available in the main primary map
njn1d0825f2006-03-27 11:37:07 +00002238 * the address range falls entirely within a single secondary map
2239 If all those conditions hold, just update the V+A bits by writing
2240 directly into the vabits array. (If the sm was distinguished, this
2241 will make a copy and then write to it.)
sewardj2a3a1a72005-05-12 23:25:43 +00002242 */
njn1d0825f2006-03-27 11:37:07 +00002243 if (EXPECTED_TAKEN( len == 128 && VG_IS_8_ALIGNED(base) )) {
2244 /* Now we know the address range is suitably sized and aligned. */
2245 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002246 UWord a_hi = (UWord)(base + 128 - 1);
njn1d0825f2006-03-27 11:37:07 +00002247 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2248 if (a_hi < MAX_PRIMARY_ADDRESS) {
2249 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002250 SecMap* sm = get_secmap_for_writing_low(a_lo);
2251 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2a3a1a72005-05-12 23:25:43 +00002252 /* Now we know that the entire address range falls within a
2253 single secondary map, and that that secondary 'lives' in
2254 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002255 if (EXPECTED_TAKEN(sm == sm_hi)) {
2256 // Finally, we know that the range is entirely within one secmap.
2257 UWord v_off = SM_OFF(a_lo);
2258 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002259 p[ 0] = VA_BITS16_UNDEFINED;
2260 p[ 1] = VA_BITS16_UNDEFINED;
2261 p[ 2] = VA_BITS16_UNDEFINED;
2262 p[ 3] = VA_BITS16_UNDEFINED;
2263 p[ 4] = VA_BITS16_UNDEFINED;
2264 p[ 5] = VA_BITS16_UNDEFINED;
2265 p[ 6] = VA_BITS16_UNDEFINED;
2266 p[ 7] = VA_BITS16_UNDEFINED;
2267 p[ 8] = VA_BITS16_UNDEFINED;
2268 p[ 9] = VA_BITS16_UNDEFINED;
2269 p[10] = VA_BITS16_UNDEFINED;
2270 p[11] = VA_BITS16_UNDEFINED;
2271 p[12] = VA_BITS16_UNDEFINED;
2272 p[13] = VA_BITS16_UNDEFINED;
2273 p[14] = VA_BITS16_UNDEFINED;
2274 p[15] = VA_BITS16_UNDEFINED;
sewardj2a3a1a72005-05-12 23:25:43 +00002275 return;
njn1d0825f2006-03-27 11:37:07 +00002276 }
sewardj2a3a1a72005-05-12 23:25:43 +00002277 }
2278 }
2279
sewardj2e1a6772006-01-18 04:16:27 +00002280 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
sewardj3f5f5562006-06-16 21:39:08 +00002281 if (EXPECTED_TAKEN( len == 288 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00002282 /* Now we know the address range is suitably sized and aligned. */
2283 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002284 UWord a_hi = (UWord)(base + 288 - 1);
njn1d0825f2006-03-27 11:37:07 +00002285 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2286 if (a_hi < MAX_PRIMARY_ADDRESS) {
2287 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002288 SecMap* sm = get_secmap_for_writing_low(a_lo);
2289 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2e1a6772006-01-18 04:16:27 +00002290 /* Now we know that the entire address range falls within a
2291 single secondary map, and that that secondary 'lives' in
2292 the main primary map. */
njn1d0825f2006-03-27 11:37:07 +00002293 if (EXPECTED_TAKEN(sm == sm_hi)) {
2294 // Finally, we know that the range is entirely within one secmap.
2295 UWord v_off = SM_OFF(a_lo);
2296 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002297 p[ 0] = VA_BITS16_UNDEFINED;
2298 p[ 1] = VA_BITS16_UNDEFINED;
2299 p[ 2] = VA_BITS16_UNDEFINED;
2300 p[ 3] = VA_BITS16_UNDEFINED;
2301 p[ 4] = VA_BITS16_UNDEFINED;
2302 p[ 5] = VA_BITS16_UNDEFINED;
2303 p[ 6] = VA_BITS16_UNDEFINED;
2304 p[ 7] = VA_BITS16_UNDEFINED;
2305 p[ 8] = VA_BITS16_UNDEFINED;
2306 p[ 9] = VA_BITS16_UNDEFINED;
2307 p[10] = VA_BITS16_UNDEFINED;
2308 p[11] = VA_BITS16_UNDEFINED;
2309 p[12] = VA_BITS16_UNDEFINED;
2310 p[13] = VA_BITS16_UNDEFINED;
2311 p[14] = VA_BITS16_UNDEFINED;
2312 p[15] = VA_BITS16_UNDEFINED;
2313 p[16] = VA_BITS16_UNDEFINED;
2314 p[17] = VA_BITS16_UNDEFINED;
2315 p[18] = VA_BITS16_UNDEFINED;
2316 p[19] = VA_BITS16_UNDEFINED;
2317 p[20] = VA_BITS16_UNDEFINED;
2318 p[21] = VA_BITS16_UNDEFINED;
2319 p[22] = VA_BITS16_UNDEFINED;
2320 p[23] = VA_BITS16_UNDEFINED;
2321 p[24] = VA_BITS16_UNDEFINED;
2322 p[25] = VA_BITS16_UNDEFINED;
2323 p[26] = VA_BITS16_UNDEFINED;
2324 p[27] = VA_BITS16_UNDEFINED;
2325 p[28] = VA_BITS16_UNDEFINED;
2326 p[29] = VA_BITS16_UNDEFINED;
2327 p[30] = VA_BITS16_UNDEFINED;
2328 p[31] = VA_BITS16_UNDEFINED;
2329 p[32] = VA_BITS16_UNDEFINED;
2330 p[33] = VA_BITS16_UNDEFINED;
2331 p[34] = VA_BITS16_UNDEFINED;
2332 p[35] = VA_BITS16_UNDEFINED;
sewardj2e1a6772006-01-18 04:16:27 +00002333 return;
njn1d0825f2006-03-27 11:37:07 +00002334 }
sewardj2e1a6772006-01-18 04:16:27 +00002335 }
2336 }
2337
sewardj2a3a1a72005-05-12 23:25:43 +00002338 /* else fall into slow case */
njndbf7ca72006-03-31 11:57:59 +00002339 MC_(make_mem_undefined)(base, len);
sewardj826ec492005-05-12 18:05:00 +00002340}
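
/* Sizing note for the fast paths above: one VA_BITS16 chunk describes 8
   bytes of memory, so the 128-byte amd64 red zone needs 16 UShort
   stores (p[0..15]) and the 288-byte ELF ppc64 case needs 36
   (p[0..35]). */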
2341
2342
nethercote8b76fe52004-11-08 19:20:09 +00002343/*------------------------------------------------------------*/
2344/*--- Checking memory ---*/
2345/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002346
sewardje4ccc012005-05-02 12:53:38 +00002347typedef
2348 enum {
2349 MC_Ok = 5,
2350 MC_AddrErr = 6,
2351 MC_ValueErr = 7
2352 }
2353 MC_ReadResult;
2354
2355
njn25e49d8e72002-09-23 09:36:25 +00002356/* Check permissions for address range. If inadequate permissions
2357 exist, *bad_addr is set to the offending address, so the caller can
2358 know what it is. */
2359
sewardjecf8e102003-07-12 12:11:39 +00002360/* Returns True if [a .. a+len) is not addressable. Otherwise,
2361 returns False, and if bad_addr is non-NULL, sets *bad_addr to
2362 indicate the lowest failing address. Functions below are
2363 similar. */
njndbf7ca72006-03-31 11:57:59 +00002364Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00002365{
nethercote451eae92004-11-02 13:06:32 +00002366 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002367 UWord vabits2;
2368
njndbf7ca72006-03-31 11:57:59 +00002369 PROF_EVENT(60, "check_mem_is_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00002370 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002371 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
njn1d0825f2006-03-27 11:37:07 +00002372 vabits2 = get_vabits2(a);
2373 if (VA_BITS2_NOACCESS != vabits2) {
2374 if (bad_addr != NULL) *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00002375 return False;
2376 }
2377 a++;
2378 }
2379 return True;
2380}
2381
njndbf7ca72006-03-31 11:57:59 +00002382static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002383{
nethercote451eae92004-11-02 13:06:32 +00002384 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002385 UWord vabits2;
2386
njndbf7ca72006-03-31 11:57:59 +00002387 PROF_EVENT(62, "is_mem_addressable");
njn25e49d8e72002-09-23 09:36:25 +00002388 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002389 PROF_EVENT(63, "is_mem_addressable(loop)");
njn1d0825f2006-03-27 11:37:07 +00002390 vabits2 = get_vabits2(a);
2391 if (VA_BITS2_NOACCESS == vabits2) {
njn25e49d8e72002-09-23 09:36:25 +00002392 if (bad_addr != NULL) *bad_addr = a;
2393 return False;
2394 }
2395 a++;
2396 }
2397 return True;
2398}
2399
njndbf7ca72006-03-31 11:57:59 +00002400static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002401{
nethercote451eae92004-11-02 13:06:32 +00002402 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002403 UWord vabits2;
njn25e49d8e72002-09-23 09:36:25 +00002404
njndbf7ca72006-03-31 11:57:59 +00002405 PROF_EVENT(64, "is_mem_defined");
2406 DEBUG("is_mem_defined\n");
njn25e49d8e72002-09-23 09:36:25 +00002407 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002408 PROF_EVENT(65, "is_mem_defined(loop)");
njn1d0825f2006-03-27 11:37:07 +00002409 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002410 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002411 // Error! Nb: Report addressability errors in preference to
2412 // definedness errors. And don't report definedness errors unless
2413 // --undef-value-errors=yes.
2414 if (bad_addr != NULL) *bad_addr = a;
2415 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2416 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002417 }
2418 a++;
2419 }
nethercote8b76fe52004-11-08 19:20:09 +00002420 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00002421}
2422
2423
2424/* Check a zero-terminated ascii string. Tricky -- don't want to
2425 examine the actual bytes, to find the end, until we're sure it is
2426 safe to do so. */
2427
njndbf7ca72006-03-31 11:57:59 +00002428static MC_ReadResult mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002429{
njn1d0825f2006-03-27 11:37:07 +00002430 UWord vabits2;
2431
njndbf7ca72006-03-31 11:57:59 +00002432 PROF_EVENT(66, "mc_is_defined_asciiz");
2433 DEBUG("mc_is_defined_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00002434 while (True) {
njndbf7ca72006-03-31 11:57:59 +00002435 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
njn1d0825f2006-03-27 11:37:07 +00002436 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002437 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002438 // Error! Nb: Report addressability errors in preference to
2439 // definedness errors. And don't report definedness errors unless
2440 // --undef-value-errors=yes.
2441 if (bad_addr != NULL) *bad_addr = a;
2442 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2443 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002444 }
2445 /* Ok, a is safe to read. */
njn1d0825f2006-03-27 11:37:07 +00002446 if (* ((UChar*)a) == 0) {
sewardj45d94cc2005-04-20 14:44:11 +00002447 return MC_Ok;
njn1d0825f2006-03-27 11:37:07 +00002448 }
njn25e49d8e72002-09-23 09:36:25 +00002449 a++;
2450 }
2451}
2452
2453
2454/*------------------------------------------------------------*/
2455/*--- Memory event handlers ---*/
2456/*------------------------------------------------------------*/
2457
njn25e49d8e72002-09-23 09:36:25 +00002458static
njndbf7ca72006-03-31 11:57:59 +00002459void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
2460 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002461{
njn25e49d8e72002-09-23 09:36:25 +00002462 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002463 Bool ok = is_mem_addressable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002464
njn25e49d8e72002-09-23 09:36:25 +00002465 if (!ok) {
2466 switch (part) {
2467 case Vg_CoreSysCall:
njn718d3b12006-12-16 00:54:12 +00002468 mc_record_memparam_error ( tid, bad_addr, /*isAddrErr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002469 break;
2470
njn25e49d8e72002-09-23 09:36:25 +00002471 case Vg_CoreSignal:
njn718d3b12006-12-16 00:54:12 +00002472 mc_record_core_mem_error( tid, /*isAddrErr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002473 break;
2474
2475 default:
njndbf7ca72006-03-31 11:57:59 +00002476 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002477 }
2478 }
njn25e49d8e72002-09-23 09:36:25 +00002479}
2480
2481static
njndbf7ca72006-03-31 11:57:59 +00002482void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00002483 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002484{
njn25e49d8e72002-09-23 09:36:25 +00002485 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002486 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00002487
nethercote8b76fe52004-11-08 19:20:09 +00002488 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00002489 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00002490
njn25e49d8e72002-09-23 09:36:25 +00002491 switch (part) {
2492 case Vg_CoreSysCall:
njn718d3b12006-12-16 00:54:12 +00002493 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
njn25e49d8e72002-09-23 09:36:25 +00002494 break;
2495
njn25e49d8e72002-09-23 09:36:25 +00002496 /* If we're being asked to jump to a silly address, record an error
2497 message before potentially crashing the entire system. */
2498 case Vg_CoreTranslate:
njn1d0825f2006-03-27 11:37:07 +00002499 mc_record_jump_error( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002500 break;
2501
2502 default:
njndbf7ca72006-03-31 11:57:59 +00002503 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002504 }
2505 }
njn25e49d8e72002-09-23 09:36:25 +00002506}
2507
2508static
njndbf7ca72006-03-31 11:57:59 +00002509void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00002510 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00002511{
nethercote8b76fe52004-11-08 19:20:09 +00002512 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00002513 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00002514
njnca82cc02004-11-22 17:18:48 +00002515 tl_assert(part == Vg_CoreSysCall);
njndbf7ca72006-03-31 11:57:59 +00002516 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00002517 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00002518 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
2519 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
njn25e49d8e72002-09-23 09:36:25 +00002520 }
njn25e49d8e72002-09-23 09:36:25 +00002521}
2522
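/* The three handlers above are the hooks the core calls when checking
   syscall and signal-handler memory arguments.  For example, a syscall
   wrapper for write(fd, buf, count) would route [buf, buf+count)
   through check_mem_is_defined (the kernel reads it), while a wrapper
   for read() only needs check_mem_is_addressable on its destination
   buffer; a zero-terminated path name goes through
   check_mem_is_defined_asciiz. */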
njn25e49d8e72002-09-23 09:36:25 +00002523static
nethercote451eae92004-11-02 13:06:32 +00002524void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002525{
njndbf7ca72006-03-31 11:57:59 +00002526 /* Ignore the permissions, just make it defined. Seems to work... */
njnba7b4582006-09-21 15:59:30 +00002527 // Because code is defined, initialised variables get put in the data
2528 // segment and are defined, and uninitialised variables get put in the
2529 // bss segment and are auto-zeroed (and so defined).
2530 //
2531 // It's possible that there will be padding between global variables.
2532 // This will also be auto-zeroed, and marked as defined by Memcheck. If
2533 // a program uses it, Memcheck will not complain. This is arguably a
2534 // false negative, but it's a grey area -- the behaviour is defined (the
2535 // padding is zeroed) but it's probably not what the user intended. And
2536 // we can't avoid it.
nethercote451eae92004-11-02 13:06:32 +00002537 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
njndbf7ca72006-03-31 11:57:59 +00002538 a, (ULong)len, rr, ww, xx);
2539 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002540}
2541
2542static
njnb8dca862005-03-14 02:42:44 +00002543void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002544{
njndbf7ca72006-03-31 11:57:59 +00002545 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002546}
2547
njncf45fd42004-11-24 16:30:22 +00002548static
2549void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
2550{
njndbf7ca72006-03-31 11:57:59 +00002551 MC_(make_mem_defined)(a, len);
njncf45fd42004-11-24 16:30:22 +00002552}
njn25e49d8e72002-09-23 09:36:25 +00002553
sewardj45d94cc2005-04-20 14:44:11 +00002554
njn25e49d8e72002-09-23 09:36:25 +00002555/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002556/*--- Register event handlers ---*/
2557/*------------------------------------------------------------*/
2558
sewardj45d94cc2005-04-20 14:44:11 +00002559/* When some chunk of guest state is written, mark the corresponding
2560 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00002561 chunks of guest state, hence the _SIZE value, which has to be as
2562 big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00002563*/
2564static void mc_post_reg_write ( CorePart part, ThreadId tid,
2565 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00002566{
sewardj05a46732006-10-17 01:28:10 +00002567# define MAX_REG_WRITE_SIZE 1408
cerion21082042005-12-06 19:07:08 +00002568 UChar area[MAX_REG_WRITE_SIZE];
2569 tl_assert(size <= MAX_REG_WRITE_SIZE);
njn1d0825f2006-03-27 11:37:07 +00002570 VG_(memset)(area, V_BITS8_DEFINED, size);
njncf45fd42004-11-24 16:30:22 +00002571 VG_(set_shadow_regs_area)( tid, offset, size, area );
cerion21082042005-12-06 19:07:08 +00002572# undef MAX_REG_WRITE_SIZE
njnd3040452003-05-19 15:04:06 +00002573}
2574
sewardj45d94cc2005-04-20 14:44:11 +00002575static
2576void mc_post_reg_write_clientcall ( ThreadId tid,
2577 OffT offset, SizeT size,
2578 Addr f)
njnd3040452003-05-19 15:04:06 +00002579{
njncf45fd42004-11-24 16:30:22 +00002580 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00002581}
2582
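/* The two post_reg_write handlers above run when the core itself writes
   a guest register, e.g. when depositing a syscall result or the return
   value of a client call; marking the shadow bytes V_BITS8_DEFINED
   stops those values from being reported as uninitialised. */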
sewardj45d94cc2005-04-20 14:44:11 +00002583/* Look at the definedness of the guest's shadow state for
2584 [offset, offset+len). If any part of that is undefined, record
2585 a parameter error.
2586*/
2587static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
2588 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00002589{
sewardj45d94cc2005-04-20 14:44:11 +00002590 Int i;
2591 Bool bad;
2592
2593 UChar area[16];
2594 tl_assert(size <= 16);
2595
2596 VG_(get_shadow_regs_area)( tid, offset, size, area );
2597
2598 bad = False;
2599 for (i = 0; i < size; i++) {
njn1d0825f2006-03-27 11:37:07 +00002600 if (area[i] != V_BITS8_DEFINED) {
sewardj2c27f702005-05-03 18:19:05 +00002601 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002602 break;
2603 }
nethercote8b76fe52004-11-08 19:20:09 +00002604 }
2605
sewardj45d94cc2005-04-20 14:44:11 +00002606 if (bad)
njn718d3b12006-12-16 00:54:12 +00002607 mc_record_regparam_error ( tid, s );
nethercote8b76fe52004-11-08 19:20:09 +00002608}
njnd3040452003-05-19 15:04:06 +00002609
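/* Example: calling close(fd) with fd never initialised makes the
   syscall wrapper declare a read of the guest register holding fd;
   mc_pre_reg_read then finds shadow bytes that are not V_BITS8_DEFINED
   and records a "Syscall param ... contains uninitialised byte(s)"
   error via mc_record_regparam_error. */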
njn25e49d8e72002-09-23 09:36:25 +00002610
sewardj6cf40ff2005-04-20 22:31:26 +00002611/*------------------------------------------------------------*/
njn718d3b12006-12-16 00:54:12 +00002612/*--- Error types ---*/
njn1d0825f2006-03-27 11:37:07 +00002613/*------------------------------------------------------------*/
2614
njn718d3b12006-12-16 00:54:12 +00002615// Different kinds of blocks.
2616typedef enum {
2617 Block_Mallocd = 111,
2618 Block_Freed,
2619 Block_Mempool,
2620 Block_MempoolChunk,
2621 Block_UserG
2622} BlockKind;
2623
2624/* ------------------ Addresses -------------------- */
2625
njn1d0825f2006-03-27 11:37:07 +00002626/* The classification of a faulting address. */
2627typedef
2628 enum {
sewardjb8b79ad2008-03-03 01:35:41 +00002629 Addr_Undescribed, // as-yet unclassified
2630 Addr_Unknown, // classification yielded nothing useful
2631 Addr_Block, // in malloc'd/free'd block
2632 Addr_Stack, // on a thread's stack
2633 Addr_DataSym, // in a global data sym
2634 Addr_Variable, // variable described by the debug info
2635 Addr_SectKind // last-ditch classification attempt
njn1d0825f2006-03-27 11:37:07 +00002636 }
njn718d3b12006-12-16 00:54:12 +00002637 AddrTag;
njn1d0825f2006-03-27 11:37:07 +00002638
njn1d0825f2006-03-27 11:37:07 +00002639typedef
njn718d3b12006-12-16 00:54:12 +00002640 struct _AddrInfo
njn1d0825f2006-03-27 11:37:07 +00002641 AddrInfo;
2642
njn718d3b12006-12-16 00:54:12 +00002643struct _AddrInfo {
2644 AddrTag tag;
2645 union {
2646 // As-yet unclassified.
2647 struct { } Undescribed;
njn1d0825f2006-03-27 11:37:07 +00002648
njn718d3b12006-12-16 00:54:12 +00002649 // On a stack.
2650 struct {
2651 ThreadId tid; // Which thread's stack?
2652 } Stack;
njn1d0825f2006-03-27 11:37:07 +00002653
njn718d3b12006-12-16 00:54:12 +00002654 // This covers heap blocks (normal and from mempools) and user-defined
2655 // blocks.
2656 struct {
2657 BlockKind block_kind;
2658 Char* block_desc; // "block", "mempool" or user-defined
2659 SizeT block_szB;
2660 OffT rwoffset;
2661 ExeContext* lastchange;
2662 } Block;
njn1d0825f2006-03-27 11:37:07 +00002663
sewardjb8b79ad2008-03-03 01:35:41 +00002664 // In a global .data symbol. This holds as much of the symbol's
2665 // name as fits in name[] (zero terminated), plus an offset.
2666 struct {
2667 Char name[128];
2668 OffT offset;
2669 } DataSym;
2670
2671 // Is described by Dwarf debug info. Arbitrary strings. Must
2672 // be the same length.
2673 struct {
2674 Char descr1[96];
2675 Char descr2[96];
2676 } Variable;
2677
2678 // Could only narrow it down to be the PLT/GOT/etc of a given
2679 // object. Better than nothing, perhaps.
2680 struct {
2681 Char objname[128];
2682 VgSectKind kind;
2683 } SectKind;
2684
njn718d3b12006-12-16 00:54:12 +00002685 // Classification yielded nothing useful.
2686 struct { } Unknown;
2687
2688 } Addr;
2689};
2690
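/* An AddrInfo starts life with tag Addr_Undescribed when an error is
   first recorded (see the mc_record_* functions below); the expensive
   classification into one of the other tags is deferred to
   mc_update_extra(), which calls describe_addr() once the core decides
   the error is worth keeping. */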
2691/* ------------------ Errors ----------------------- */
njn1d0825f2006-03-27 11:37:07 +00002692
2693/* What kind of error it is. */
2694typedef
njn718d3b12006-12-16 00:54:12 +00002695 enum {
2696 Err_Value,
2697 Err_Cond,
2698 Err_CoreMem,
2699 Err_Addr,
2700 Err_Jump,
2701 Err_RegParam,
2702 Err_MemParam,
2703 Err_User,
2704 Err_Free,
2705 Err_FreeMismatch,
2706 Err_Overlap,
2707 Err_Leak,
2708 Err_IllegalMempool,
njn1d0825f2006-03-27 11:37:07 +00002709 }
njn718d3b12006-12-16 00:54:12 +00002710 MC_ErrorTag;
njn1d0825f2006-03-27 11:37:07 +00002711
njn1d0825f2006-03-27 11:37:07 +00002712
njn718d3b12006-12-16 00:54:12 +00002713typedef struct _MC_Error MC_Error;
2714
2715struct _MC_Error {
2716 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
2717 //MC_ErrorTag tag;
2718
2719 union {
2720 // Use of an undefined value:
2721 // - as a pointer in a load or store
2722 // - as a jump target
2723 struct {
2724 SizeT szB; // size of value in bytes
2725 } Value;
2726
2727 // Use of an undefined value in a conditional branch or move.
2728 struct {
2729 } Cond;
2730
2731 // Addressability error in core (signal-handling) operation.
2732 // It would be good to get rid of this error kind, merge it with
2733 // another one somehow.
2734 struct {
2735 } CoreMem;
2736
2737 // Use of an unaddressable memory location in a load or store.
2738 struct {
2739 Bool isWrite; // read or write?
2740 SizeT szB; // not used for exec (jump) errors
2741 Bool maybe_gcc; // True if just below %esp -- could be a gcc bug
2742 AddrInfo ai;
2743 } Addr;
2744
2745 // Jump to an unaddressable memory location.
2746 struct {
2747 AddrInfo ai;
2748 } Jump;
2749
2750 // System call register input contains undefined bytes.
2751 struct {
2752 } RegParam;
2753
2754 // System call memory input contains undefined/unaddressable bytes
2755 struct {
2756 Bool isAddrErr; // Addressability or definedness error?
2757 AddrInfo ai;
2758 } MemParam;
2759
2760 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
2761 struct {
2762 Bool isAddrErr; // Addressability or definedness error?
2763 AddrInfo ai;
2764 } User;
2765
2766 // Program tried to free() something that's not a heap block (this
2767 // covers double-frees).
2768 struct {
2769 AddrInfo ai;
2770 } Free;
2771
2772 // Program allocates heap block with one function
2773 // (malloc/new/new[]/custom) and deallocates with a non-matching one.
2774 struct {
2775 AddrInfo ai;
2776 } FreeMismatch;
2777
2778 // Call to strcpy, memcpy, etc, with overlapping blocks.
2779 struct {
2780 Addr src; // Source block
2781 Addr dst; // Destination block
2782 Int szB; // Size in bytes; 0 if unused.
2783 } Overlap;
2784
2785 // A memory leak.
2786 struct {
2787 UInt n_this_record;
2788 UInt n_total_records;
2789 LossRecord* lossRecord;
2790 } Leak;
2791
2792 // A memory pool error.
2793 struct {
2794 AddrInfo ai;
2795 } IllegalMempool;
2796
2797 } Err;
2798};
2799
njn1d0825f2006-03-27 11:37:07 +00002800
2801/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00002802/*--- Printing errors ---*/
2803/*------------------------------------------------------------*/
2804
njn718d3b12006-12-16 00:54:12 +00002805static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
njn1d0825f2006-03-27 11:37:07 +00002806{
2807 HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
2808 HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";
2809
njn718d3b12006-12-16 00:54:12 +00002810 switch (ai->tag) {
2811 case Addr_Unknown:
2812 if (maybe_gcc) {
njn1d0825f2006-03-27 11:37:07 +00002813 VG_(message)(Vg_UserMsg,
2814 "%sAddress 0x%llx is just below the stack ptr. "
2815 "To suppress, use: --workaround-gcc296-bugs=yes%s",
2816 xpre, (ULong)a, xpost
2817 );
2818 } else {
2819 VG_(message)(Vg_UserMsg,
2820 "%sAddress 0x%llx "
2821 "is not stack'd, malloc'd or (recently) free'd%s",
2822 xpre, (ULong)a, xpost);
2823 }
2824 break;
njn718d3b12006-12-16 00:54:12 +00002825
2826 case Addr_Stack:
2827 VG_(message)(Vg_UserMsg,
2828 "%sAddress 0x%llx is on thread %d's stack%s",
2829 xpre, (ULong)a, ai->Addr.Stack.tid, xpost);
2830 break;
2831
2832 case Addr_Block: {
2833 SizeT block_szB = ai->Addr.Block.block_szB;
2834 OffT rwoffset = ai->Addr.Block.rwoffset;
njn1d0825f2006-03-27 11:37:07 +00002835 SizeT delta;
2836 const Char* relative;
njn1d0825f2006-03-27 11:37:07 +00002837
njn718d3b12006-12-16 00:54:12 +00002838 if (rwoffset < 0) {
2839 delta = (SizeT)(-rwoffset);
njn1d0825f2006-03-27 11:37:07 +00002840 relative = "before";
njn718d3b12006-12-16 00:54:12 +00002841 } else if (rwoffset >= block_szB) {
2842 delta = rwoffset - block_szB;
njn1d0825f2006-03-27 11:37:07 +00002843 relative = "after";
2844 } else {
njn718d3b12006-12-16 00:54:12 +00002845 delta = rwoffset;
njn1d0825f2006-03-27 11:37:07 +00002846 relative = "inside";
2847 }
2848 VG_(message)(Vg_UserMsg,
2849 "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
2850 xpre,
njn718d3b12006-12-16 00:54:12 +00002851 a, delta, relative, ai->Addr.Block.block_desc,
2852 block_szB,
2853 ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
2854 : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
2855 : "client-defined",
njn1d0825f2006-03-27 11:37:07 +00002856 xpost);
njn718d3b12006-12-16 00:54:12 +00002857 VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
njn1d0825f2006-03-27 11:37:07 +00002858 break;
2859 }
njn718d3b12006-12-16 00:54:12 +00002860
sewardjb8b79ad2008-03-03 01:35:41 +00002861 case Addr_DataSym:
2862 VG_(message)(Vg_UserMsg,
2863 "%sAddress 0x%llx is %llu bytes "
2864 "inside data symbol \"%t\"%s",
2865 xpre,
2866 (ULong)a,
2867 (ULong)ai->Addr.DataSym.offset,
2868 ai->Addr.DataSym.name,
2869 xpost);
2870 break;
2871
2872 case Addr_Variable:
2873 if (ai->Addr.Variable.descr1[0] != '\0')
2874 VG_(message)(Vg_UserMsg, "%s%s%s",
2875 xpre, ai->Addr.Variable.descr1, xpost);
2876 if (ai->Addr.Variable.descr2[0] != '\0')
2877 VG_(message)(Vg_UserMsg, "%s%s%s",
2878 xpre, ai->Addr.Variable.descr2, xpost);
2879 break;
2880
2881 case Addr_SectKind:
2882 VG_(message)(Vg_UserMsg,
2883 "%sAddress 0x%llx is in the %t segment of %t%s",
2884 xpre,
2885 (ULong)a,
2886 VG_(pp_SectKind)(ai->Addr.SectKind.kind),
2887 ai->Addr.SectKind.objname,
2888 xpost);
2889 break;
2890
njn1d0825f2006-03-27 11:37:07 +00002891 default:
2892 VG_(tool_panic)("mc_pp_AddrInfo");
2893 }
2894}
2895
njn718d3b12006-12-16 00:54:12 +00002896static const HChar* str_leak_lossmode ( Reachedness lossmode )
njn9e63cb62005-05-08 18:34:59 +00002897{
njn718d3b12006-12-16 00:54:12 +00002898 const HChar *loss = "?";
2899 switch (lossmode) {
2900 case Unreached: loss = "definitely lost"; break;
2901 case IndirectLeak: loss = "indirectly lost"; break;
2902 case Interior: loss = "possibly lost"; break;
2903 case Proper: loss = "still reachable"; break;
2904 }
2905 return loss;
2906}
njn9e63cb62005-05-08 18:34:59 +00002907
njn718d3b12006-12-16 00:54:12 +00002908static const HChar* xml_leak_kind ( Reachedness lossmode )
2909{
2910 const HChar *loss = "?";
2911 switch (lossmode) {
2912 case Unreached: loss = "Leak_DefinitelyLost"; break;
2913 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
2914 case Interior: loss = "Leak_PossiblyLost"; break;
2915 case Proper: loss = "Leak_StillReachable"; break;
2916 }
2917 return loss;
2918}
2919
2920static void mc_pp_msg( Char* xml_name, Error* err, const HChar* format, ... )
2921{
sewardj71bc3cb2005-05-19 00:25:45 +00002922 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
2923 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
njn718d3b12006-12-16 00:54:12 +00002924 Char buf[256];
2925 va_list vargs;
2926
2927 if (VG_(clo_xml))
2928 VG_(message)(Vg_UserMsg, " <kind>%s</kind>", xml_name);
2929 // Stick xpre and xpost on the front and back of the format string.
2930 VG_(snprintf)(buf, 256, "%s%s%s", xpre, format, xpost);
2931 va_start(vargs, format);
2932 VG_(vmessage) ( Vg_UserMsg, buf, vargs );
2933 va_end(vargs);
2934 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2935}
2936
2937static void mc_pp_Error ( Error* err )
2938{
2939 MC_Error* extra = VG_(get_error_extra)(err);
sewardj71bc3cb2005-05-19 00:25:45 +00002940
njn9e63cb62005-05-08 18:34:59 +00002941 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00002942 case Err_CoreMem: {
2943 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
2944 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
2945 signal handler frame. --njn */
2946 mc_pp_msg("CoreMemError", err,
2947 "%s contains unaddressable byte(s)",
2948 VG_(get_error_string)(err));
njn9e63cb62005-05-08 18:34:59 +00002949 break;
njn9e63cb62005-05-08 18:34:59 +00002950 }
2951
njn718d3b12006-12-16 00:54:12 +00002952 case Err_Value:
2953 mc_pp_msg("UninitValue", err,
2954 "Use of uninitialised value of size %d",
2955 extra->Err.Value.szB);
2956 break;
2957
2958 case Err_Cond:
2959 mc_pp_msg("UninitCondition", err,
2960 "Conditional jump or move depends"
2961 " on uninitialised value(s)");
2962 break;
2963
2964 case Err_RegParam:
2965 mc_pp_msg("SyscallParam", err,
2966 "Syscall param %s contains uninitialised byte(s)",
2967 VG_(get_error_string)(err));
2968 break;
2969
2970 case Err_MemParam:
2971 mc_pp_msg("SyscallParam", err,
2972 "Syscall param %s points to %s byte(s)",
2973 VG_(get_error_string)(err),
2974 ( extra->Err.MemParam.isAddrErr
2975 ? "unaddressable" : "uninitialised" ));
2976 mc_pp_AddrInfo(VG_(get_error_address)(err),
2977 &extra->Err.MemParam.ai, False);
2978 break;
2979
2980 case Err_User:
2981 mc_pp_msg("ClientCheck", err,
2982 "%s byte(s) found during client check request",
2983 ( extra->Err.User.isAddrErr
2984 ? "Unaddressable" : "Uninitialised" ));
2985 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
2986 False);
2987 break;
2988
2989 case Err_Free:
2990 mc_pp_msg("InvalidFree", err,
2991 "Invalid free() / delete / delete[]");
2992 mc_pp_AddrInfo(VG_(get_error_address)(err),
2993 &extra->Err.Free.ai, False);
2994 break;
2995
2996 case Err_FreeMismatch:
2997 mc_pp_msg("MismatchedFree", err,
2998 "Mismatched free() / delete / delete []");
2999 mc_pp_AddrInfo(VG_(get_error_address)(err),
3000 &extra->Err.FreeMismatch.ai, False);
3001 break;
3002
3003 case Err_Addr:
3004 if (extra->Err.Addr.isWrite) {
3005 mc_pp_msg("InvalidWrite", err,
3006 "Invalid write of size %d",
3007 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00003008 } else {
njn718d3b12006-12-16 00:54:12 +00003009 mc_pp_msg("InvalidRead", err,
3010 "Invalid read of size %d",
3011 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00003012 }
njn718d3b12006-12-16 00:54:12 +00003013 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Addr.ai,
3014 extra->Err.Addr.maybe_gcc);
njn9e63cb62005-05-08 18:34:59 +00003015 break;
3016
njn718d3b12006-12-16 00:54:12 +00003017 case Err_Jump:
3018 mc_pp_msg("InvalidJump", err,
3019 "Jump to the invalid address stated on the next line");
3020 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Jump.ai,
3021 False);
njn9e63cb62005-05-08 18:34:59 +00003022 break;
njn1d0825f2006-03-27 11:37:07 +00003023
njn718d3b12006-12-16 00:54:12 +00003024 case Err_Overlap:
3025 if (extra->Err.Overlap.szB == 0)
3026 mc_pp_msg("Overlap", err,
3027 "Source and destination overlap in %s(%p, %p)",
3028 VG_(get_error_string)(err),
3029 extra->Err.Overlap.dst, extra->Err.Overlap.src);
njn1d0825f2006-03-27 11:37:07 +00003030 else
njn718d3b12006-12-16 00:54:12 +00003031 mc_pp_msg("Overlap", err,
3032 "Source and destination overlap in %s(%p, %p, %d)",
3033 VG_(get_error_string)(err),
3034 extra->Err.Overlap.dst, extra->Err.Overlap.src,
3035 extra->Err.Overlap.szB);
njn1d0825f2006-03-27 11:37:07 +00003036 break;
njn1d0825f2006-03-27 11:37:07 +00003037
njn718d3b12006-12-16 00:54:12 +00003038 case Err_IllegalMempool:
3039 mc_pp_msg("InvalidMemPool", err,
3040 "Illegal memory pool address");
3041 mc_pp_AddrInfo(VG_(get_error_address)(err),
3042 &extra->Err.IllegalMempool.ai, False);
njn1d0825f2006-03-27 11:37:07 +00003043 break;
3044
njn718d3b12006-12-16 00:54:12 +00003045 case Err_Leak: {
3046 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
3047 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
3048 UInt n_this_record = extra->Err.Leak.n_this_record;
3049 UInt n_total_records = extra->Err.Leak.n_total_records;
3050 LossRecord* l = extra->Err.Leak.lossRecord;
3051
3052 if (VG_(clo_xml)) {
3053 VG_(message)(Vg_UserMsg, " <kind>%t</kind>",
3054 xml_leak_kind(l->loss_mode));
3055 } else {
3056 VG_(message)(Vg_UserMsg, "");
3057 }
3058
3059 if (l->indirect_bytes) {
3060 VG_(message)(Vg_UserMsg,
3061 "%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
3062 " are %s in loss record %,u of %,u%s",
3063 xpre,
3064 l->total_bytes + l->indirect_bytes,
3065 l->total_bytes, l->indirect_bytes, l->num_blocks,
3066 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
3067 xpost
3068 );
3069 if (VG_(clo_xml)) {
3070 // Nb: don't put commas in these XML numbers
3071 VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
3072 l->total_bytes + l->indirect_bytes);
3073 VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
3074 l->num_blocks);
3075 }
3076 } else {
3077 VG_(message)(
3078 Vg_UserMsg,
3079 "%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
3080 xpre,
3081 l->total_bytes, l->num_blocks,
3082 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
3083 xpost
3084 );
3085 if (VG_(clo_xml)) {
3086 VG_(message)(Vg_UserMsg, " <leakedbytes>%d</leakedbytes>",
3087 l->total_bytes);
3088 VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
3089 l->num_blocks);
3090 }
3091 }
3092 VG_(pp_ExeContext)(l->allocated_at);
3093 break;
3094 }
3095
njn1d0825f2006-03-27 11:37:07 +00003096 default:
3097 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
3098 VG_(get_error_kind)(err));
3099 VG_(tool_panic)("unknown error code in mc_pp_Error)");
njn9e63cb62005-05-08 18:34:59 +00003100 }
3101}
3102
3103/*------------------------------------------------------------*/
3104/*--- Recording errors ---*/
3105/*------------------------------------------------------------*/
3106
njn1d0825f2006-03-27 11:37:07 +00003107/* This many bytes below %ESP are considered addressable if we're
3108 doing the --workaround-gcc296-bugs hack. */
3109#define VG_GCC296_BUG_STACK_SLOP 1024
3110
3111/* Is this address within some small distance below %ESP? Used only
3112 for the --workaround-gcc296-bugs kludge. */
3113static Bool is_just_below_ESP( Addr esp, Addr aa )
3114{
3115 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
3116 return True;
3117 else
3118 return False;
3119}
3120
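/* For example, with --workaround-gcc296-bugs=yes and SP == 0x7ff000,
   a wild access at 0x7fec04 (1020 bytes below SP) is silently ignored,
   while one at 0x7feb04 (1276 bytes below) is still reported. */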
njn718d3b12006-12-16 00:54:12 +00003121/* --- Called from generated and non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00003122
njn718d3b12006-12-16 00:54:12 +00003123static void mc_record_address_error ( ThreadId tid, Addr a, Int szB,
njn1d0825f2006-03-27 11:37:07 +00003124 Bool isWrite )
3125{
njn718d3b12006-12-16 00:54:12 +00003126 MC_Error extra;
sewardj05a46732006-10-17 01:28:10 +00003127 Bool just_below_esp;
3128
3129 if (in_ignored_range(a))
3130 return;
3131
3132# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
3133 /* AIX zero-page handling. On AIX, reads from page zero are,
3134 bizarrely enough, legitimate. Writes to page zero aren't,
3135 though. Since memcheck can't distinguish reads from writes, the
3136 best we can do is to 'act normal' and mark the A bits in the
3137 normal way as noaccess, but then hide any reads from that page
3138 that get reported here. */
njn718d3b12006-12-16 00:54:12 +00003139 if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
sewardj05a46732006-10-17 01:28:10 +00003140 return;
3141
3142 /* Appalling AIX hack. It suppresses reads done by glink
3143 fragments. Getting rid of this would require figuring out
3144 somehow where the referenced data areas are (and their
3145 sizes). */
njn718d3b12006-12-16 00:54:12 +00003146 if ((!isWrite) && szB == sizeof(Word)) {
sewardj05a46732006-10-17 01:28:10 +00003147 UInt i1, i2;
3148 UInt* pc = (UInt*)VG_(get_IP)(tid);
3149 if (sizeof(Word) == 4) {
3150 i1 = 0x800c0000; /* lwz r0,0(r12) */
3151 i2 = 0x804c0004; /* lwz r2,4(r12) */
3152 } else {
3153 i1 = 0xe80c0000; /* ld r0,0(r12) */
3154 i2 = 0xe84c0008; /* ld r2,8(r12) */
3155 }
3156 if (pc[0] == i1 && pc[1] == i2) return;
3157 if (pc[0] == i2 && pc[-1] == i1) return;
3158 }
3159# endif
njn1d0825f2006-03-27 11:37:07 +00003160
3161 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
3162
3163 /* If this is caused by an access immediately below %ESP, and the
3164 user asks nicely, we just ignore it. */
3165 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
3166 return;
3167
njn718d3b12006-12-16 00:54:12 +00003168 extra.Err.Addr.isWrite = isWrite;
3169 extra.Err.Addr.szB = szB;
3170 extra.Err.Addr.maybe_gcc = just_below_esp;
3171 extra.Err.Addr.ai.tag = Addr_Undescribed;
3172 VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003173}
3174
njn718d3b12006-12-16 00:54:12 +00003175static void mc_record_value_error ( ThreadId tid, Int szB )
3176{
3177 MC_Error extra;
3178 tl_assert(MC_(clo_undef_value_errors));
3179 extra.Err.Value.szB = szB;
3180 VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
3181}
3182
3183static void mc_record_cond_error ( ThreadId tid )
3184{
3185 tl_assert(MC_(clo_undef_value_errors));
3186 VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, /*extra*/NULL);
3187}
3188
3189/* --- Called from non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00003190
3191/* This is for memory errors in pthread functions, as opposed to pthread API
3192 errors which are found by the core. */
njn718d3b12006-12-16 00:54:12 +00003193static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* msg )
njn1d0825f2006-03-27 11:37:07 +00003194{
njn718d3b12006-12-16 00:54:12 +00003195 VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
njn1d0825f2006-03-27 11:37:07 +00003196}
3197
njn718d3b12006-12-16 00:54:12 +00003198static void mc_record_regparam_error ( ThreadId tid, Char* msg )
njn1d0825f2006-03-27 11:37:07 +00003199{
njn1d0825f2006-03-27 11:37:07 +00003200 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003201 VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, /*extra*/NULL );
3202}
3203
3204static void mc_record_memparam_error ( ThreadId tid, Addr a,
3205 Bool isAddrErr, Char* msg )
3206{
3207 MC_Error extra;
3208 tl_assert(VG_INVALID_THREADID != tid);
3209 if (!isAddrErr)
3210 tl_assert(MC_(clo_undef_value_errors));
3211 extra.Err.MemParam.isAddrErr = isAddrErr;
3212 extra.Err.MemParam.ai.tag = Addr_Undescribed;
3213 VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
njn1d0825f2006-03-27 11:37:07 +00003214}
3215
3216static void mc_record_jump_error ( ThreadId tid, Addr a )
3217{
njn718d3b12006-12-16 00:54:12 +00003218 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003219 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003220 extra.Err.Jump.ai.tag = Addr_Undescribed;
3221 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003222}
3223
3224void MC_(record_free_error) ( ThreadId tid, Addr a )
3225{
njn718d3b12006-12-16 00:54:12 +00003226 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003227 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003228 extra.Err.Free.ai.tag = Addr_Undescribed;
3229 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
3230}
3231
3232void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
3233{
3234 MC_Error extra;
3235 AddrInfo* ai = &extra.Err.FreeMismatch.ai;
3236 tl_assert(VG_INVALID_THREADID != tid);
3237 ai->tag = Addr_Block;
3238 ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed'
3239 ai->Addr.Block.block_desc = "block";
3240 ai->Addr.Block.block_szB = mc->szB;
3241 ai->Addr.Block.rwoffset = 0;
3242 ai->Addr.Block.lastchange = mc->where;
3243 VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
3244 &extra );
njn1d0825f2006-03-27 11:37:07 +00003245}
3246
3247void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
3248{
njn718d3b12006-12-16 00:54:12 +00003249 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003250 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003251 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
3252 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003253}
3254
njn718d3b12006-12-16 00:54:12 +00003255static void mc_record_overlap_error ( ThreadId tid, Char* function,
3256 Addr src, Addr dst, SizeT szB )
njn1d0825f2006-03-27 11:37:07 +00003257{
njn718d3b12006-12-16 00:54:12 +00003258 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003259 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003260 extra.Err.Overlap.src = src;
3261 extra.Err.Overlap.dst = dst;
3262 extra.Err.Overlap.szB = szB;
njn1d0825f2006-03-27 11:37:07 +00003263 VG_(maybe_record_error)(
njn718d3b12006-12-16 00:54:12 +00003264 tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
njn1d0825f2006-03-27 11:37:07 +00003265}
3266
njn718d3b12006-12-16 00:54:12 +00003267Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
3268 UInt n_total_records, LossRecord* lossRecord,
3269 Bool print_record )
njn1d0825f2006-03-27 11:37:07 +00003270{
njn718d3b12006-12-16 00:54:12 +00003271 MC_Error extra;
3272 extra.Err.Leak.n_this_record = n_this_record;
3273 extra.Err.Leak.n_total_records = n_total_records;
3274 extra.Err.Leak.lossRecord = lossRecord;
njn1d0825f2006-03-27 11:37:07 +00003275 return
njn718d3b12006-12-16 00:54:12 +00003276 VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
3277 lossRecord->allocated_at, print_record,
njn1d0825f2006-03-27 11:37:07 +00003278 /*allow_GDB_attach*/False, /*count_error*/False );
3279}
3280
njn718d3b12006-12-16 00:54:12 +00003281static void mc_record_user_error ( ThreadId tid, Addr a, Bool isAddrErr )
njn9e63cb62005-05-08 18:34:59 +00003282{
njn718d3b12006-12-16 00:54:12 +00003283 MC_Error extra;
njn9e63cb62005-05-08 18:34:59 +00003284
3285 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003286 extra.Err.User.isAddrErr = isAddrErr;
3287 extra.Err.User.ai.tag = Addr_Undescribed;
3288 VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
njn9e63cb62005-05-08 18:34:59 +00003289}
3290
njn718d3b12006-12-16 00:54:12 +00003291/*------------------------------------------------------------*/
3292/*--- Other error operations ---*/
3293/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00003294
3295/* Compare error contexts, to detect duplicates. Note that if they
3296 are otherwise the same, the faulting addrs and associated rwoffsets
3297 are allowed to be different. */
3298static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
3299{
njn718d3b12006-12-16 00:54:12 +00003300 MC_Error* extra1 = VG_(get_error_extra)(e1);
3301 MC_Error* extra2 = VG_(get_error_extra)(e2);
njn1d0825f2006-03-27 11:37:07 +00003302
3303 /* Guaranteed by calling function */
3304 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
3305
3306 switch (VG_(get_error_kind)(e1)) {
njn718d3b12006-12-16 00:54:12 +00003307 case Err_CoreMem: {
njn1d0825f2006-03-27 11:37:07 +00003308 Char *e1s, *e2s;
njn1d0825f2006-03-27 11:37:07 +00003309 e1s = VG_(get_error_string)(e1);
3310 e2s = VG_(get_error_string)(e2);
njn718d3b12006-12-16 00:54:12 +00003311 if (e1s == e2s) return True;
3312 if (VG_STREQ(e1s, e2s)) return True;
njn1d0825f2006-03-27 11:37:07 +00003313 return False;
3314 }
3315
njn718d3b12006-12-16 00:54:12 +00003316 case Err_RegParam:
3317 return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
3318
njn1d0825f2006-03-27 11:37:07 +00003319 // Perhaps we should also check the addrinfo.akinds for equality.
3320 // That would result in more error reports, but only in cases where
3321 // a register contains uninitialised bytes and points to memory
3322 // containing uninitialised bytes. Currently, the 2nd of those to be
3323 // detected won't be reported. That is (nearly?) always the memory
3324 // error, which is good.
njn718d3b12006-12-16 00:54:12 +00003325 case Err_MemParam:
3326 if (!VG_STREQ(VG_(get_error_string)(e1),
3327 VG_(get_error_string)(e2))) return False;
njn1d0825f2006-03-27 11:37:07 +00003328 // fall through
njn718d3b12006-12-16 00:54:12 +00003329 case Err_User:
3330 return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
3331 ? True : False );
3332
3333 case Err_Free:
3334 case Err_FreeMismatch:
3335 case Err_Jump:
3336 case Err_IllegalMempool:
3337 case Err_Overlap:
3338 case Err_Cond:
njn1d0825f2006-03-27 11:37:07 +00003339 return True;
3340
njn718d3b12006-12-16 00:54:12 +00003341 case Err_Addr:
3342 return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
3343 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00003344
njn718d3b12006-12-16 00:54:12 +00003345 case Err_Value:
3346 return ( extra1->Err.Value.szB == extra2->Err.Value.szB
3347 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00003348
njn718d3b12006-12-16 00:54:12 +00003349 case Err_Leak:
3350 VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
njn1d0825f2006-03-27 11:37:07 +00003351 "since it's handled with VG_(unique_error)()!");
3352
njn1d0825f2006-03-27 11:37:07 +00003353 default:
3354 VG_(printf)("Error:\n unknown error code %d\n",
3355 VG_(get_error_kind)(e1));
3356 VG_(tool_panic)("unknown error code in mc_eq_Error");
3357 }
3358}
3359
3360/* Function used when searching MC_Chunk lists */
3361static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
3362{
3363 // Nb: this is not quite right! It assumes that the heap block has
3364 // a redzone of size MC_MALLOC_REDZONE_SZB. That's true for malloc'd
3365 // blocks, but not necessarily true for custom-alloc'd blocks. So
3366 // in some cases this could result in an incorrect description (eg.
3367 // saying "12 bytes after block A" when really it's within block B).
3368 // Fixing would require adding redzone size to MC_Chunks, though.
njn718d3b12006-12-16 00:54:12 +00003369 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
njn1d0825f2006-03-27 11:37:07 +00003370 MC_MALLOC_REDZONE_SZB );
3371}
3372
3373// Forward declaration
3374static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
3375
njn718d3b12006-12-16 00:54:12 +00003376
njn1d0825f2006-03-27 11:37:07 +00003377/* Describe an address as best you can, for error messages,
3378 putting the result in ai. */
sewardjb8b79ad2008-03-03 01:35:41 +00003379static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
njn1d0825f2006-03-27 11:37:07 +00003380{
sewardjb8b79ad2008-03-03 01:35:41 +00003381 MC_Chunk* mc;
3382 ThreadId tid;
3383 Addr stack_min, stack_max;
3384 VgSectKind sect;
njn718d3b12006-12-16 00:54:12 +00003385
3386 tl_assert(Addr_Undescribed == ai->tag);
njn1d0825f2006-03-27 11:37:07 +00003387
3388 /* Perhaps it's a user-def'd block? */
sewardjb8b79ad2008-03-03 01:35:41 +00003389 if (client_perm_maybe_describe( a, ai )) {
njn1d0825f2006-03-27 11:37:07 +00003390 return;
njn1d0825f2006-03-27 11:37:07 +00003391 }
3392 /* Search for a recently freed block which might bracket it. */
3393 mc = MC_(get_freed_list_head)();
3394 while (mc) {
3395 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00003396 ai->tag = Addr_Block;
3397 ai->Addr.Block.block_kind = Block_Freed;
3398 ai->Addr.Block.block_desc = "block";
3399 ai->Addr.Block.block_szB = mc->szB;
3400 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
3401 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00003402 return;
3403 }
3404 mc = mc->next;
3405 }
3406 /* Search for a currently malloc'd block which might bracket it. */
3407 VG_(HT_ResetIter)(MC_(malloc_list));
3408 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
3409 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00003410 ai->tag = Addr_Block;
3411 ai->Addr.Block.block_kind = Block_Mallocd;
3412 ai->Addr.Block.block_desc = "block";
3413 ai->Addr.Block.block_szB = mc->szB;
3414 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
3415 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00003416 return;
3417 }
3418 }
sewardjb8b79ad2008-03-03 01:35:41 +00003419 /* Perhaps the variable type/location data describes it? */
3420 tl_assert(sizeof(ai->Addr.Variable.descr1)
3421 == sizeof(ai->Addr.Variable.descr2));
3422 VG_(memset)( &ai->Addr.Variable.descr1,
3423 0, sizeof(ai->Addr.Variable.descr1));
3424 VG_(memset)( &ai->Addr.Variable.descr2,
3425 0, sizeof(ai->Addr.Variable.descr2));
3426 if (VG_(get_data_description)(
3427 &ai->Addr.Variable.descr1[0],
3428 &ai->Addr.Variable.descr2[0],
3429 sizeof(ai->Addr.Variable.descr1)-1,
3430 a )) {
3431 ai->tag = Addr_Variable;
3432 tl_assert( ai->Addr.Variable.descr1
3433 [ sizeof(ai->Addr.Variable.descr1)-1 ] == 0);
3434 tl_assert( ai->Addr.Variable.descr2
3435 [ sizeof(ai->Addr.Variable.descr2)-1 ] == 0);
3436 return;
3437 }
3438 /* Have a look at the low level data symbols - perhaps it's in
3439 there. */
3440 VG_(memset)( &ai->Addr.DataSym.name,
3441 0, sizeof(ai->Addr.DataSym.name));
3442 if (VG_(get_datasym_and_offset)(
3443 a, &ai->Addr.DataSym.name[0],
3444 sizeof(ai->Addr.DataSym.name)-1,
3445 &ai->Addr.DataSym.offset )) {
3446 ai->tag = Addr_DataSym;
3447 tl_assert( ai->Addr.DataSym.name
3448 [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
3449 return;
3450 }
3451 /* Perhaps it's on a thread's stack? */
3452 VG_(thread_stack_reset_iter)(&tid);
3453 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
3454 if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
3455 ai->tag = Addr_Stack;
3456 ai->Addr.Stack.tid = tid;
3457 return;
3458 }
3459 }
3460 /* last ditch attempt at classification */
3461 tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
3462 VG_(memset)( &ai->Addr.SectKind.objname,
3463 0, sizeof(ai->Addr.SectKind.objname));
3464 VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
3465 sect = VG_(seginfo_sect_kind)( &ai->Addr.SectKind.objname[0],
3466 sizeof(ai->Addr.SectKind.objname)-1, a);
3467 if (sect != Vg_SectUnknown) {
3468 ai->tag = Addr_SectKind;
3469 ai->Addr.SectKind.kind = sect;
3470 tl_assert( ai->Addr.SectKind.objname
3471 [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
3472 return;
3473 }
njn1d0825f2006-03-27 11:37:07 +00003474 /* Clueless ... */
njn718d3b12006-12-16 00:54:12 +00003475 ai->tag = Addr_Unknown;
njn1d0825f2006-03-27 11:37:07 +00003476 return;
3477}
3478
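/* The order of the searches above decides which description wins when
   several could apply: client-defined blocks first, then recently freed
   heap blocks, then live heap blocks, then debug-info variable
   descriptions, then raw data symbols, then thread stacks, and finally
   the section-kind guess before falling back to Addr_Unknown. */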
3479/* Updates the copy with address info if necessary (but not for all errors). */
3480static UInt mc_update_extra( Error* err )
3481{
njn718d3b12006-12-16 00:54:12 +00003482 MC_Error* extra = VG_(get_error_extra)(err);
3483
njn1d0825f2006-03-27 11:37:07 +00003484 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00003485 // These ones don't have addresses associated with them, and so don't
njn1d0825f2006-03-27 11:37:07 +00003486 // need any updating.
njn718d3b12006-12-16 00:54:12 +00003487 case Err_CoreMem:
3488 case Err_Value:
3489 case Err_Cond:
3490 case Err_Overlap:
3491 case Err_RegParam:
3492 // For Err_Leaks the returned size does not matter -- they are always
sewardjb8b79ad2008-03-03 01:35:41 +00003493 // shown with VG_(unique_error)() so their 'extra' is not copied. But
3494 // we make it consistent with the others.
njn718d3b12006-12-16 00:54:12 +00003495 case Err_Leak:
njn1d0825f2006-03-27 11:37:07 +00003496 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00003497
njn718d3b12006-12-16 00:54:12 +00003498 // These ones always involve a memory address.
3499 case Err_Addr:
sewardjb8b79ad2008-03-03 01:35:41 +00003500 describe_addr ( VG_(get_error_address)(err),
3501 &extra->Err.Addr.ai );
njn1d0825f2006-03-27 11:37:07 +00003502 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00003503 case Err_MemParam:
sewardjb8b79ad2008-03-03 01:35:41 +00003504 describe_addr ( VG_(get_error_address)(err),
3505 &extra->Err.MemParam.ai );
njn1d0825f2006-03-27 11:37:07 +00003506 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00003507 case Err_Jump:
sewardjb8b79ad2008-03-03 01:35:41 +00003508 describe_addr ( VG_(get_error_address)(err),
3509 &extra->Err.Jump.ai );
njn718d3b12006-12-16 00:54:12 +00003510 return sizeof(MC_Error);
3511 case Err_User:
sewardjb8b79ad2008-03-03 01:35:41 +00003512 describe_addr ( VG_(get_error_address)(err),
3513 &extra->Err.User.ai );
njn718d3b12006-12-16 00:54:12 +00003514 return sizeof(MC_Error);
3515 case Err_Free:
sewardjb8b79ad2008-03-03 01:35:41 +00003516 describe_addr ( VG_(get_error_address)(err),
3517 &extra->Err.Free.ai );
njn718d3b12006-12-16 00:54:12 +00003518 return sizeof(MC_Error);
3519 case Err_IllegalMempool:
3520 describe_addr ( VG_(get_error_address)(err),
3521 &extra->Err.IllegalMempool.ai );
3522 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00003523
njn718d3b12006-12-16 00:54:12 +00003524 // Err_FreeMismatches have already had their address described; this is
njn1d0825f2006-03-27 11:37:07 +00003525 // possible because we have the MC_Chunk on hand when the error is
3526 // detected. However, the address may be part of a user block, and if so
3527 // we override the pre-determined description with a user block one.
njn718d3b12006-12-16 00:54:12 +00003528 case Err_FreeMismatch: {
3529 tl_assert(extra && Block_Mallocd ==
3530 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
njn1d0825f2006-03-27 11:37:07 +00003531 (void)client_perm_maybe_describe( VG_(get_error_address)(err),
njn718d3b12006-12-16 00:54:12 +00003532 &extra->Err.FreeMismatch.ai );
njn1d0825f2006-03-27 11:37:07 +00003533 return sizeof(MC_Error);
3534 }
3535
njn1d0825f2006-03-27 11:37:07 +00003536 default: VG_(tool_panic)("mc_update_extra: bad errkind");
3537 }
3538}
3539
njn9e63cb62005-05-08 18:34:59 +00003540/*------------------------------------------------------------*/
3541/*--- Suppressions ---*/
3542/*------------------------------------------------------------*/
3543
njn718d3b12006-12-16 00:54:12 +00003544typedef
3545 enum {
3546 ParamSupp, // Bad syscall params
3547 UserSupp, // Errors arising from client-request checks
3548 CoreMemSupp, // Memory errors in core (pthread ops, signal handling)
3549
3550 // Undefined value errors of given size
3551 Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,
3552
3553 // Undefined value error in conditional.
3554 CondSupp,
3555
3556 // Unaddressable read/write attempt at given size
3557 Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,
3558
3559 JumpSupp, // Jump to unaddressable target
3560 FreeSupp, // Invalid or mismatching free
3561 OverlapSupp, // Overlapping blocks in memcpy(), strcpy(), etc
3562 LeakSupp, // Something to be suppressed in a leak check.
3563 MempoolSupp, // Memory pool suppression.
3564 }
3565 MC_SuppKind;
3566
njn51d827b2005-05-09 01:02:08 +00003567static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00003568{
3569 SuppKind skind;
3570
njn1d0825f2006-03-27 11:37:07 +00003571 if (VG_STREQ(name, "Param")) skind = ParamSupp;
sewardj6362bb52006-11-28 00:15:35 +00003572 else if (VG_STREQ(name, "User")) skind = UserSupp;
njn1d0825f2006-03-27 11:37:07 +00003573 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
3574 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
3575 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
3576 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
3577 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
3578 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
njn718d3b12006-12-16 00:54:12 +00003579 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
njn1d0825f2006-03-27 11:37:07 +00003580 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
3581 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
3582 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
3583 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
njn718d3b12006-12-16 00:54:12 +00003584 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
3585 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
njn9e63cb62005-05-08 18:34:59 +00003586 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
3587 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
3588 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
3589 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
3590 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
3591 else
3592 return False;
3593
3594 VG_(set_supp_kind)(su, skind);
3595 return True;
3596}
3597
njn1d0825f2006-03-27 11:37:07 +00003598static
3599Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
3600{
3601 Bool eof;
3602
3603 if (VG_(get_supp_kind)(su) == ParamSupp) {
3604 eof = VG_(get_line) ( fd, buf, nBuf );
3605 if (eof) return False;
3606 VG_(set_supp_string)(su, VG_(strdup)(buf));
3607 }
3608 return True;
3609}
3610
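/* For reference, a suppression entry that exercises the parsing above
   looks roughly like this (the "write(buf)" line is the extra string
   read only for Param suppressions; the name and frame are
   hypothetical):

      {
         hypothetical-name
         Memcheck:Param
         write(buf)
         fun:do_logging
      }

   Kinds such as Addr4, Cond or Leak take no extra string line. */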
3611static Bool mc_error_matches_suppression(Error* err, Supp* su)
3612{
njn718d3b12006-12-16 00:54:12 +00003613 Int su_szB;
3614 MC_Error* extra = VG_(get_error_extra)(err);
3615 ErrorKind ekind = VG_(get_error_kind )(err);
njn1d0825f2006-03-27 11:37:07 +00003616
3617 switch (VG_(get_supp_kind)(su)) {
3618 case ParamSupp:
njn718d3b12006-12-16 00:54:12 +00003619 return ((ekind == Err_RegParam || ekind == Err_MemParam)
njn1d0825f2006-03-27 11:37:07 +00003620 && VG_STREQ(VG_(get_error_string)(err),
3621 VG_(get_supp_string)(su)));
3622
sewardj6362bb52006-11-28 00:15:35 +00003623 case UserSupp:
njn718d3b12006-12-16 00:54:12 +00003624 return (ekind == Err_User);
sewardj6362bb52006-11-28 00:15:35 +00003625
njn1d0825f2006-03-27 11:37:07 +00003626 case CoreMemSupp:
njn718d3b12006-12-16 00:54:12 +00003627 return (ekind == Err_CoreMem
njn1d0825f2006-03-27 11:37:07 +00003628 && VG_STREQ(VG_(get_error_string)(err),
3629 VG_(get_supp_string)(su)));
3630
njn718d3b12006-12-16 00:54:12 +00003631 case Value1Supp: su_szB = 1; goto value_case;
3632 case Value2Supp: su_szB = 2; goto value_case;
3633 case Value4Supp: su_szB = 4; goto value_case;
3634 case Value8Supp: su_szB = 8; goto value_case;
3635 case Value16Supp:su_szB =16; goto value_case;
njn1d0825f2006-03-27 11:37:07 +00003636 value_case:
njn718d3b12006-12-16 00:54:12 +00003637 return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
njn1d0825f2006-03-27 11:37:07 +00003638
njn718d3b12006-12-16 00:54:12 +00003639 case CondSupp:
3640 return (ekind == Err_Cond);
3641
3642 case Addr1Supp: su_szB = 1; goto addr_case;
3643 case Addr2Supp: su_szB = 2; goto addr_case;
3644 case Addr4Supp: su_szB = 4; goto addr_case;
3645 case Addr8Supp: su_szB = 8; goto addr_case;
3646 case Addr16Supp:su_szB =16; goto addr_case;
njn1d0825f2006-03-27 11:37:07 +00003647 addr_case:
njn718d3b12006-12-16 00:54:12 +00003648 return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
3649
3650 case JumpSupp:
3651 return (ekind == Err_Jump);
njn1d0825f2006-03-27 11:37:07 +00003652
3653 case FreeSupp:
njn718d3b12006-12-16 00:54:12 +00003654 return (ekind == Err_Free || ekind == Err_FreeMismatch);
njn1d0825f2006-03-27 11:37:07 +00003655
3656 case OverlapSupp:
njn718d3b12006-12-16 00:54:12 +00003657 return (ekind == Err_Overlap);
njn1d0825f2006-03-27 11:37:07 +00003658
3659 case LeakSupp:
njn718d3b12006-12-16 00:54:12 +00003660 return (ekind == Err_Leak);
njn1d0825f2006-03-27 11:37:07 +00003661
3662 case MempoolSupp:
njn718d3b12006-12-16 00:54:12 +00003663 return (ekind == Err_IllegalMempool);
njn1d0825f2006-03-27 11:37:07 +00003664
3665 default:
3666 VG_(printf)("Error:\n"
3667 " unknown suppression type %d\n",
3668 VG_(get_supp_kind)(su));
3669 VG_(tool_panic)("unknown suppression type in "
3670 "MC_(error_matches_suppression)");
3671 }
3672}
3673
3674static Char* mc_get_error_name ( Error* err )
3675{
njn1d0825f2006-03-27 11:37:07 +00003676 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00003677 case Err_RegParam: return "Param";
3678 case Err_MemParam: return "Param";
3679 case Err_User: return "User";
3680 case Err_FreeMismatch: return "Free";
3681 case Err_IllegalMempool: return "Mempool";
3682 case Err_Free: return "Free";
3683 case Err_Jump: return "Jump";
3684 case Err_CoreMem: return "CoreMem";
3685 case Err_Overlap: return "Overlap";
3686 case Err_Leak: return "Leak";
3687 case Err_Cond: return "Cond";
3688 case Err_Addr: {
3689 MC_Error* extra = VG_(get_error_extra)(err);
3690 switch ( extra->Err.Addr.szB ) {
njn1d0825f2006-03-27 11:37:07 +00003691 case 1: return "Addr1";
3692 case 2: return "Addr2";
3693 case 4: return "Addr4";
3694 case 8: return "Addr8";
3695 case 16: return "Addr16";
3696 default: VG_(tool_panic)("unexpected size for Addr");
3697 }
njn718d3b12006-12-16 00:54:12 +00003698 }
3699 case Err_Value: {
3700 MC_Error* extra = VG_(get_error_extra)(err);
3701 switch ( extra->Err.Value.szB ) {
njn1d0825f2006-03-27 11:37:07 +00003702 case 1: return "Value1";
3703 case 2: return "Value2";
3704 case 4: return "Value4";
3705 case 8: return "Value8";
3706 case 16: return "Value16";
3707 default: VG_(tool_panic)("unexpected size for Value");
3708 }
njn718d3b12006-12-16 00:54:12 +00003709 }
njn1d0825f2006-03-27 11:37:07 +00003710 default: VG_(tool_panic)("get_error_name: unexpected type");
3711 }
njn1d0825f2006-03-27 11:37:07 +00003712}
3713
3714static void mc_print_extra_suppression_info ( Error* err )
3715{
njn718d3b12006-12-16 00:54:12 +00003716 ErrorKind ekind = VG_(get_error_kind )(err);
3717 if (Err_RegParam == ekind || Err_MemParam == ekind) {
njn1d0825f2006-03-27 11:37:07 +00003718 VG_(printf)(" %s\n", VG_(get_error_string)(err));
3719 }
3720}
3721
njn9e63cb62005-05-08 18:34:59 +00003722/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00003723/*--- Functions called directly from generated code: ---*/
3724/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00003725/*------------------------------------------------------------*/
3726
njn1d0825f2006-03-27 11:37:07 +00003727/* Types: LOADV32, LOADV16, LOADV8 are:
sewardj6cf40ff2005-04-20 22:31:26 +00003728 UWord fn ( Addr a )
3729 so they return 32-bits on 32-bit machines and 64-bits on
3730 64-bit machines. Addr has the same size as a host word.
3731
njn1d0825f2006-03-27 11:37:07 +00003732 LOADV64 is always ULong fn ( Addr a )
sewardj6cf40ff2005-04-20 22:31:26 +00003733
njn1d0825f2006-03-27 11:37:07 +00003734 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
3735 are a UWord, and for STOREV64 they are a ULong.
sewardj6cf40ff2005-04-20 22:31:26 +00003736*/
3737
njn1d0825f2006-03-27 11:37:07 +00003738/* If any part of '_a' indicated by the mask is 1, either
njn45e81252006-03-28 12:35:08 +00003739 '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
njn1d0825f2006-03-27 11:37:07 +00003740 covered by the primary map. */
njn45e81252006-03-28 12:35:08 +00003741#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
njn1d0825f2006-03-27 11:37:07 +00003742#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
3743
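/* How the macros above work, for a 64-bit access (_sz == 64, so the
   byte size passed to MASK is 8): MASK(8) keeps exactly two groups of
   address bits -- the low bits below the access size, and the bits
   above the range the primary map covers (bit 16 + log2(N_PRIMARY_MAP)
   and upwards).  So UNALIGNED_OR_HIGH(a,64) is non-zero iff 'a' is not
   8-aligned or lies beyond the directly-mapped range, and in either
   case the LOADV/STOREV fast paths below bail out to the _slow
   routines. */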
3744
sewardj95448072004-11-22 20:19:51 +00003745/* ------------------------ Size = 8 ------------------------ */
3746
njn1d0825f2006-03-27 11:37:07 +00003747static INLINE
3748ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
3749{
3750 UWord sm_off16, vabits16;
3751 SecMap* sm;
3752
3753 PROF_EVENT(200, "mc_LOADV64");
3754
3755#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003756 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003757#else
njn45e81252006-03-28 12:35:08 +00003758 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003759 PROF_EVENT(201, "mc_LOADV64-slow1");
njn45e81252006-03-28 12:35:08 +00003760 return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
sewardjf9d81612005-04-23 23:25:49 +00003761 }
3762
njna7c7ebd2006-03-28 12:51:02 +00003763 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003764 sm_off16 = SM_OFF_16(a);
3765 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3766
3767 // Handle common case quickly: a is suitably aligned, is mapped, and
3768 // addressible.
3769 // Convert V bits from compact memory form to expanded register form.
njndbf7ca72006-03-31 11:57:59 +00003770 if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003771 return V_BITS64_DEFINED;
njndbf7ca72006-03-31 11:57:59 +00003772 } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003773 return V_BITS64_UNDEFINED;
3774 } else {
njndbf7ca72006-03-31 11:57:59 +00003775 /* Slow case: the 8 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003776 PROF_EVENT(202, "mc_LOADV64-slow2");
njn45e81252006-03-28 12:35:08 +00003777 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003778 }
3779#endif
3780}
3781
3782VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
3783{
3784 return mc_LOADV64(a, True);
3785}
3786VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
3787{
3788 return mc_LOADV64(a, False);
3789}
sewardjf9d81612005-04-23 23:25:49 +00003790
sewardjf9d81612005-04-23 23:25:49 +00003791
njn1d0825f2006-03-27 11:37:07 +00003792static INLINE
njn4cf530b2006-04-06 13:33:48 +00003793void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003794{
3795 UWord sm_off16, vabits16;
3796 SecMap* sm;
3797
3798 PROF_EVENT(210, "mc_STOREV64");
3799
3800#ifndef PERF_FAST_STOREV
3801 // XXX: this slow case seems to be marginally faster than the fast case!
3802 // Investigate further.
njn4cf530b2006-04-06 13:33:48 +00003803 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003804#else
njn45e81252006-03-28 12:35:08 +00003805 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003806 PROF_EVENT(211, "mc_STOREV64-slow1");
njn4cf530b2006-04-06 13:33:48 +00003807 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003808 return;
sewardjf9d81612005-04-23 23:25:49 +00003809 }
3810
njna7c7ebd2006-03-28 12:51:02 +00003811 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003812 sm_off16 = SM_OFF_16(a);
3813 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3814
3815 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003816 (VA_BITS16_DEFINED == vabits16 ||
3817 VA_BITS16_UNDEFINED == vabits16) ))
njn1d0825f2006-03-27 11:37:07 +00003818 {
3819 /* Handle common case quickly: a is suitably aligned, */
3820      /* is mapped, and is addressable. */
3821 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003822 if (V_BITS64_DEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003823 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003824 } else if (V_BITS64_UNDEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003825 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003826 } else {
3827 /* Slow but general case -- writing partially defined bytes. */
3828 PROF_EVENT(212, "mc_STOREV64-slow2");
njn4cf530b2006-04-06 13:33:48 +00003829 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003830 }
3831 } else {
3832 /* Slow but general case. */
3833 PROF_EVENT(213, "mc_STOREV64-slow3");
njn4cf530b2006-04-06 13:33:48 +00003834 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003835 }
3836#endif
3837}
3838
njn4cf530b2006-04-06 13:33:48 +00003839VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003840{
njn4cf530b2006-04-06 13:33:48 +00003841 mc_STOREV64(a, vbits64, True);
njn1d0825f2006-03-27 11:37:07 +00003842}
njn4cf530b2006-04-06 13:33:48 +00003843VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003844{
njn4cf530b2006-04-06 13:33:48 +00003845 mc_STOREV64(a, vbits64, False);
njn1d0825f2006-03-27 11:37:07 +00003846}
sewardj95448072004-11-22 20:19:51 +00003847
sewardj95448072004-11-22 20:19:51 +00003848
3849/* ------------------------ Size = 4 ------------------------ */
3850
njn1d0825f2006-03-27 11:37:07 +00003851static INLINE
3852UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
3853{
3854 UWord sm_off, vabits8;
3855 SecMap* sm;
3856
3857 PROF_EVENT(220, "mc_LOADV32");
3858
3859#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003860 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003861#else
njn45e81252006-03-28 12:35:08 +00003862 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003863 PROF_EVENT(221, "mc_LOADV32-slow1");
njn45e81252006-03-28 12:35:08 +00003864 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003865 }
3866
njna7c7ebd2006-03-28 12:51:02 +00003867 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003868 sm_off = SM_OFF(a);
3869 vabits8 = sm->vabits8[sm_off];
3870
3871 // Handle common case quickly: a is suitably aligned, is mapped, and the
3872   // entire word32 it lives in is addressable.
3873 // Convert V bits from compact memory form to expanded register form.
3874 // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
3875 // Almost certainly not necessary, but be paranoid.
njndbf7ca72006-03-31 11:57:59 +00003876 if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003877 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
njndbf7ca72006-03-31 11:57:59 +00003878 } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003879 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
3880 } else {
njndbf7ca72006-03-31 11:57:59 +00003881 /* Slow case: the 4 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003882 PROF_EVENT(222, "mc_LOADV32-slow2");
njn45e81252006-03-28 12:35:08 +00003883 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003884 }
3885#endif
3886}
3887
3888VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
3889{
3890 return mc_LOADV32(a, True);
3891}
3892VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
3893{
3894 return mc_LOADV32(a, False);
3895}
sewardjc1a2cda2005-04-21 17:34:00 +00003896
sewardjc1a2cda2005-04-21 17:34:00 +00003897
njn1d0825f2006-03-27 11:37:07 +00003898static INLINE
njn4cf530b2006-04-06 13:33:48 +00003899void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003900{
3901 UWord sm_off, vabits8;
3902 SecMap* sm;
3903
3904 PROF_EVENT(230, "mc_STOREV32");
3905
3906#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00003907 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003908#else
njn45e81252006-03-28 12:35:08 +00003909 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003910 PROF_EVENT(231, "mc_STOREV32-slow1");
njn4cf530b2006-04-06 13:33:48 +00003911 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003912 return;
sewardjc1a2cda2005-04-21 17:34:00 +00003913 }
3914
njna7c7ebd2006-03-28 12:51:02 +00003915 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003916 sm_off = SM_OFF(a);
3917 vabits8 = sm->vabits8[sm_off];
3918
3919//---------------------------------------------------------------------------
3920#if 1
3921 // Cleverness: sometimes we don't have to write the shadow memory at
3922 // all, if we can tell that what we want to write is the same as what is
3923 // already there.
njn4cf530b2006-04-06 13:33:48 +00003924 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003925 if (vabits8 == (UInt)VA_BITS8_DEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003926 return;
njndbf7ca72006-03-31 11:57:59 +00003927 } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
3928 sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
njn1d0825f2006-03-27 11:37:07 +00003929 } else {
njndbf7ca72006-03-31 11:57:59 +00003930 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003931 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003932 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003933 }
njn4cf530b2006-04-06 13:33:48 +00003934 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003935 if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003936 return;
njndbf7ca72006-03-31 11:57:59 +00003937 } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
3938 sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003939 } else {
njndbf7ca72006-03-31 11:57:59 +00003940 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003941 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003942 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003943 }
3944 } else {
3945 // Partially defined word
3946 PROF_EVENT(234, "mc_STOREV32-slow4");
njn4cf530b2006-04-06 13:33:48 +00003947 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003948 }
3949//---------------------------------------------------------------------------
3950#else
3951 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003952 (VA_BITS8_DEFINED == vabits8 ||
3953 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00003954 {
3955 /* Handle common case quickly: a is suitably aligned, */
3956      /* is mapped, and is addressable. */
3957 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003958 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003959 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003960 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003961 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003962 } else {
3963 /* Slow but general case -- writing partially defined bytes. */
3964 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003965 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003966 }
3967 } else {
3968 /* Slow but general case. */
3969 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003970 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003971 }
3972#endif
3973//---------------------------------------------------------------------------
3974#endif
3975}
3976
njn4cf530b2006-04-06 13:33:48 +00003977VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003978{
njn4cf530b2006-04-06 13:33:48 +00003979 mc_STOREV32(a, vbits32, True);
njn1d0825f2006-03-27 11:37:07 +00003980}
njn4cf530b2006-04-06 13:33:48 +00003981VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003982{
njn4cf530b2006-04-06 13:33:48 +00003983 mc_STOREV32(a, vbits32, False);
njn1d0825f2006-03-27 11:37:07 +00003984}
njn25e49d8e72002-09-23 09:36:25 +00003985
njn25e49d8e72002-09-23 09:36:25 +00003986
sewardj95448072004-11-22 20:19:51 +00003987/* ------------------------ Size = 2 ------------------------ */
3988
njn1d0825f2006-03-27 11:37:07 +00003989static INLINE
3990UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
3991{
3992 UWord sm_off, vabits8;
3993 SecMap* sm;
3994
3995 PROF_EVENT(240, "mc_LOADV16");
3996
3997#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003998 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003999#else
njn45e81252006-03-28 12:35:08 +00004000 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00004001 PROF_EVENT(241, "mc_LOADV16-slow1");
njn45e81252006-03-28 12:35:08 +00004002 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00004003 }
4004
njna7c7ebd2006-03-28 12:51:02 +00004005 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004006 sm_off = SM_OFF(a);
4007 vabits8 = sm->vabits8[sm_off];
4008 // Handle common case quickly: a is suitably aligned, is mapped, and is
4009   // addressable.
4010 // Convert V bits from compact memory form to expanded register form
njndbf7ca72006-03-31 11:57:59 +00004011 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS16_DEFINED; }
4012 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004013 else {
njndbf7ca72006-03-31 11:57:59 +00004014 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
njn1d0825f2006-03-27 11:37:07 +00004015 // the two sub-bytes.
4016 UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00004017 if (vabits4 == VA_BITS4_DEFINED ) { return V_BITS16_DEFINED; }
4018 else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004019 else {
njndbf7ca72006-03-31 11:57:59 +00004020 /* Slow case: the two bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00004021 PROF_EVENT(242, "mc_LOADV16-slow2");
njn45e81252006-03-28 12:35:08 +00004022 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004023 }
4024 }
4025#endif
4026}
4027
4028VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
4029{
4030 return mc_LOADV16(a, True);
4031}
4032VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
4033{
4034 return mc_LOADV16(a, False);
4035}
sewardjc1a2cda2005-04-21 17:34:00 +00004036
sewardjc1a2cda2005-04-21 17:34:00 +00004037
njn1d0825f2006-03-27 11:37:07 +00004038static INLINE
njn4cf530b2006-04-06 13:33:48 +00004039void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00004040{
4041 UWord sm_off, vabits8;
4042 SecMap* sm;
4043
4044 PROF_EVENT(250, "mc_STOREV16");
4045
4046#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00004047 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004048#else
njn45e81252006-03-28 12:35:08 +00004049 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00004050 PROF_EVENT(251, "mc_STOREV16-slow1");
njn4cf530b2006-04-06 13:33:48 +00004051 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004052 return;
sewardjc1a2cda2005-04-21 17:34:00 +00004053 }
4054
njna7c7ebd2006-03-28 12:51:02 +00004055 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004056 sm_off = SM_OFF(a);
4057 vabits8 = sm->vabits8[sm_off];
4058 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00004059 (VA_BITS8_DEFINED == vabits8 ||
4060 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00004061 {
4062 /* Handle common case quickly: a is suitably aligned, */
4063      /* is mapped, and is addressable. */
4064 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00004065 if (V_BITS16_DEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00004066 insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
njn1d0825f2006-03-27 11:37:07 +00004067 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00004068 } else if (V_BITS16_UNDEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00004069 insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00004070 &(sm->vabits8[sm_off]) );
4071 } else {
4072 /* Slow but general case -- writing partially defined bytes. */
4073 PROF_EVENT(252, "mc_STOREV16-slow2");
njn4cf530b2006-04-06 13:33:48 +00004074 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004075 }
4076 } else {
4077 /* Slow but general case. */
4078 PROF_EVENT(253, "mc_STOREV16-slow3");
njn4cf530b2006-04-06 13:33:48 +00004079 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004080 }
4081#endif
4082}
njn25e49d8e72002-09-23 09:36:25 +00004083
njn4cf530b2006-04-06 13:33:48 +00004084VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00004085{
njn4cf530b2006-04-06 13:33:48 +00004086 mc_STOREV16(a, vbits16, True);
njn1d0825f2006-03-27 11:37:07 +00004087}
njn4cf530b2006-04-06 13:33:48 +00004088VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00004089{
njn4cf530b2006-04-06 13:33:48 +00004090 mc_STOREV16(a, vbits16, False);
njn1d0825f2006-03-27 11:37:07 +00004091}
sewardj5d28efc2005-04-21 22:16:29 +00004092
njn25e49d8e72002-09-23 09:36:25 +00004093
sewardj95448072004-11-22 20:19:51 +00004094/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00004095/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00004096
njnaf839f52005-06-23 03:27:57 +00004097VG_REGPARM(1)
njn1d0825f2006-03-27 11:37:07 +00004098UWord MC_(helperc_LOADV8) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00004099{
njn1d0825f2006-03-27 11:37:07 +00004100 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00004101 SecMap* sm;
4102
njn1d0825f2006-03-27 11:37:07 +00004103 PROF_EVENT(260, "mc_LOADV8");
sewardjc1a2cda2005-04-21 17:34:00 +00004104
njn1d0825f2006-03-27 11:37:07 +00004105#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00004106 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004107#else
njn45e81252006-03-28 12:35:08 +00004108 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00004109 PROF_EVENT(261, "mc_LOADV8-slow1");
njn45e81252006-03-28 12:35:08 +00004110 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004111 }
4112
njna7c7ebd2006-03-28 12:51:02 +00004113 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004114 sm_off = SM_OFF(a);
4115 vabits8 = sm->vabits8[sm_off];
4116 // Convert V bits from compact memory form to expanded register form
4117 // Handle common case quickly: a is mapped, and the entire
4118   // word32 it lives in is addressable.
njndbf7ca72006-03-31 11:57:59 +00004119 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS8_DEFINED; }
4120 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004121 else {
njndbf7ca72006-03-31 11:57:59 +00004122 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
njn1d0825f2006-03-27 11:37:07 +00004123 // the single byte.
4124 UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00004125 if (vabits2 == VA_BITS2_DEFINED ) { return V_BITS8_DEFINED; }
4126 else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004127 else {
njndbf7ca72006-03-31 11:57:59 +00004128 /* Slow case: the byte is not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00004129 PROF_EVENT(262, "mc_LOADV8-slow2");
njn45e81252006-03-28 12:35:08 +00004130 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004131 }
sewardjc1a2cda2005-04-21 17:34:00 +00004132 }
njn1d0825f2006-03-27 11:37:07 +00004133#endif
njn25e49d8e72002-09-23 09:36:25 +00004134}
4135
sewardjc1a2cda2005-04-21 17:34:00 +00004136
njnaf839f52005-06-23 03:27:57 +00004137VG_REGPARM(2)
njn4cf530b2006-04-06 13:33:48 +00004138void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
njn25e49d8e72002-09-23 09:36:25 +00004139{
njn1d0825f2006-03-27 11:37:07 +00004140 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00004141 SecMap* sm;
4142
njn1d0825f2006-03-27 11:37:07 +00004143 PROF_EVENT(270, "mc_STOREV8");
sewardjc1a2cda2005-04-21 17:34:00 +00004144
njn1d0825f2006-03-27 11:37:07 +00004145#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00004146 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004147#else
njn45e81252006-03-28 12:35:08 +00004148 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00004149 PROF_EVENT(271, "mc_STOREV8-slow1");
njn4cf530b2006-04-06 13:33:48 +00004150 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004151 return;
4152 }
4153
njna7c7ebd2006-03-28 12:51:02 +00004154 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004155 sm_off = SM_OFF(a);
4156 vabits8 = sm->vabits8[sm_off];
4157 if (EXPECTED_TAKEN
4158 ( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00004159 ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
njn1d0825f2006-03-27 11:37:07 +00004160 || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
4161 )
4162 )
4163 )
4164 {
sewardjc1a2cda2005-04-21 17:34:00 +00004165 /* Handle common case quickly: a is mapped, the entire word32 it
4166         lives in is addressable. */
njn1d0825f2006-03-27 11:37:07 +00004167 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00004168 if (V_BITS8_DEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00004169 insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
njn1d0825f2006-03-27 11:37:07 +00004170 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00004171 } else if (V_BITS8_UNDEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00004172 insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00004173 &(sm->vabits8[sm_off]) );
4174 } else {
4175 /* Slow but general case -- writing partially defined bytes. */
4176 PROF_EVENT(272, "mc_STOREV8-slow2");
njn4cf530b2006-04-06 13:33:48 +00004177 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004178 }
sewardjc1a2cda2005-04-21 17:34:00 +00004179 } else {
njn1d0825f2006-03-27 11:37:07 +00004180 /* Slow but general case. */
4181 PROF_EVENT(273, "mc_STOREV8-slow3");
njn4cf530b2006-04-06 13:33:48 +00004182 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004183 }
njn1d0825f2006-03-27 11:37:07 +00004184#endif
njn25e49d8e72002-09-23 09:36:25 +00004185}
4186
4187
sewardjc859fbf2005-04-22 21:10:28 +00004188/*------------------------------------------------------------*/
4189/*--- Functions called directly from generated code: ---*/
4190/*--- Value-check failure handlers. ---*/
4191/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004192
njn5c004e42002-11-18 11:04:50 +00004193void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004194{
njn718d3b12006-12-16 00:54:12 +00004195 mc_record_cond_error ( VG_(get_running_tid)() );
njn25e49d8e72002-09-23 09:36:25 +00004196}
4197
njn5c004e42002-11-18 11:04:50 +00004198void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004199{
njn9e63cb62005-05-08 18:34:59 +00004200 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00004201}
4202
njn5c004e42002-11-18 11:04:50 +00004203void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004204{
njn9e63cb62005-05-08 18:34:59 +00004205 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00004206}
4207
sewardj11bcc4e2005-04-23 22:38:38 +00004208void MC_(helperc_value_check8_fail) ( void )
4209{
njn9e63cb62005-05-08 18:34:59 +00004210 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00004211}
4212
njnaf839f52005-06-23 03:27:57 +00004213VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00004214{
njn9e63cb62005-05-08 18:34:59 +00004215 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00004216}
4217
njn25e49d8e72002-09-23 09:36:25 +00004218
sewardjc2c12c22006-03-08 13:20:09 +00004219/*------------------------------------------------------------*/
4220/*--- Metadata get/set functions, for client requests. ---*/
4221/*------------------------------------------------------------*/
4222
njn1d0825f2006-03-27 11:37:07 +00004223// Nb: this expands the V+A bits out into register-form V bits, even though
4224// they're in memory. This is for backward compatibility, and because it's
4225// probably what the user wants.
4226
4227/* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
sewardjc2c12c22006-03-08 13:20:09 +00004228 error [no longer used], 3 == addressing error. */
njn718d3b12006-12-16 00:54:12 +00004229/* Nb: We used to issue various definedness/addressability errors from here,
4230 but we took them out because they ranged from not-very-helpful to
4231 downright annoying, and they complicated the error data structures. */
sewardjc2c12c22006-03-08 13:20:09 +00004232static Int mc_get_or_set_vbits_for_client (
4233 ThreadId tid,
njn1d0825f2006-03-27 11:37:07 +00004234 Addr a,
4235 Addr vbits,
4236 SizeT szB,
sewardjc2c12c22006-03-08 13:20:09 +00004237 Bool setting /* True <=> set vbits, False <=> get vbits */
4238)
4239{
sewardjc2c12c22006-03-08 13:20:09 +00004240 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00004241 Bool ok;
4242 UChar vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00004243
njn1d0825f2006-03-27 11:37:07 +00004244   /* Check that arrays are addressable before doing any getting/setting. */
4245 for (i = 0; i < szB; i++) {
njn718d3b12006-12-16 00:54:12 +00004246 if (VA_BITS2_NOACCESS == get_vabits2(a + i) ||
4247 VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
njn1d0825f2006-03-27 11:37:07 +00004248 return 3;
sewardjc2c12c22006-03-08 13:20:09 +00004249 }
4250 }
njn1d0825f2006-03-27 11:37:07 +00004251
sewardjc2c12c22006-03-08 13:20:09 +00004252 /* Do the copy */
4253 if (setting) {
njn1d0825f2006-03-27 11:37:07 +00004254 /* setting */
4255 for (i = 0; i < szB; i++) {
4256 ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
4257 tl_assert(ok);
sewardjc2c12c22006-03-08 13:20:09 +00004258 }
4259 } else {
4260 /* getting */
njn1d0825f2006-03-27 11:37:07 +00004261 for (i = 0; i < szB; i++) {
4262 ok = get_vbits8(a + i, &vbits8);
4263 tl_assert(ok);
njn1d0825f2006-03-27 11:37:07 +00004264 ((UChar*)vbits)[i] = vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00004265 }
4266 // The bytes in vbits[] have now been set, so mark them as such.
njndbf7ca72006-03-31 11:57:59 +00004267 MC_(make_mem_defined)(vbits, szB);
njn1d0825f2006-03-27 11:37:07 +00004268 }
sewardjc2c12c22006-03-08 13:20:09 +00004269
4270 return 1;
4271}
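/* Client-side sketch (illustrative only; see memcheck.h for the exact
   macro definitions).  A program running under memcheck can save and
   restore the definedness of a buffer roughly like this:

      UChar vbits[64];
      VALGRIND_GET_VBITS(buf, vbits, 64);   // routed to the 'get' path above
      ...clobber buf...
      VALGRIND_SET_VBITS(buf, vbits, 64);   // routed to the 'set' path above

   Per the return values above, the request yields 3 if any byte of either
   array is unaddressable, and 1 otherwise. */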
sewardj05fe85e2005-04-27 22:46:36 +00004272
4273
4274/*------------------------------------------------------------*/
4275/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
4276/*------------------------------------------------------------*/
4277
4278/* For the memory leak detector, say whether an entire 64k chunk of
4279 address space is possibly in use, or not. If in doubt return
4280 True.
4281*/
4282static
4283Bool mc_is_within_valid_secondary ( Addr a )
4284{
4285 SecMap* sm = maybe_get_secmap_for ( a );
sewardj05a46732006-10-17 01:28:10 +00004286 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
4287 || in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004288 /* Definitely not in use. */
4289 return False;
4290 } else {
4291 return True;
4292 }
4293}
4294
4295
4296/* For the memory leak detector, say whether or not a given word
4297 address is to be regarded as valid. */
4298static
4299Bool mc_is_valid_aligned_word ( Addr a )
4300{
4301 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
4302 if (sizeof(UWord) == 4) {
4303 tl_assert(VG_IS_4_ALIGNED(a));
4304 } else {
4305 tl_assert(VG_IS_8_ALIGNED(a));
4306 }
sewardj05a46732006-10-17 01:28:10 +00004307 if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok
4308 && !in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004309 return True;
4310 } else {
4311 return False;
4312 }
4313}
sewardja4495682002-10-21 07:29:59 +00004314
4315
nethercote996901a2004-08-03 13:29:09 +00004316/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00004317 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00004318 tool. */
njnb8dca862005-03-14 02:42:44 +00004319static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00004320{
njn1d0825f2006-03-27 11:37:07 +00004321 MC_(do_detect_memory_leaks) (
sewardj05fe85e2005-04-27 22:46:36 +00004322 tid,
4323 mode,
4324 mc_is_within_valid_secondary,
4325 mc_is_valid_aligned_word
4326 );
njn25e49d8e72002-09-23 09:36:25 +00004327}
4328
4329
sewardjc859fbf2005-04-22 21:10:28 +00004330/*------------------------------------------------------------*/
4331/*--- Initialisation ---*/
4332/*------------------------------------------------------------*/
4333
4334static void init_shadow_memory ( void )
4335{
4336 Int i;
4337 SecMap* sm;
4338
njn1d0825f2006-03-27 11:37:07 +00004339 tl_assert(V_BIT_UNDEFINED == 1);
4340 tl_assert(V_BIT_DEFINED == 0);
4341 tl_assert(V_BITS8_UNDEFINED == 0xFF);
4342 tl_assert(V_BITS8_DEFINED == 0);
4343
sewardjc859fbf2005-04-22 21:10:28 +00004344 /* Build the 3 distinguished secondaries */
sewardjc859fbf2005-04-22 21:10:28 +00004345 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004346 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
sewardjc859fbf2005-04-22 21:10:28 +00004347
njndbf7ca72006-03-31 11:57:59 +00004348 sm = &sm_distinguished[SM_DIST_UNDEFINED];
4349 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004350
njndbf7ca72006-03-31 11:57:59 +00004351 sm = &sm_distinguished[SM_DIST_DEFINED];
4352 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004353
4354 /* Set up the primary map. */
4355 /* These entries gradually get overwritten as the used address
4356 space expands. */
4357 for (i = 0; i < N_PRIMARY_MAP; i++)
4358 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
4359
sewardj05a46732006-10-17 01:28:10 +00004360 /* Auxiliary primary maps */
4361 init_auxmap_L1_L2();
4362
sewardjc859fbf2005-04-22 21:10:28 +00004363 /* auxmap_size = auxmap_used = 0;
4364 no ... these are statically initialised */
njn1d0825f2006-03-27 11:37:07 +00004365
4366 /* Secondary V bit table */
4367 secVBitTable = createSecVBitTable();
sewardjc859fbf2005-04-22 21:10:28 +00004368}
4369
4370
4371/*------------------------------------------------------------*/
4372/*--- Sanity check machinery (permanently engaged) ---*/
4373/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004374
njn51d827b2005-05-09 01:02:08 +00004375static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004376{
jseward9800fd32004-01-04 23:08:04 +00004377 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00004378 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00004379 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00004380 return True;
njn25e49d8e72002-09-23 09:36:25 +00004381}
4382
njn51d827b2005-05-09 01:02:08 +00004383static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004384{
sewardj05a46732006-10-17 01:28:10 +00004385 Int i;
4386 Word n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00004387 SecMap* sm;
sewardj05a46732006-10-17 01:28:10 +00004388 HChar* errmsg;
sewardj23eb2fd2005-04-22 16:29:19 +00004389 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00004390
sewardj05a46732006-10-17 01:28:10 +00004391 if (0) VG_(printf)("expensive sanity check\n");
4392 if (0) return True;
4393
sewardj23eb2fd2005-04-22 16:29:19 +00004394 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00004395 PROF_EVENT(491, "expensive_sanity_check");
4396
njn1d0825f2006-03-27 11:37:07 +00004397 /* Check that the 3 distinguished SMs are still as they should be. */
njn25e49d8e72002-09-23 09:36:25 +00004398
njndbf7ca72006-03-31 11:57:59 +00004399 /* Check noaccess DSM. */
sewardj45d94cc2005-04-20 14:44:11 +00004400 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004401 for (i = 0; i < SM_CHUNKS; i++)
4402 if (sm->vabits8[i] != VA_BITS8_NOACCESS)
sewardj23eb2fd2005-04-22 16:29:19 +00004403 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00004404
njndbf7ca72006-03-31 11:57:59 +00004405 /* Check undefined DSM. */
4406 sm = &sm_distinguished[SM_DIST_UNDEFINED];
njn1d0825f2006-03-27 11:37:07 +00004407 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004408 if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004409 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004410
njndbf7ca72006-03-31 11:57:59 +00004411 /* Check defined DSM. */
4412 sm = &sm_distinguished[SM_DIST_DEFINED];
njn1d0825f2006-03-27 11:37:07 +00004413 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004414 if (sm->vabits8[i] != VA_BITS8_DEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004415 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004416
sewardj23eb2fd2005-04-22 16:29:19 +00004417 if (bad) {
4418 VG_(printf)("memcheck expensive sanity: "
4419 "distinguished_secondaries have changed\n");
4420 return False;
4421 }
4422
njn1d0825f2006-03-27 11:37:07 +00004423 /* If we're not checking for undefined value errors, the secondary V bit
4424 * table should be empty. */
4425 if (!MC_(clo_undef_value_errors)) {
njne2a9ad32007-09-17 05:30:48 +00004426 if (0 != VG_(OSetGen_Size)(secVBitTable))
njn1d0825f2006-03-27 11:37:07 +00004427 return False;
4428 }
4429
sewardj05a46732006-10-17 01:28:10 +00004430 /* check the auxiliary maps, very thoroughly */
4431 n_secmaps_found = 0;
4432 errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
4433 if (errmsg) {
4434 VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
sewardj23eb2fd2005-04-22 16:29:19 +00004435 return False;
4436 }
4437
sewardj05a46732006-10-17 01:28:10 +00004438 /* n_secmaps_found is now the number referred to by the auxiliary
4439 primary map. Now add on the ones referred to by the main
4440 primary map. */
sewardj23eb2fd2005-04-22 16:29:19 +00004441 for (i = 0; i < N_PRIMARY_MAP; i++) {
sewardj05a46732006-10-17 01:28:10 +00004442 if (primary_map[i] == NULL) {
sewardj23eb2fd2005-04-22 16:29:19 +00004443 bad = True;
4444 } else {
sewardj05a46732006-10-17 01:28:10 +00004445 if (!is_distinguished_sm(primary_map[i]))
sewardj23eb2fd2005-04-22 16:29:19 +00004446 n_secmaps_found++;
4447 }
4448 }
4449
sewardj05a46732006-10-17 01:28:10 +00004450 /* check that the number of secmaps issued matches the number that
4451 are reachable (iow, no secmap leaks) */
njn1d0825f2006-03-27 11:37:07 +00004452 if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
sewardj23eb2fd2005-04-22 16:29:19 +00004453 bad = True;
4454
4455 if (bad) {
4456 VG_(printf)("memcheck expensive sanity: "
4457 "apparent secmap leakage\n");
4458 return False;
4459 }
4460
sewardj23eb2fd2005-04-22 16:29:19 +00004461 if (bad) {
4462 VG_(printf)("memcheck expensive sanity: "
4463 "auxmap covers wrong address space\n");
4464 return False;
4465 }
4466
4467 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00004468
4469 return True;
4470}
sewardj45d94cc2005-04-20 14:44:11 +00004471
njn25e49d8e72002-09-23 09:36:25 +00004472/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00004473/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00004474/*------------------------------------------------------------*/
4475
njn1d0825f2006-03-27 11:37:07 +00004476Bool MC_(clo_partial_loads_ok) = False;
sewardjfa4ca3b2007-11-30 17:19:36 +00004477Long MC_(clo_freelist_vol) = 10*1000*1000LL;
njn1d0825f2006-03-27 11:37:07 +00004478LeakCheckMode MC_(clo_leak_check) = LC_Summary;
4479VgRes MC_(clo_leak_resolution) = Vg_LowRes;
4480Bool MC_(clo_show_reachable) = False;
4481Bool MC_(clo_workaround_gcc296_bugs) = False;
4482Bool MC_(clo_undef_value_errors) = True;
sewardjeb0fa932007-11-30 21:41:40 +00004483Int MC_(clo_malloc_fill) = -1;
4484Int MC_(clo_free_fill) = -1;
njn1d0825f2006-03-27 11:37:07 +00004485
4486static Bool mc_process_cmd_line_options(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00004487{
njn1d0825f2006-03-27 11:37:07 +00004488 VG_BOOL_CLO(arg, "--partial-loads-ok", MC_(clo_partial_loads_ok))
4489 else VG_BOOL_CLO(arg, "--show-reachable", MC_(clo_show_reachable))
4490 else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))
4491
4492 else VG_BOOL_CLO(arg, "--undef-value-errors", MC_(clo_undef_value_errors))
4493
sewardjfa4ca3b2007-11-30 17:19:36 +00004494 else VG_BNUM_CLO(arg, "--freelist-vol", MC_(clo_freelist_vol),
4495 0, 10*1000*1000*1000LL)
njn1d0825f2006-03-27 11:37:07 +00004496
4497 else if (VG_CLO_STREQ(arg, "--leak-check=no"))
4498 MC_(clo_leak_check) = LC_Off;
4499 else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
4500 MC_(clo_leak_check) = LC_Summary;
4501 else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
4502 VG_CLO_STREQ(arg, "--leak-check=full"))
4503 MC_(clo_leak_check) = LC_Full;
4504
4505 else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
4506 MC_(clo_leak_resolution) = Vg_LowRes;
4507 else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
4508 MC_(clo_leak_resolution) = Vg_MedRes;
4509 else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
4510 MC_(clo_leak_resolution) = Vg_HighRes;
4511
sewardj05a46732006-10-17 01:28:10 +00004512 else if (VG_CLO_STREQN(16,arg,"--ignore-ranges=")) {
4513 Int i;
4514 UChar* txt = (UChar*)(arg+16);
4515 Bool ok = parse_ignore_ranges(txt);
4516 if (!ok)
4517 return False;
4518 tl_assert(ignoreRanges.used >= 0);
4519 tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
4520 for (i = 0; i < ignoreRanges.used; i++) {
4521 Addr s = ignoreRanges.start[i];
4522 Addr e = ignoreRanges.end[i];
4523 Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
4524 if (e <= s) {
4525 VG_(message)(Vg_DebugMsg,
4526 "ERROR: --ignore-ranges: end <= start in range:");
4527 VG_(message)(Vg_DebugMsg,
4528 " 0x%lx-0x%lx", s, e);
4529 return False;
4530 }
4531 if (e - s > limit) {
4532 VG_(message)(Vg_DebugMsg,
4533 "ERROR: --ignore-ranges: suspiciously large range:");
4534 VG_(message)(Vg_DebugMsg,
4535 " 0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
4536 return False;
4537 }
4538 }
4539 }
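   /* Example (hypothetical addresses):
         --ignore-ranges=0x60000000-0x61000000
      marks that 16MB window as assumed-OK: in_ignored_range() then causes
      such addresses to be skipped, e.g. by the leak-scan callbacks
      mc_is_within_valid_secondary and mc_is_valid_aligned_word above. */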
4540
sewardjeb0fa932007-11-30 21:41:40 +00004541 else VG_BHEX_CLO(arg, "--malloc-fill", MC_(clo_malloc_fill), 0x00, 0xFF)
4542 else VG_BHEX_CLO(arg, "--free-fill", MC_(clo_free_fill), 0x00, 0xFF)
4543
njn1d0825f2006-03-27 11:37:07 +00004544 else
4545 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4546
4547 return True;
njn25e49d8e72002-09-23 09:36:25 +00004548}
4549
njn51d827b2005-05-09 01:02:08 +00004550static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00004551{
njn1d0825f2006-03-27 11:37:07 +00004552 VG_(printf)(
4553" --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
4554" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
4555" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
4556" --undef-value-errors=no|yes check for undefined value errors [yes]\n"
4557" --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
sewardjfa4ca3b2007-11-30 17:19:36 +00004558" --freelist-vol=<number> volume of freed blocks queue [10000000]\n"
njn1d0825f2006-03-27 11:37:07 +00004559" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
sewardj05a46732006-10-17 01:28:10 +00004560" --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS] assume given addresses are OK\n"
sewardjeb0fa932007-11-30 21:41:40 +00004561" --malloc-fill=<hexnumber> fill malloc'd areas with given value\n"
4562" --free-fill=<hexnumber> fill free'd areas with given value\n"
njn1d0825f2006-03-27 11:37:07 +00004563 );
4564 VG_(replacement_malloc_print_usage)();
njn3e884182003-04-15 13:03:23 +00004565}
4566
njn51d827b2005-05-09 01:02:08 +00004567static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00004568{
njn1d0825f2006-03-27 11:37:07 +00004569 VG_(replacement_malloc_print_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00004570}
4571
sewardjf3418c02005-11-08 14:10:24 +00004572
nethercote8b76fe52004-11-08 19:20:09 +00004573/*------------------------------------------------------------*/
4574/*--- Client requests ---*/
4575/*------------------------------------------------------------*/
4576
4577/* Client block management:
4578
4579 This is managed as an expanding array of client block descriptors.
4580 Indices of live descriptors are issued to the client, so it can ask
4581 to free them later. Therefore we cannot slide live entries down
4582 over dead ones. Instead we must use free/inuse flags and scan for
4583 an empty slot at allocation time. This in turn means allocation is
4584 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00004585
sewardjedc75ab2005-03-15 23:30:32 +00004586 An unused block has start == size == 0
4587*/
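/* Client-side sketch (illustrative; the exact macros are in memcheck.h):

      int id = VALGRIND_CREATE_BLOCK(addr, len, "my buffer");
      ...
      VALGRIND_DISCARD(id);

   CREATE_BLOCK hands back the descriptor index issued below, DISCARD frees
   that slot again, and the description string is used when memcheck
   describes addresses lying inside the block. */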
nethercote8b76fe52004-11-08 19:20:09 +00004588
4589typedef
4590 struct {
4591 Addr start;
4592 SizeT size;
4593 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00004594 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00004595 }
4596 CGenBlock;
4597
4598/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00004599static UInt cgb_size = 0;
4600static UInt cgb_used = 0;
4601static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00004602
4603/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00004604static UInt cgb_used_MAX = 0; /* Max in use. */
4605static UInt cgb_allocs = 0; /* Number of allocs. */
4606static UInt cgb_discards = 0; /* Number of discards. */
4607static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00004608
4609
4610static
njn695c16e2005-03-27 03:40:28 +00004611Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00004612{
4613 UInt i, sz_new;
4614 CGenBlock* cgbs_new;
4615
njn695c16e2005-03-27 03:40:28 +00004616 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00004617
njn695c16e2005-03-27 03:40:28 +00004618 for (i = 0; i < cgb_used; i++) {
4619 cgb_search++;
4620 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004621 return i;
4622 }
4623
4624 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00004625 if (cgb_used < cgb_size) {
4626 cgb_used++;
4627 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004628 }
4629
4630 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00004631 tl_assert(cgb_used == cgb_size);
4632 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00004633
4634 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00004635 for (i = 0; i < cgb_used; i++)
4636 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00004637
njn695c16e2005-03-27 03:40:28 +00004638 if (cgbs != NULL)
4639 VG_(free)( cgbs );
4640 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00004641
njn695c16e2005-03-27 03:40:28 +00004642 cgb_size = sz_new;
4643 cgb_used++;
4644 if (cgb_used > cgb_used_MAX)
4645 cgb_used_MAX = cgb_used;
4646 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004647}
4648
4649
4650static void show_client_block_stats ( void )
4651{
4652 VG_(message)(Vg_DebugMsg,
4653 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00004654 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00004655 );
4656}
4657
nethercote8b76fe52004-11-08 19:20:09 +00004658static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
4659{
4660 UInt i;
nethercote8b76fe52004-11-08 19:20:09 +00004661
4662 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00004663 for (i = 0; i < cgb_used; i++) {
4664 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004665 continue;
njn717cde52005-05-10 02:47:21 +00004666 // Use zero as the redzone for client blocks.
4667 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00004668 /* OK - maybe it's a mempool, too? */
njn1d0825f2006-03-27 11:37:07 +00004669 MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
njn12627272005-08-14 18:32:16 +00004670 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00004671 if (mp != NULL) {
4672 if (mp->chunks != NULL) {
njn1d0825f2006-03-27 11:37:07 +00004673 MC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00004674 VG_(HT_ResetIter)(mp->chunks);
4675 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0825f2006-03-27 11:37:07 +00004676 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00004677 ai->tag = Addr_Block;
4678 ai->Addr.Block.block_kind = Block_MempoolChunk;
4679 ai->Addr.Block.block_desc = "block";
4680 ai->Addr.Block.block_szB = mc->szB;
4681 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
4682 ai->Addr.Block.lastchange = mc->where;
njn1d0cb0d2005-08-15 01:52:02 +00004683 return True;
4684 }
nethercote8b76fe52004-11-08 19:20:09 +00004685 }
4686 }
njn718d3b12006-12-16 00:54:12 +00004687 ai->tag = Addr_Block;
4688 ai->Addr.Block.block_kind = Block_Mempool;
4689 ai->Addr.Block.block_desc = "mempool";
4690 ai->Addr.Block.block_szB = cgbs[i].size;
4691 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
4692 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00004693 return True;
4694 }
njn718d3b12006-12-16 00:54:12 +00004695 ai->tag = Addr_Block;
4696 ai->Addr.Block.block_kind = Block_UserG;
4697 ai->Addr.Block.block_desc = cgbs[i].desc;
4698 ai->Addr.Block.block_szB = cgbs[i].size;
4699 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
4700 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00004701 return True;
4702 }
4703 }
4704 return False;
4705}
4706
njn51d827b2005-05-09 01:02:08 +00004707static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00004708{
4709 Int i;
4710 Bool ok;
4711 Addr bad_addr;
4712
njnfc26ff92004-11-22 19:12:49 +00004713 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00004714 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
4715 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
4716 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
4717 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
4718 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
sewardj2c1c9df2006-07-28 00:06:37 +00004719 && VG_USERREQ__MEMPOOL_FREE != arg[0]
sewardjc740d762006-10-05 17:59:23 +00004720 && VG_USERREQ__MEMPOOL_TRIM != arg[0]
4721 && VG_USERREQ__MOVE_MEMPOOL != arg[0]
4722 && VG_USERREQ__MEMPOOL_CHANGE != arg[0]
4723 && VG_USERREQ__MEMPOOL_EXISTS != arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00004724 return False;
4725
4726 switch (arg[0]) {
njndbf7ca72006-03-31 11:57:59 +00004727 case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
4728 ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00004729 if (!ok)
njn718d3b12006-12-16 00:54:12 +00004730 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00004731 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00004732 break;
nethercote8b76fe52004-11-08 19:20:09 +00004733
njndbf7ca72006-03-31 11:57:59 +00004734 case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
nethercote8b76fe52004-11-08 19:20:09 +00004735 MC_ReadResult res;
njndbf7ca72006-03-31 11:57:59 +00004736 res = is_mem_defined ( arg[1], arg[2], &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00004737 if (MC_AddrErr == res)
njn718d3b12006-12-16 00:54:12 +00004738 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00004739 else if (MC_ValueErr == res)
njn718d3b12006-12-16 00:54:12 +00004740 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00004741 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00004742 break;
nethercote8b76fe52004-11-08 19:20:09 +00004743 }
4744
4745 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00004746 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00004747 *ret = 0; /* return value is meaningless */
4748 break;
nethercote8b76fe52004-11-08 19:20:09 +00004749
njndbf7ca72006-03-31 11:57:59 +00004750 case VG_USERREQ__MAKE_MEM_NOACCESS:
4751 MC_(make_mem_noaccess) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00004752 *ret = -1;
4753 break;
nethercote8b76fe52004-11-08 19:20:09 +00004754
njndbf7ca72006-03-31 11:57:59 +00004755 case VG_USERREQ__MAKE_MEM_UNDEFINED:
4756 MC_(make_mem_undefined) ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00004757 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00004758 break;
nethercote8b76fe52004-11-08 19:20:09 +00004759
njndbf7ca72006-03-31 11:57:59 +00004760 case VG_USERREQ__MAKE_MEM_DEFINED:
4761 MC_(make_mem_defined) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00004762 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00004763 break;
4764
njndbf7ca72006-03-31 11:57:59 +00004765 case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
4766 make_mem_defined_if_addressable ( arg[1], arg[2] );
sewardjfb1e9ad2006-03-10 13:41:58 +00004767 *ret = -1;
4768 break;
4769
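      /* The MAKE_MEM_* and CHECK_MEM_IS_* cases above are reached via the
         corresponding memcheck.h wrappers, e.g.
         VALGRIND_MAKE_MEM_NOACCESS(addr, len) and
         VALGRIND_CHECK_MEM_IS_DEFINED(addr, len); see memcheck.h for the
         full list and exact definitions. */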
sewardjedc75ab2005-03-15 23:30:32 +00004770 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00004771 if (arg[1] != 0 && arg[2] != 0) {
4772 i = alloc_client_block();
4773 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
4774 cgbs[i].start = arg[1];
4775 cgbs[i].size = arg[2];
4776 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
sewardj39f34232007-11-09 23:02:28 +00004777 cgbs[i].where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
sewardjedc75ab2005-03-15 23:30:32 +00004778
sewardj8cf88b72005-07-08 01:29:33 +00004779 *ret = i;
4780 } else
4781 *ret = -1;
4782 break;
sewardjedc75ab2005-03-15 23:30:32 +00004783
nethercote8b76fe52004-11-08 19:20:09 +00004784 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00004785 if (cgbs == NULL
4786 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00004787 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00004788 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00004789 } else {
4790 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
4791 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
4792 VG_(free)(cgbs[arg[2]].desc);
4793 cgb_discards++;
4794 *ret = 0;
4795 }
4796 break;
nethercote8b76fe52004-11-08 19:20:09 +00004797
sewardjc2c12c22006-03-08 13:20:09 +00004798 case VG_USERREQ__GET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00004799 *ret = mc_get_or_set_vbits_for_client
4800 ( tid, arg[1], arg[2], arg[3], False /* get them */ );
4801 break;
4802
4803 case VG_USERREQ__SET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00004804 *ret = mc_get_or_set_vbits_for_client
4805 ( tid, arg[1], arg[2], arg[3], True /* set them */ );
4806 break;
nethercote8b76fe52004-11-08 19:20:09 +00004807
njn1d0825f2006-03-27 11:37:07 +00004808 case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
4809 UWord** argp = (UWord**)arg;
4810 // MC_(bytes_leaked) et al were set by the last leak check (or zero
4811 // if no prior leak checks performed).
4812 *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
4813 *argp[2] = MC_(bytes_dubious);
4814 *argp[3] = MC_(bytes_reachable);
4815 *argp[4] = MC_(bytes_suppressed);
4816 // there is no argp[5]
4817 //*argp[5] = MC_(bytes_indirect);
njndbf7ca72006-03-31 11:57:59 +00004818 // XXX need to make *argp[1-4] defined
njn1d0825f2006-03-27 11:37:07 +00004819 *ret = 0;
4820 return True;
4821 }
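      /* Reached via VALGRIND_COUNT_LEAKS(leaked, dubious, reachable,
         suppressed) in memcheck.h.  Note that indirectly-lost bytes are
         folded into 'leaked' above rather than reported separately. */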
4822 case VG_USERREQ__MALLOCLIKE_BLOCK: {
4823 Addr p = (Addr)arg[1];
4824 SizeT sizeB = arg[2];
4825 UInt rzB = arg[3];
4826 Bool is_zeroed = (Bool)arg[4];
4827
4828 MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed,
4829 MC_AllocCustom, MC_(malloc_list) );
4830 return True;
4831 }
4832 case VG_USERREQ__FREELIKE_BLOCK: {
4833 Addr p = (Addr)arg[1];
4834 UInt rzB = arg[2];
4835
4836 MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
4837 return True;
4838 }
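   /* The two cases above back VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB,
      is_zeroed) and VALGRIND_FREELIKE_BLOCK(addr, rzB) from valgrind.h,
      which let custom allocators get malloc/free-style tracking from
      memcheck (addressability, definedness and leak reporting). */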
4839
4840 case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
njn718d3b12006-12-16 00:54:12 +00004841 Char* s = (Char*)arg[1];
4842 Addr dst = (Addr) arg[2];
4843 Addr src = (Addr) arg[3];
4844 SizeT len = (SizeT)arg[4];
4845 mc_record_overlap_error(tid, s, src, dst, len);
njn1d0825f2006-03-27 11:37:07 +00004846 return True;
4847 }
4848
4849 case VG_USERREQ__CREATE_MEMPOOL: {
4850 Addr pool = (Addr)arg[1];
4851 UInt rzB = arg[2];
4852 Bool is_zeroed = (Bool)arg[3];
4853
4854 MC_(create_mempool) ( pool, rzB, is_zeroed );
4855 return True;
4856 }
4857
4858 case VG_USERREQ__DESTROY_MEMPOOL: {
4859 Addr pool = (Addr)arg[1];
4860
4861 MC_(destroy_mempool) ( pool );
4862 return True;
4863 }
4864
4865 case VG_USERREQ__MEMPOOL_ALLOC: {
4866 Addr pool = (Addr)arg[1];
4867 Addr addr = (Addr)arg[2];
4868 UInt size = arg[3];
4869
4870 MC_(mempool_alloc) ( tid, pool, addr, size );
4871 return True;
4872 }
4873
4874 case VG_USERREQ__MEMPOOL_FREE: {
4875 Addr pool = (Addr)arg[1];
4876 Addr addr = (Addr)arg[2];
4877
4878 MC_(mempool_free) ( pool, addr );
4879 return True;
4880 }
4881
sewardj2c1c9df2006-07-28 00:06:37 +00004882 case VG_USERREQ__MEMPOOL_TRIM: {
4883 Addr pool = (Addr)arg[1];
4884 Addr addr = (Addr)arg[2];
4885 UInt size = arg[3];
4886
4887 MC_(mempool_trim) ( pool, addr, size );
4888 return True;
4889 }
4890
sewardjc740d762006-10-05 17:59:23 +00004891 case VG_USERREQ__MOVE_MEMPOOL: {
4892 Addr poolA = (Addr)arg[1];
4893 Addr poolB = (Addr)arg[2];
4894
4895 MC_(move_mempool) ( poolA, poolB );
4896 return True;
4897 }
4898
4899 case VG_USERREQ__MEMPOOL_CHANGE: {
4900 Addr pool = (Addr)arg[1];
4901 Addr addrA = (Addr)arg[2];
4902 Addr addrB = (Addr)arg[3];
4903 UInt size = arg[4];
4904
4905 MC_(mempool_change) ( pool, addrA, addrB, size );
4906 return True;
4907 }
4908
4909 case VG_USERREQ__MEMPOOL_EXISTS: {
4910 Addr pool = (Addr)arg[1];
4911
4912 *ret = (UWord) MC_(mempool_exists) ( pool );
4913 return True;
4914 }
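   /* The mempool cases correspond to the VALGRIND_CREATE_MEMPOOL(pool,
      rzB, is_zeroed), VALGRIND_MEMPOOL_ALLOC(pool, addr, size),
      VALGRIND_MEMPOOL_FREE(pool, addr), VALGRIND_DESTROY_MEMPOOL(pool)
      family of macros in valgrind.h, by which pool/region allocators
      describe how they carve up their superblocks. */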
4915
4916
nethercote8b76fe52004-11-08 19:20:09 +00004917 default:
njn1d0825f2006-03-27 11:37:07 +00004918 VG_(message)(Vg_UserMsg,
4919 "Warning: unknown memcheck client request code %llx",
4920 (ULong)arg[0]);
4921 return False;
nethercote8b76fe52004-11-08 19:20:09 +00004922 }
4923 return True;
4924}
njn25e49d8e72002-09-23 09:36:25 +00004925
4926/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004927/*--- Crude profiling machinery. ---*/
4928/*------------------------------------------------------------*/
4929
4930// We track a number of interesting events (using PROF_EVENT)
4931// if MC_PROFILE_MEMORY is defined.
4932
4933#ifdef MC_PROFILE_MEMORY
4934
4935UInt MC_(event_ctr)[N_PROF_EVENTS];
4936HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
4937
4938static void init_prof_mem ( void )
4939{
4940 Int i;
4941 for (i = 0; i < N_PROF_EVENTS; i++) {
4942 MC_(event_ctr)[i] = 0;
4943 MC_(event_ctr_name)[i] = NULL;
4944 }
4945}
4946
4947static void done_prof_mem ( void )
4948{
4949 Int i;
4950 Bool spaced = False;
4951 for (i = 0; i < N_PROF_EVENTS; i++) {
4952 if (!spaced && (i % 10) == 0) {
4953 VG_(printf)("\n");
4954 spaced = True;
4955 }
4956 if (MC_(event_ctr)[i] > 0) {
4957 spaced = False;
4958 VG_(printf)( "prof mem event %3d: %9d %s\n",
4959 i, MC_(event_ctr)[i],
4960 MC_(event_ctr_name)[i]
4961 ? MC_(event_ctr_name)[i] : "unnamed");
4962 }
4963 }
4964}
4965
4966#else
4967
4968static void init_prof_mem ( void ) { }
4969static void done_prof_mem ( void ) { }
4970
4971#endif
4972
4973/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00004974/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00004975/*------------------------------------------------------------*/
4976
njn51d827b2005-05-09 01:02:08 +00004977static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00004978{
sewardj71bc3cb2005-05-19 00:25:45 +00004979 /* If we've been asked to emit XML, mash around various other
4980 options so as to constrain the output somewhat. */
4981 if (VG_(clo_xml)) {
4982 /* Extract as much info as possible from the leak checker. */
njn1d0825f2006-03-27 11:37:07 +00004983 /* MC_(clo_show_reachable) = True; */
4984 MC_(clo_leak_check) = LC_Full;
sewardj71bc3cb2005-05-19 00:25:45 +00004985 }
njn5c004e42002-11-18 11:04:50 +00004986}
4987
njn1d0825f2006-03-27 11:37:07 +00004988static void print_SM_info(char* type, int n_SMs)
4989{
4990 VG_(message)(Vg_DebugMsg,
4991 " memcheck: SMs: %s = %d (%dk, %dM)",
4992 type,
4993 n_SMs,
4994 n_SMs * sizeof(SecMap) / 1024,
4995 n_SMs * sizeof(SecMap) / (1024 * 1024) );
4996}
4997
njn51d827b2005-05-09 01:02:08 +00004998static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00004999{
njn1d0825f2006-03-27 11:37:07 +00005000 MC_(print_malloc_stats)();
sewardj23eb2fd2005-04-22 16:29:19 +00005001
njn1d0825f2006-03-27 11:37:07 +00005002 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
5003 if (MC_(clo_leak_check) == LC_Off)
5004 VG_(message)(Vg_UserMsg,
5005 "For a detailed leak analysis, rerun with: --leak-check=yes");
5006
5007 VG_(message)(Vg_UserMsg,
5008 "For counts of detected errors, rerun with: -v");
5009 }
5010 if (MC_(clo_leak_check) != LC_Off)
5011 mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));
5012
5013 done_prof_mem();
sewardjae986ca2005-10-12 12:53:20 +00005014
sewardj45d94cc2005-04-20 14:44:11 +00005015 if (VG_(clo_verbosity) > 1) {
njn1d0825f2006-03-27 11:37:07 +00005016 SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
5017
sewardj45d94cc2005-04-20 14:44:11 +00005018 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00005019 " memcheck: sanity checks: %d cheap, %d expensive",
5020 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00005021 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00005022 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
sewardj05a46732006-10-17 01:28:10 +00005023 n_auxmap_L2_nodes,
5024 n_auxmap_L2_nodes * 64,
5025 n_auxmap_L2_nodes / 16 );
sewardj23eb2fd2005-04-22 16:29:19 +00005026 VG_(message)(Vg_DebugMsg,
sewardj05a46732006-10-17 01:28:10 +00005027 " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
5028 n_auxmap_L1_searches, n_auxmap_L1_cmps,
5029 (10ULL * n_auxmap_L1_cmps)
5030 / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
5031 );
5032 VG_(message)(Vg_DebugMsg,
5033 " memcheck: auxmaps_L2: %lld searches, %lld nodes",
5034 n_auxmap_L2_searches, n_auxmap_L2_nodes
5035 );
sewardj23eb2fd2005-04-22 16:29:19 +00005036
njndbf7ca72006-03-31 11:57:59 +00005037 print_SM_info("n_issued ", n_issued_SMs);
5038 print_SM_info("n_deissued ", n_deissued_SMs);
5039 print_SM_info("max_noaccess ", max_noaccess_SMs);
5040 print_SM_info("max_undefined", max_undefined_SMs);
5041 print_SM_info("max_defined ", max_defined_SMs);
5042 print_SM_info("max_non_DSM ", max_non_DSM_SMs);
njn1d0825f2006-03-27 11:37:07 +00005043
5044 // Three DSMs, plus the non-DSM ones
5045 max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
5046 // The 3*sizeof(Word) term is the AVL node metadata size.
5047 // The 4*sizeof(Word) term is the malloc metadata size.
5048 // Hardwiring these sizes in sucks, but I don't see how else to do it.
5049 max_secVBit_szB = max_secVBit_nodes *
5050 (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
5051 max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
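      // Worked example of the hard-wired figures above: on a 64-bit host
      // sizeof(Word) == 8, so each sec-V-bit node is charged
      // 3*8 + 4*8 = 56 bytes of AVL-node plus malloc metadata on top of
      // sizeof(SecVBitNode); on a 32-bit host the figure is 28 bytes.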
sewardj23eb2fd2005-04-22 16:29:19 +00005052
5053 VG_(message)(Vg_DebugMsg,
njn1d0825f2006-03-27 11:37:07 +00005054 " memcheck: max sec V bit nodes: %d (%dk, %dM)",
5055 max_secVBit_nodes, max_secVBit_szB / 1024,
5056 max_secVBit_szB / (1024 * 1024));
5057 VG_(message)(Vg_DebugMsg,
5058 " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
5059 sec_vbits_new_nodes + sec_vbits_updates,
5060 sec_vbits_new_nodes, sec_vbits_updates );
5061 VG_(message)(Vg_DebugMsg,
5062 " memcheck: max shadow mem size: %dk, %dM",
5063 max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
sewardj45d94cc2005-04-20 14:44:11 +00005064 }
5065
njn5c004e42002-11-18 11:04:50 +00005066 if (0) {
5067 VG_(message)(Vg_DebugMsg,
5068 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00005069 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00005070 }
njn25e49d8e72002-09-23 09:36:25 +00005071}
5072
njn51d827b2005-05-09 01:02:08 +00005073static void mc_pre_clo_init(void)
5074{
5075 VG_(details_name) ("Memcheck");
5076 VG_(details_version) (NULL);
5077 VG_(details_description) ("a memory error detector");
5078 VG_(details_copyright_author)(
sewardj4d474d02008-02-11 11:34:59 +00005079 "Copyright (C) 2002-2008, and GNU GPL'd, by Julian Seward et al.");
njn51d827b2005-05-09 01:02:08 +00005080 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj05a46732006-10-17 01:28:10 +00005081 VG_(details_avg_translation_sizeB) ( 556 );
njn51d827b2005-05-09 01:02:08 +00005082
5083 VG_(basic_tool_funcs) (mc_post_clo_init,
5084 MC_(instrument),
5085 mc_fini);
5086
sewardj81651dc2007-08-28 06:05:20 +00005087 VG_(needs_final_IR_tidy_pass) ( MC_(final_tidy) );
5088
5089
njn51d827b2005-05-09 01:02:08 +00005090 VG_(needs_core_errors) ();
njn1d0825f2006-03-27 11:37:07 +00005091 VG_(needs_tool_errors) (mc_eq_Error,
njn51d827b2005-05-09 01:02:08 +00005092 mc_pp_Error,
sewardj39f34232007-11-09 23:02:28 +00005093 True,/*show TIDs for errors*/
njn1d0825f2006-03-27 11:37:07 +00005094 mc_update_extra,
njn51d827b2005-05-09 01:02:08 +00005095 mc_recognised_suppression,
njn1d0825f2006-03-27 11:37:07 +00005096 mc_read_extra_suppression_info,
5097 mc_error_matches_suppression,
5098 mc_get_error_name,
5099 mc_print_extra_suppression_info);
njn51d827b2005-05-09 01:02:08 +00005100 VG_(needs_libc_freeres) ();
njn1d0825f2006-03-27 11:37:07 +00005101 VG_(needs_command_line_options)(mc_process_cmd_line_options,
njn51d827b2005-05-09 01:02:08 +00005102 mc_print_usage,
5103 mc_print_debug_usage);
5104 VG_(needs_client_requests) (mc_handle_client_request);
5105 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
5106 mc_expensive_sanity_check);
njn1d0825f2006-03-27 11:37:07 +00005107 VG_(needs_malloc_replacement) (MC_(malloc),
5108 MC_(__builtin_new),
5109 MC_(__builtin_vec_new),
5110 MC_(memalign),
5111 MC_(calloc),
5112 MC_(free),
5113 MC_(__builtin_delete),
5114 MC_(__builtin_vec_delete),
5115 MC_(realloc),
5116 MC_MALLOC_REDZONE_SZB );
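   // Informal note: MC_MALLOC_REDZONE_SZB is the number of guard bytes
   // placed on either side of each heap block handed to the client; those
   // bytes are kept noaccess, so e.g. reading one byte past the end of a
   // malloc'd block lands in a redzone and is reported as an invalid access.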
njnca54af32006-04-16 10:25:43 +00005117 VG_(needs_xml_output) ();
njn51d827b2005-05-09 01:02:08 +00005118
njn1d0825f2006-03-27 11:37:07 +00005119 VG_(track_new_mem_startup) ( mc_new_mem_startup );
njndbf7ca72006-03-31 11:57:59 +00005120 VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
5121 VG_(track_new_mem_brk) ( MC_(make_mem_undefined) );
njn1d0825f2006-03-27 11:37:07 +00005122 VG_(track_new_mem_mmap) ( mc_new_mem_mmap );
njn51d827b2005-05-09 01:02:08 +00005123
njn1d0825f2006-03-27 11:37:07 +00005124 VG_(track_copy_mem_remap) ( MC_(copy_address_range_state) );
njn81623712005-10-07 04:48:37 +00005125
5126 // Nb: we don't do anything with mprotect. This means that V bits are
5127 // preserved if a program, for example, marks some memory as inaccessible
5128 // and then later marks it as accessible again.
5129 //
5130 // If an access violation occurs (eg. writing to read-only memory) we let
5131 // it fault and print an informative termination message. This doesn't
5132 // happen if the program catches the signal, though, which is bad. If we
5133 // had two A bits (for readability and writability) that were completely
5134 // distinct from V bits, then we could handle all this properly.
5135 VG_(track_change_mem_mprotect) ( NULL );
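   // Concretely (an illustration of the comment above): a client that does
   //    mprotect(p, len, PROT_NONE);
   //    mprotect(p, len, PROT_READ|PROT_WRITE);
   // leaves Memcheck's A/V state for [p, p+len) completely unchanged,
   // since no handler is registered here for protection changes.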
njn51d827b2005-05-09 01:02:08 +00005136
njndbf7ca72006-03-31 11:57:59 +00005137 VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
5138 VG_(track_die_mem_brk) ( MC_(make_mem_noaccess) );
5139 VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00005140
njn1d0825f2006-03-27 11:37:07 +00005141#ifdef PERF_FAST_STACK
5142 VG_(track_new_mem_stack_4) ( mc_new_mem_stack_4 );
5143 VG_(track_new_mem_stack_8) ( mc_new_mem_stack_8 );
5144 VG_(track_new_mem_stack_12) ( mc_new_mem_stack_12 );
5145 VG_(track_new_mem_stack_16) ( mc_new_mem_stack_16 );
5146 VG_(track_new_mem_stack_32) ( mc_new_mem_stack_32 );
5147 VG_(track_new_mem_stack_112) ( mc_new_mem_stack_112 );
5148 VG_(track_new_mem_stack_128) ( mc_new_mem_stack_128 );
5149 VG_(track_new_mem_stack_144) ( mc_new_mem_stack_144 );
5150 VG_(track_new_mem_stack_160) ( mc_new_mem_stack_160 );
5151#endif
5152 VG_(track_new_mem_stack) ( mc_new_mem_stack );
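   // The size-specific handlers above are fast paths: when the stack grows
   // by exactly one of the listed amounts (4, 8, 12, 16, 32, 112, 128, 144
   // or 160 bytes) the matching specialised routine can be called instead
   // of the generic length-N handler registered here.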
njn51d827b2005-05-09 01:02:08 +00005153
njn1d0825f2006-03-27 11:37:07 +00005154#ifdef PERF_FAST_STACK
5155 VG_(track_die_mem_stack_4) ( mc_die_mem_stack_4 );
5156 VG_(track_die_mem_stack_8) ( mc_die_mem_stack_8 );
5157 VG_(track_die_mem_stack_12) ( mc_die_mem_stack_12 );
5158 VG_(track_die_mem_stack_16) ( mc_die_mem_stack_16 );
5159 VG_(track_die_mem_stack_32) ( mc_die_mem_stack_32 );
5160 VG_(track_die_mem_stack_112) ( mc_die_mem_stack_112 );
5161 VG_(track_die_mem_stack_128) ( mc_die_mem_stack_128 );
5162 VG_(track_die_mem_stack_144) ( mc_die_mem_stack_144 );
5163 VG_(track_die_mem_stack_160) ( mc_die_mem_stack_160 );
5164#endif
5165 VG_(track_die_mem_stack) ( mc_die_mem_stack );
njn51d827b2005-05-09 01:02:08 +00005166
njndbf7ca72006-03-31 11:57:59 +00005167 VG_(track_ban_mem_stack) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00005168
njndbf7ca72006-03-31 11:57:59 +00005169 VG_(track_pre_mem_read) ( check_mem_is_defined );
5170 VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
5171 VG_(track_pre_mem_write) ( check_mem_is_addressable );
njn1d0825f2006-03-27 11:37:07 +00005172 VG_(track_post_mem_write) ( mc_post_mem_write );
njn51d827b2005-05-09 01:02:08 +00005173
njn1d0825f2006-03-27 11:37:07 +00005174 if (MC_(clo_undef_value_errors))
5175 VG_(track_pre_reg_read) ( mc_pre_reg_read );
njn51d827b2005-05-09 01:02:08 +00005176
njn1d0825f2006-03-27 11:37:07 +00005177 VG_(track_post_reg_write) ( mc_post_reg_write );
5178 VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );
njn51d827b2005-05-09 01:02:08 +00005179
5180 init_shadow_memory();
sewardj3f94a7d2007-08-25 07:19:08 +00005181 MC_(malloc_list) = VG_(HT_construct)( "MC_(malloc_list)" );
5182 MC_(mempool_list) = VG_(HT_construct)( "MC_(mempool_list)" );
njn1d0825f2006-03-27 11:37:07 +00005183 init_prof_mem();
njn51d827b2005-05-09 01:02:08 +00005184
5185 tl_assert( mc_expensive_sanity_check() );
njn1d0825f2006-03-27 11:37:07 +00005186
5187 // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
5188 tl_assert(sizeof(UWord) == sizeof(Addr));
sewardj05a46732006-10-17 01:28:10 +00005189 // Call me paranoid. I don't care.
5190 tl_assert(sizeof(void*) == sizeof(Addr));
njn1d0825f2006-03-27 11:37:07 +00005191
5192 // BYTES_PER_SEC_VBIT_NODE must be a power of two.
5193 tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
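   // (Presumably so a node's base address can be found by masking, e.g.
   // a & ~(Addr)(BYTES_PER_SEC_VBIT_NODE - 1); VG_(log2) returns -1 for
   // values that are not a power of two, hence the check.)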
njn51d827b2005-05-09 01:02:08 +00005194}
5195
sewardj45f4e7c2005-09-27 19:20:21 +00005196VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00005197
njn25e49d8e72002-09-23 09:36:25 +00005198/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00005199/*--- end ---*/
njn25e49d8e72002-09-23 09:36:25 +00005200/*--------------------------------------------------------------------*/