/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2008 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_oset.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


/* Set to 1 to do a little more sanity checking */
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

static void ocache_sarp_Set_Origins ( Addr, UWord, UInt ); /* fwds */
static void ocache_sarp_Clear_Origins ( Addr, UWord ); /* fwds */


/*------------------------------------------------------------*/
/*--- Fast-case knobs                                      ---*/
/*------------------------------------------------------------*/

// Comment these out to disable the fast cases (don't just set them to zero).

#define PERF_FAST_LOADV    1
#define PERF_FAST_STOREV   1

#define PERF_FAST_SARP     1

#define PERF_FAST_STACK    1
#define PERF_FAST_STACK2   1

/* Change this to 1 to enable assertions on origin tracking cache fast
   paths */
#define OC_ENABLE_ASSERTIONS 0

/*------------------------------------------------------------*/
/*--- V bits and A bits                                    ---*/
/*------------------------------------------------------------*/

/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
   thinks the corresponding value bit is defined.  And every memory byte
   has an A bit, which tracks whether Memcheck thinks the program can access
   it safely.  So every N-bit register is shadowed with N V bits, and every
   memory byte is shadowed with 8 V bits and one A bit.

   In the implementation, we use two forms of compression (compressed V bits
   and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
   for memory.

   Memcheck also tracks extra information about each heap block that is
   allocated, for detecting memory leaks and other purposes.
*/

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map (a.k.a. shadow
   memory), which records the state of all memory in the process.

   On 32-bit machines the memory map is organised as follows.
   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by two bits (details are below).  So
   each second-level map contains 16384 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of size
   64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since many of the 64kB chunks will
   have the same status for every bit -- ie. noaccess (for unused
   address space) or entirely addressable and defined (for code segments) --
   there are three distinguished secondary maps, which indicate 'noaccess',
   'undefined' and 'defined'.  For these uniform 64kB chunks, the primary
   map entry points to the relevant distinguished map.  In practice,
   typically more than half of the addressable memory is represented with
   the 'undefined' or 'defined' distinguished secondary map, so it gives a
   good saving.  It also lets us set the V+A bits of large address regions
   quickly in set_address_range_perms().

   On 64-bit machines it's more complicated.  If we followed the same basic
   scheme we'd have a four-level table which would require too many memory
   accesses.  So instead the top-level map table has 2^19 entries (indexed
   using bits 16..34 of the address); this covers the bottom 32GB.  Any
   accesses above 32GB are handled with a slow, sparse auxiliary table.
   Valgrind's address space manager tries very hard to keep things below
   this 32GB barrier so that performance doesn't suffer too much.

   Note that this file has a lot of different functions for reading and
   writing shadow memory.  Only a couple are strictly necessary (eg.
   get_vabits2 and set_vabits2), most are just specialised for specific
   common cases to improve performance.

   Aside: the V+A bits are less precise than they could be -- we have no way
   of marking memory as read-only.  It would be great if we could add an
   extra state VA_BITSn_READONLY.  But then we'd have 5 different states,
   which requires 2.3 bits to hold, and there's no way to do that elegantly
   -- we'd have to double up to 4 bits of metadata per byte, which doesn't
   seem worth it.
*/
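
/* Illustrative sketch of the 32-bit lookup path described above.  The real
   code below factors this across get_secmap_for_reading(), SM_OFF() and
   extract_vabits2_from_vabits8():

      SecMap* sm      = primary_map[a >> 16];             // top 16 bits
      UChar   vabits8 = sm->vabits8[(a & 0xffff) >> 2];   // 4 bytes per chunk
      UChar   vabits2 = 0x3 & (vabits8 >> ((a & 3) << 1));

   ie. reading the shadow state of one byte costs two dependent memory
   references; when a whole 64kB chunk has uniform state, those references
   simply land in one of the three shared distinguished secondaries. */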

/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Secondary maps --------------- */

// Each byte of memory conceptually has an A bit, which indicates its
// addressability, and 8 V bits, which indicate its definedness.
//
// But because very few bytes are partially defined, we can use a nice
// compression scheme to reduce the size of shadow memory.  Each byte of
// memory has 2 bits which indicate its state (ie. V+A bits):
//
//   00:  noaccess    (unaddressable but treated as fully defined)
//   01:  undefined   (addressable and fully undefined)
//   10:  defined     (addressable and fully defined)
//   11:  partdefined (addressable and partially defined)
//
// In the "partdefined" case, we use a secondary table to store the V bits.
// Each entry in the secondary-V-bits table maps a byte address to its 8 V
// bits.
//
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
// four bytes (32 bits) of memory are in each chunk.  Hence the name
// "vabits8".  This lets us get the V+A bits for four bytes at a time
// easily (without having to do any shifting and/or masking), and that is a
// very common operation.  (Note that although each vabits8 chunk
// is 8 bits in size, it represents 32 bits of memory.)
//
// The representation is "inverse" little-endian... each 4 bytes of
// memory is represented by a 1 byte value, where:
//
// - the status of byte (a+0) is held in bits [1..0]
// - the status of byte (a+1) is held in bits [3..2]
// - the status of byte (a+2) is held in bits [5..4]
// - the status of byte (a+3) is held in bits [7..6]
//
// It's "inverse" because endianness normally describes a mapping from
// value bits to memory addresses;  in this case the mapping is inverted.
// Ie. instead of particular value bits being held in certain addresses, in
// this case certain addresses are represented by particular value bits.
// See insert_vabits2_into_vabits8() for an example.
//
// But note that we don't compress the V bits stored in registers;  they
// need to be explicit to make the shadow operations possible.  Therefore
// when moving values between registers and memory we need to convert
// between the expanded in-register format and the compressed in-memory
// format.  This isn't so difficult, it just requires careful attention in a
// few places.

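// Worked example of the packing just described: if the four bytes at a+0,
// a+1, a+2 and a+3 are respectively defined (10), undefined (01),
// noaccess (00) and partdefined (11), then the vabits8 chunk covering them
// is
//
//    (0x3 << 6) | (0x0 << 4) | (0x1 << 2) | (0x2 << 0)  ==  0xC6
//
// ie. byte (a+0) occupies the least significant bit pair and byte (a+3)
// the most significant one.
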
// These represent eight bits of memory.
#define VA_BITS2_NOACCESS     0x0      // 00b
#define VA_BITS2_UNDEFINED    0x1      // 01b
#define VA_BITS2_DEFINED      0x2      // 10b
#define VA_BITS2_PARTDEFINED  0x3      // 11b

// These represent 16 bits of memory.
#define VA_BITS4_NOACCESS     0x0      // 00_00b
#define VA_BITS4_UNDEFINED    0x5      // 01_01b
#define VA_BITS4_DEFINED      0xa      // 10_10b

// These represent 32 bits of memory.
#define VA_BITS8_NOACCESS     0x00     // 00_00_00_00b
#define VA_BITS8_UNDEFINED    0x55     // 01_01_01_01b
#define VA_BITS8_DEFINED      0xaa     // 10_10_10_10b

// These represent 64 bits of memory.
#define VA_BITS16_NOACCESS    0x0000   // 00_00_00_00b x 2
#define VA_BITS16_UNDEFINED   0x5555   // 01_01_01_01b x 2
#define VA_BITS16_DEFINED     0xaaaa   // 10_10_10_10b x 2


#define SM_CHUNKS             16384
#define SM_OFF(aaa)           (((aaa) & 0xffff) >> 2)
#define SM_OFF_16(aaa)        (((aaa) & 0xffff) >> 3)
245
246// Paranoia: it's critical for performance that the requested inlining
247// occurs. So try extra hard.
248#define INLINE inline __attribute__((always_inline))
249
250static INLINE Addr start_of_this_sm ( Addr a ) {
251 return (a & (~SM_MASK));
252}
253static INLINE Bool is_start_of_sm ( Addr a ) {
254 return (start_of_this_sm(a) == a);
255}
256
njn25e49d8e72002-09-23 09:36:25 +0000257typedef
258 struct {
njn1d0825f2006-03-27 11:37:07 +0000259 UChar vabits8[SM_CHUNKS];
njn25e49d8e72002-09-23 09:36:25 +0000260 }
261 SecMap;
262
njn1d0825f2006-03-27 11:37:07 +0000263// 3 distinguished secondary maps, one for no-access, one for
264// accessible but undefined, and one for accessible and defined.
265// Distinguished secondaries may never be modified.
266#define SM_DIST_NOACCESS 0
njndbf7ca72006-03-31 11:57:59 +0000267#define SM_DIST_UNDEFINED 1
268#define SM_DIST_DEFINED 2
njnb8dca862005-03-14 02:42:44 +0000269
sewardj45d94cc2005-04-20 14:44:11 +0000270static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000271
njn1d0825f2006-03-27 11:37:07 +0000272static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
sewardj45d94cc2005-04-20 14:44:11 +0000273 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
274}
njnb8dca862005-03-14 02:42:44 +0000275
njn1d0825f2006-03-27 11:37:07 +0000276// Forward declaration
277static void update_SM_counts(SecMap* oldSM, SecMap* newSM);
278
sewardj45d94cc2005-04-20 14:44:11 +0000279/* dist_sm points to one of our three distinguished secondaries. Make
280 a copy of it so that we can write to it.
281*/
282static SecMap* copy_for_writing ( SecMap* dist_sm )
283{
284 SecMap* new_sm;
285 tl_assert(dist_sm == &sm_distinguished[0]
njn1d0825f2006-03-27 11:37:07 +0000286 || dist_sm == &sm_distinguished[1]
287 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000288
sewardj45f4e7c2005-09-27 19:20:21 +0000289 new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
290 if (new_sm == NULL)
291 VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
292 sizeof(SecMap) );
sewardj45d94cc2005-04-20 14:44:11 +0000293 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
njn1d0825f2006-03-27 11:37:07 +0000294 update_SM_counts(dist_sm, new_sm);
sewardj45d94cc2005-04-20 14:44:11 +0000295 return new_sm;
296}
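
/* Example of the copy-on-write behaviour: the first write into a 64kB
   chunk whose primary-map (or auxmap) entry still points at the 'noaccess'
   distinguished secondary makes get_secmap_for_writing_low/_high (below)
   notice the distinguished map and call copy_for_writing(), which
   allocates a private SecMap with VG_(am_shadow_alloc), copies the
   distinguished contents into it, and installs it.  Subsequent writes to
   that chunk then update the private copy directly. */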
njnb8dca862005-03-14 02:42:44 +0000297
njn1d0825f2006-03-27 11:37:07 +0000298/* --------------- Stats --------------- */
299
njndbf7ca72006-03-31 11:57:59 +0000300static Int n_issued_SMs = 0;
301static Int n_deissued_SMs = 0;
302static Int n_noaccess_SMs = N_PRIMARY_MAP; // start with many noaccess DSMs
303static Int n_undefined_SMs = 0;
304static Int n_defined_SMs = 0;
305static Int n_non_DSM_SMs = 0;
306static Int max_noaccess_SMs = 0;
307static Int max_undefined_SMs = 0;
308static Int max_defined_SMs = 0;
309static Int max_non_DSM_SMs = 0;
njn1d0825f2006-03-27 11:37:07 +0000310
sewardj05a46732006-10-17 01:28:10 +0000311/* # searches initiated in auxmap_L1, and # base cmps required */
312static ULong n_auxmap_L1_searches = 0;
313static ULong n_auxmap_L1_cmps = 0;
314/* # of searches that missed in auxmap_L1 and therefore had to
315 be handed to auxmap_L2. And the number of nodes inserted. */
316static ULong n_auxmap_L2_searches = 0;
317static ULong n_auxmap_L2_nodes = 0;
318
njn1d0825f2006-03-27 11:37:07 +0000319static Int n_sanity_cheap = 0;
320static Int n_sanity_expensive = 0;
321
322static Int n_secVBit_nodes = 0;
323static Int max_secVBit_nodes = 0;
324
325static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
326{
njndbf7ca72006-03-31 11:57:59 +0000327 if (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
328 else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
329 else if (oldSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs --;
330 else { n_non_DSM_SMs --;
331 n_deissued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000332
njndbf7ca72006-03-31 11:57:59 +0000333 if (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
334 else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
335 else if (newSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs ++;
336 else { n_non_DSM_SMs ++;
337 n_issued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000338
njndbf7ca72006-03-31 11:57:59 +0000339 if (n_noaccess_SMs > max_noaccess_SMs ) max_noaccess_SMs = n_noaccess_SMs;
340 if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
341 if (n_defined_SMs > max_defined_SMs ) max_defined_SMs = n_defined_SMs;
342 if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
njn1d0825f2006-03-27 11:37:07 +0000343}
sewardj45d94cc2005-04-20 14:44:11 +0000344
345/* --------------- Primary maps --------------- */
346
347/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000348 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000349 handled using the auxiliary primary map.
350*/
sewardj23eb2fd2005-04-22 16:29:19 +0000351static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000352
353
354/* An entry in the auxiliary primary map. base must be a 64k-aligned
355 value, and sm points at the relevant secondary map. As with the
356 main primary map, the secondary may be either a real secondary, or
sewardj05a46732006-10-17 01:28:10 +0000357 one of the three distinguished secondaries. DO NOT CHANGE THIS
358 LAYOUT: the first word has to be the key for OSet fast lookups.
sewardj45d94cc2005-04-20 14:44:11 +0000359*/
360typedef
361 struct {
sewardj23eb2fd2005-04-22 16:29:19 +0000362 Addr base;
sewardj45d94cc2005-04-20 14:44:11 +0000363 SecMap* sm;
364 }
365 AuxMapEnt;
366
sewardj05a46732006-10-17 01:28:10 +0000367/* Tunable parameter: How big is the L1 queue? */
368#define N_AUXMAP_L1 24
sewardj45d94cc2005-04-20 14:44:11 +0000369
sewardj05a46732006-10-17 01:28:10 +0000370/* Tunable parameter: How far along the L1 queue to insert
371 entries resulting from L2 lookups? */
372#define AUXMAP_L1_INSERT_IX 12
sewardj45d94cc2005-04-20 14:44:11 +0000373
sewardj05a46732006-10-17 01:28:10 +0000374static struct {
375 Addr base;
376 AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
377 }
378 auxmap_L1[N_AUXMAP_L1];
379
380static OSet* auxmap_L2 = NULL;
381
382static void init_auxmap_L1_L2 ( void )
sewardj45d94cc2005-04-20 14:44:11 +0000383{
sewardj05a46732006-10-17 01:28:10 +0000384 Int i;
385 for (i = 0; i < N_AUXMAP_L1; i++) {
386 auxmap_L1[i].base = 0;
387 auxmap_L1[i].ent = NULL;
sewardj45d94cc2005-04-20 14:44:11 +0000388 }
389
sewardj05a46732006-10-17 01:28:10 +0000390 tl_assert(0 == offsetof(AuxMapEnt,base));
391 tl_assert(sizeof(Addr) == sizeof(void*));
njne2a9ad32007-09-17 05:30:48 +0000392 auxmap_L2 = VG_(OSetGen_Create)( /*keyOff*/ offsetof(AuxMapEnt,base),
393 /*fastCmp*/ NULL,
394 VG_(malloc), VG_(free) );
sewardj05fe85e2005-04-27 22:46:36 +0000395}
396
sewardj05a46732006-10-17 01:28:10 +0000397/* Check representation invariants; if OK return NULL; else a
398 descriptive bit of text. Also return the number of
399 non-distinguished secondary maps referred to from the auxiliary
400 primary maps. */
sewardj05fe85e2005-04-27 22:46:36 +0000401
sewardj05a46732006-10-17 01:28:10 +0000402static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
sewardj05fe85e2005-04-27 22:46:36 +0000403{
sewardj05a46732006-10-17 01:28:10 +0000404 Word i, j;
405 /* On a 32-bit platform, the L2 and L1 tables should
406 both remain empty forever.
sewardj05fe85e2005-04-27 22:46:36 +0000407
sewardj05a46732006-10-17 01:28:10 +0000408 On a 64-bit platform:
409 In the L2 table:
410 all .base & 0xFFFF == 0
411 all .base > MAX_PRIMARY_ADDRESS
412 In the L1 table:
413 all .base & 0xFFFF == 0
414 all (.base > MAX_PRIMARY_ADDRESS
415 .base & 0xFFFF == 0
416 and .ent points to an AuxMapEnt with the same .base)
417 or
418 (.base == 0 and .ent == NULL)
419 */
420 *n_secmaps_found = 0;
421 if (sizeof(void*) == 4) {
422 /* 32-bit platform */
njne2a9ad32007-09-17 05:30:48 +0000423 if (VG_(OSetGen_Size)(auxmap_L2) != 0)
sewardj05a46732006-10-17 01:28:10 +0000424 return "32-bit: auxmap_L2 is non-empty";
425 for (i = 0; i < N_AUXMAP_L1; i++)
426 if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
427 return "32-bit: auxmap_L1 is non-empty";
428 } else {
429 /* 64-bit platform */
430 UWord elems_seen = 0;
431 AuxMapEnt *elem, *res;
432 AuxMapEnt key;
433 /* L2 table */
njne2a9ad32007-09-17 05:30:48 +0000434 VG_(OSetGen_ResetIter)(auxmap_L2);
435 while ( (elem = VG_(OSetGen_Next)(auxmap_L2)) ) {
sewardj05a46732006-10-17 01:28:10 +0000436 elems_seen++;
437 if (0 != (elem->base & (Addr)0xFFFF))
438 return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
439 if (elem->base <= MAX_PRIMARY_ADDRESS)
440 return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
441 if (elem->sm == NULL)
442 return "64-bit: .sm in _L2 is NULL";
443 if (!is_distinguished_sm(elem->sm))
444 (*n_secmaps_found)++;
445 }
446 if (elems_seen != n_auxmap_L2_nodes)
447 return "64-bit: disagreement on number of elems in _L2";
448 /* Check L1-L2 correspondence */
449 for (i = 0; i < N_AUXMAP_L1; i++) {
450 if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
451 continue;
452 if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
453 return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
454 if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
455 return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
456 if (auxmap_L1[i].ent == NULL)
457 return "64-bit: .ent is NULL in auxmap_L1";
458 if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
459 return "64-bit: _L1 and _L2 bases are inconsistent";
460 /* Look it up in auxmap_L2. */
461 key.base = auxmap_L1[i].base;
462 key.sm = 0;
njne2a9ad32007-09-17 05:30:48 +0000463 res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
sewardj05a46732006-10-17 01:28:10 +0000464 if (res == NULL)
465 return "64-bit: _L1 .base not found in _L2";
466 if (res != auxmap_L1[i].ent)
467 return "64-bit: _L1 .ent disagrees with _L2 entry";
468 }
469 /* Check L1 contains no duplicates */
470 for (i = 0; i < N_AUXMAP_L1; i++) {
471 if (auxmap_L1[i].base == 0)
472 continue;
473 for (j = i+1; j < N_AUXMAP_L1; j++) {
474 if (auxmap_L1[j].base == 0)
475 continue;
476 if (auxmap_L1[j].base == auxmap_L1[i].base)
477 return "64-bit: duplicate _L1 .base entries";
478 }
479 }
480 }
481 return NULL; /* ok */
482}
483
484static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
485{
486 Word i;
487 tl_assert(ent);
488 tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
489 for (i = N_AUXMAP_L1-1; i > rank; i--)
490 auxmap_L1[i] = auxmap_L1[i-1];
491 auxmap_L1[rank].base = ent->base;
492 auxmap_L1[rank].ent = ent;
493}
494
495static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
496{
497 AuxMapEnt key;
498 AuxMapEnt* res;
499 Word i;
500
501 tl_assert(a > MAX_PRIMARY_ADDRESS);
502 a &= ~(Addr)0xFFFF;
503
504 /* First search the front-cache, which is a self-organising
505 list containing the most popular entries. */
506
bart5dd8e6a2008-03-22 08:04:29 +0000507 if (LIKELY(auxmap_L1[0].base == a))
sewardj05a46732006-10-17 01:28:10 +0000508 return auxmap_L1[0].ent;
bart5dd8e6a2008-03-22 08:04:29 +0000509 if (LIKELY(auxmap_L1[1].base == a)) {
sewardj05a46732006-10-17 01:28:10 +0000510 Addr t_base = auxmap_L1[0].base;
511 AuxMapEnt* t_ent = auxmap_L1[0].ent;
512 auxmap_L1[0].base = auxmap_L1[1].base;
513 auxmap_L1[0].ent = auxmap_L1[1].ent;
514 auxmap_L1[1].base = t_base;
515 auxmap_L1[1].ent = t_ent;
516 return auxmap_L1[0].ent;
sewardj45d94cc2005-04-20 14:44:11 +0000517 }
518
sewardj05a46732006-10-17 01:28:10 +0000519 n_auxmap_L1_searches++;
sewardj45d94cc2005-04-20 14:44:11 +0000520
sewardj05a46732006-10-17 01:28:10 +0000521 for (i = 0; i < N_AUXMAP_L1; i++) {
522 if (auxmap_L1[i].base == a) {
523 break;
524 }
525 }
526 tl_assert(i >= 0 && i <= N_AUXMAP_L1);
sewardj45d94cc2005-04-20 14:44:11 +0000527
sewardj05a46732006-10-17 01:28:10 +0000528 n_auxmap_L1_cmps += (ULong)(i+1);
sewardj45d94cc2005-04-20 14:44:11 +0000529
sewardj05a46732006-10-17 01:28:10 +0000530 if (i < N_AUXMAP_L1) {
531 if (i > 0) {
532 Addr t_base = auxmap_L1[i-1].base;
533 AuxMapEnt* t_ent = auxmap_L1[i-1].ent;
534 auxmap_L1[i-1].base = auxmap_L1[i-0].base;
535 auxmap_L1[i-1].ent = auxmap_L1[i-0].ent;
536 auxmap_L1[i-0].base = t_base;
537 auxmap_L1[i-0].ent = t_ent;
538 i--;
539 }
540 return auxmap_L1[i].ent;
541 }
542
543 n_auxmap_L2_searches++;
544
545 /* First see if we already have it. */
546 key.base = a;
547 key.sm = 0;
548
njne2a9ad32007-09-17 05:30:48 +0000549 res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
sewardj05a46732006-10-17 01:28:10 +0000550 if (res)
551 insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
552 return res;
553}
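
/* Behaviour of the L1/L2 arrangement above, by example: a hit on L1 slot i
   swaps slots i and i-1, so frequently-used bases migrate one position
   towards the front on each lookup (slots 0 and 1 are checked first, so
   the hottest entries stay cheapest to reach).  A miss falls through to
   the auxmap_L2 OSet; if the entry is found there it is re-inserted into
   the L1 at position AUXMAP_L1_INSERT_IX (12), pushing the entries below
   it down one slot and dropping the last one. */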
554
555static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
556{
557 AuxMapEnt *nyu, *res;
558
559 /* First see if we already have it. */
560 res = maybe_find_in_auxmap( a );
bart5dd8e6a2008-03-22 08:04:29 +0000561 if (LIKELY(res))
sewardj05a46732006-10-17 01:28:10 +0000562 return res;
563
564 /* Ok, there's no entry in the secondary map, so we'll have
565 to allocate one. */
566 a &= ~(Addr)0xFFFF;
567
njne2a9ad32007-09-17 05:30:48 +0000568 nyu = (AuxMapEnt*) VG_(OSetGen_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
sewardj05a46732006-10-17 01:28:10 +0000569 tl_assert(nyu);
570 nyu->base = a;
571 nyu->sm = &sm_distinguished[SM_DIST_NOACCESS];
njne2a9ad32007-09-17 05:30:48 +0000572 VG_(OSetGen_Insert)( auxmap_L2, nyu );
sewardj05a46732006-10-17 01:28:10 +0000573 insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
574 n_auxmap_L2_nodes++;
575 return nyu;
sewardj45d94cc2005-04-20 14:44:11 +0000576}
577
sewardj45d94cc2005-04-20 14:44:11 +0000578/* --------------- SecMap fundamentals --------------- */
579
njn1d0825f2006-03-27 11:37:07 +0000580// In all these, 'low' means it's definitely in the main primary map,
581// 'high' means it's definitely in the auxiliary table.
582
583static INLINE SecMap** get_secmap_low_ptr ( Addr a )
584{
585 UWord pm_off = a >> 16;
586# if VG_DEBUG_MEMORY >= 1
587 tl_assert(pm_off < N_PRIMARY_MAP);
588# endif
589 return &primary_map[ pm_off ];
590}
591
592static INLINE SecMap** get_secmap_high_ptr ( Addr a )
593{
594 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
595 return &am->sm;
596}
597
598static SecMap** get_secmap_ptr ( Addr a )
599{
600 return ( a <= MAX_PRIMARY_ADDRESS
601 ? get_secmap_low_ptr(a)
602 : get_secmap_high_ptr(a));
603}
604
njna7c7ebd2006-03-28 12:51:02 +0000605static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000606{
607 return *get_secmap_low_ptr(a);
608}
609
njna7c7ebd2006-03-28 12:51:02 +0000610static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000611{
612 return *get_secmap_high_ptr(a);
613}
614
njna7c7ebd2006-03-28 12:51:02 +0000615static INLINE SecMap* get_secmap_for_writing_low(Addr a)
njn1d0825f2006-03-27 11:37:07 +0000616{
617 SecMap** p = get_secmap_low_ptr(a);
bart5dd8e6a2008-03-22 08:04:29 +0000618 if (UNLIKELY(is_distinguished_sm(*p)))
njn1d0825f2006-03-27 11:37:07 +0000619 *p = copy_for_writing(*p);
620 return *p;
621}
622
njna7c7ebd2006-03-28 12:51:02 +0000623static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000624{
625 SecMap** p = get_secmap_high_ptr(a);
bart5dd8e6a2008-03-22 08:04:29 +0000626 if (UNLIKELY(is_distinguished_sm(*p)))
njn1d0825f2006-03-27 11:37:07 +0000627 *p = copy_for_writing(*p);
628 return *p;
629}
630
sewardj45d94cc2005-04-20 14:44:11 +0000631/* Produce the secmap for 'a', either from the primary map or by
632 ensuring there is an entry for it in the aux primary map. The
633 secmap may be a distinguished one as the caller will only want to
634 be able to read it.
635*/
sewardj05a46732006-10-17 01:28:10 +0000636static INLINE SecMap* get_secmap_for_reading ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000637{
njn1d0825f2006-03-27 11:37:07 +0000638 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000639 ? get_secmap_for_reading_low (a)
640 : get_secmap_for_reading_high(a) );
sewardj45d94cc2005-04-20 14:44:11 +0000641}
642
643/* Produce the secmap for 'a', either from the primary map or by
644 ensuring there is an entry for it in the aux primary map. The
645 secmap may not be a distinguished one, since the caller will want
646 to be able to write it. If it is a distinguished secondary, make a
647 writable copy of it, install it, and return the copy instead. (COW
648 semantics).
649*/
njna7c7ebd2006-03-28 12:51:02 +0000650static SecMap* get_secmap_for_writing ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000651{
njn1d0825f2006-03-27 11:37:07 +0000652 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000653 ? get_secmap_for_writing_low (a)
654 : get_secmap_for_writing_high(a) );
njn1d0825f2006-03-27 11:37:07 +0000655}
656
657/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
658 allocate one if one doesn't already exist. This is used by the
659 leak checker.
660*/
661static SecMap* maybe_get_secmap_for ( Addr a )
662{
sewardj45d94cc2005-04-20 14:44:11 +0000663 if (a <= MAX_PRIMARY_ADDRESS) {
njna7c7ebd2006-03-28 12:51:02 +0000664 return get_secmap_for_reading_low(a);
sewardj45d94cc2005-04-20 14:44:11 +0000665 } else {
njn1d0825f2006-03-27 11:37:07 +0000666 AuxMapEnt* am = maybe_find_in_auxmap(a);
667 return am ? am->sm : NULL;
sewardj45d94cc2005-04-20 14:44:11 +0000668 }
669}
670
njn1d0825f2006-03-27 11:37:07 +0000671/* --------------- Fundamental functions --------------- */
672
673static INLINE
674void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
675{
676 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
677 *vabits8 &= ~(0x3 << shift); // mask out the two old bits
678 *vabits8 |= (vabits2 << shift); // mask in the two new bits
679}
680
681static INLINE
682void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
683{
684 UInt shift;
685 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
686 shift = (a & 2) << 1; // shift by 0 or 4
687 *vabits8 &= ~(0xf << shift); // mask out the four old bits
688 *vabits8 |= (vabits4 << shift); // mask in the four new bits
689}
690
691static INLINE
692UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
693{
694 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
695 vabits8 >>= shift; // shift the two bits to the bottom
696 return 0x3 & vabits8; // mask out the rest
697}
698
699static INLINE
700UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
701{
702 UInt shift;
703 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
704 shift = (a & 2) << 1; // shift by 0 or 4
705 vabits8 >>= shift; // shift the four bits to the bottom
706 return 0xf & vabits8; // mask out the rest
707}
708
709// Note that these four are only used in slow cases. The fast cases do
710// clever things like combine the auxmap check (in
711// get_secmap_{read,writ}able) with alignment checks.
712
713// *** WARNING! ***
714// Any time this function is called, if it is possible that vabits2
njndbf7ca72006-03-31 11:57:59 +0000715// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
njn1d0825f2006-03-27 11:37:07 +0000716// sec-V-bits table must also be set!
717static INLINE
718void set_vabits2 ( Addr a, UChar vabits2 )
719{
njna7c7ebd2006-03-28 12:51:02 +0000720 SecMap* sm = get_secmap_for_writing(a);
njn1d0825f2006-03-27 11:37:07 +0000721 UWord sm_off = SM_OFF(a);
722 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
723}
724
725static INLINE
726UChar get_vabits2 ( Addr a )
727{
njna7c7ebd2006-03-28 12:51:02 +0000728 SecMap* sm = get_secmap_for_reading(a);
njn1d0825f2006-03-27 11:37:07 +0000729 UWord sm_off = SM_OFF(a);
730 UChar vabits8 = sm->vabits8[sm_off];
731 return extract_vabits2_from_vabits8(a, vabits8);
732}
733
sewardjf2184912006-05-03 22:13:57 +0000734// *** WARNING! ***
735// Any time this function is called, if it is possible that any of the
736// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
737// corresponding entry(s) in the sec-V-bits table must also be set!
738static INLINE
739UChar get_vabits8_for_aligned_word32 ( Addr a )
740{
741 SecMap* sm = get_secmap_for_reading(a);
742 UWord sm_off = SM_OFF(a);
743 UChar vabits8 = sm->vabits8[sm_off];
744 return vabits8;
745}
746
747static INLINE
748void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
749{
750 SecMap* sm = get_secmap_for_writing(a);
751 UWord sm_off = SM_OFF(a);
752 sm->vabits8[sm_off] = vabits8;
753}
754
755
njn1d0825f2006-03-27 11:37:07 +0000756// Forward declarations
757static UWord get_sec_vbits8(Addr a);
758static void set_sec_vbits8(Addr a, UWord vbits8);
759
760// Returns False if there was an addressability error.
761static INLINE
762Bool set_vbits8 ( Addr a, UChar vbits8 )
763{
764 Bool ok = True;
765 UChar vabits2 = get_vabits2(a);
766 if ( VA_BITS2_NOACCESS != vabits2 ) {
767 // Addressable. Convert in-register format to in-memory format.
768 // Also remove any existing sec V bit entry for the byte if no
769 // longer necessary.
njndbf7ca72006-03-31 11:57:59 +0000770 if ( V_BITS8_DEFINED == vbits8 ) { vabits2 = VA_BITS2_DEFINED; }
771 else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
772 else { vabits2 = VA_BITS2_PARTDEFINED;
njn1d0825f2006-03-27 11:37:07 +0000773 set_sec_vbits8(a, vbits8); }
774 set_vabits2(a, vabits2);
775
776 } else {
777 // Unaddressable! Do nothing -- when writing to unaddressable
778 // memory it acts as a black hole, and the V bits can never be seen
779 // again. So we don't have to write them at all.
780 ok = False;
781 }
782 return ok;
783}
784
785// Returns False if there was an addressability error. In that case, we put
786// all defined bits into vbits8.
787static INLINE
788Bool get_vbits8 ( Addr a, UChar* vbits8 )
789{
790 Bool ok = True;
791 UChar vabits2 = get_vabits2(a);
792
793 // Convert the in-memory format to in-register format.
njndbf7ca72006-03-31 11:57:59 +0000794 if ( VA_BITS2_DEFINED == vabits2 ) { *vbits8 = V_BITS8_DEFINED; }
795 else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
796 else if ( VA_BITS2_NOACCESS == vabits2 ) {
njn1d0825f2006-03-27 11:37:07 +0000797 *vbits8 = V_BITS8_DEFINED; // Make V bits defined!
798 ok = False;
799 } else {
njndbf7ca72006-03-31 11:57:59 +0000800 tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
njn1d0825f2006-03-27 11:37:07 +0000801 *vbits8 = get_sec_vbits8(a);
802 }
803 return ok;
804}
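
/* Example of the conversion done by set_vbits8()/get_vbits8(): storing an
   in-register V bit pattern that is neither V_BITS8_DEFINED nor
   V_BITS8_UNDEFINED (say 0x0F) to an addressable byte sets the byte's
   state to VA_BITS2_PARTDEFINED and records the full 8-bit pattern in the
   sec-V-bits table; get_vbits8() later fetches that pattern back from the
   table. */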
805
806
/* --------------- Secondary V bit table ------------ */

// This table holds the full V bit pattern for partially-defined bytes
// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
// memory.
//
// Note: the nodes in this table can become stale.  Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
// node will not necessarily be removed.  This is because checking for
// whether removal is necessary would slow down the fast paths.
//
// To avoid the stale nodes building up too much, we periodically (once the
// table reaches a certain size) garbage collect (GC) the table by
// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
// are stale and haven't been touched for a certain number of collections.
// If more than a certain proportion of nodes survived, we increase the
// table size so that GCs occur less often.
//
// (So this is a bit different to a traditional GC, where you definitely
// want to remove any dead nodes.  It's more like we have a resizable cache
// and we're trying to find the right balance between how many elements to
// evict and how big to make the cache.)
//
// This policy is designed to avoid bad table bloat in the worst case where
// a program creates huge numbers of stale PDBs -- we would get this bloat
// if we had no GC -- while handling well the case where a node becomes
// stale but shortly afterwards is rewritten with a PDB and so becomes
// non-stale again (which happens quite often, eg. in perf/bz2).  If we
// removed all stale nodes as soon as possible, we would just end up
// re-adding a lot of them later.  The "sufficiently stale" approach avoids
// this.  (If a program has many live PDBs, performance will just suck;
// there's no way around that.)

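// Concrete illustration of the policy, using the parameters defined below:
// a GC runs when the table reaches secVBitLimit (initially 1024) nodes.
// A node survives if it was touched within the last MAX_STALE_AGE (2) GCs
// or still covers at least one VA_BITS2_PARTDEFINED byte; if more than
// MAX_SURVIVOR_PROPORTION (50%) of the nodes survive, secVBitLimit is
// multiplied by TABLE_GROWTH_FACTOR (2) so that this workload triggers
// GCs less often.
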
840static OSet* secVBitTable;
841
842// Stats
843static ULong sec_vbits_new_nodes = 0;
844static ULong sec_vbits_updates = 0;
845
846// This must be a power of two; this is checked in mc_pre_clo_init().
847// The size chosen here is a trade-off: if the nodes are bigger (ie. cover
848// a larger address range) they take more space but we can get multiple
849// partially-defined bytes in one if they are close to each other, reducing
850// the number of total nodes. In practice sometimes they are clustered (eg.
851// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
852// row), but often not. So we choose something intermediate.
853#define BYTES_PER_SEC_VBIT_NODE 16
854
855// We make the table bigger if more than this many nodes survive a GC.
856#define MAX_SURVIVOR_PROPORTION 0.5
857
858// Each time we make the table bigger, we increase it by this much.
859#define TABLE_GROWTH_FACTOR 2
860
861// This defines "sufficiently stale" -- any node that hasn't been touched in
862// this many GCs will be removed.
863#define MAX_STALE_AGE 2
864
865// We GC the table when it gets this many nodes in it, ie. it's effectively
866// the table size. It can change.
867static Int secVBitLimit = 1024;
868
869// The number of GCs done, used to age sec-V-bit nodes for eviction.
870// Because it's unsigned, wrapping doesn't matter -- the right answer will
871// come out anyway.
872static UInt GCs_done = 0;
873
874typedef
875 struct {
876 Addr a;
877 UChar vbits8[BYTES_PER_SEC_VBIT_NODE];
878 UInt last_touched;
879 }
880 SecVBitNode;
881
882static OSet* createSecVBitTable(void)
883{
njne2a9ad32007-09-17 05:30:48 +0000884 return VG_(OSetGen_Create)( offsetof(SecVBitNode, a),
885 NULL, // use fast comparisons
886 VG_(malloc), VG_(free) );
njn1d0825f2006-03-27 11:37:07 +0000887}
888
889static void gcSecVBitTable(void)
890{
891 OSet* secVBitTable2;
892 SecVBitNode* n;
893 Int i, n_nodes = 0, n_survivors = 0;
894
895 GCs_done++;
896
897 // Create the new table.
898 secVBitTable2 = createSecVBitTable();
899
900 // Traverse the table, moving fresh nodes into the new table.
njne2a9ad32007-09-17 05:30:48 +0000901 VG_(OSetGen_ResetIter)(secVBitTable);
902 while ( (n = VG_(OSetGen_Next)(secVBitTable)) ) {
njn1d0825f2006-03-27 11:37:07 +0000903 Bool keep = False;
904 if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
905 // Keep node if it's been touched recently enough (regardless of
906 // freshness/staleness).
907 keep = True;
908 } else {
909 // Keep node if any of its bytes are non-stale. Using
910 // get_vabits2() for the lookup is not very efficient, but I don't
911 // think it matters.
912 for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
njndbf7ca72006-03-31 11:57:59 +0000913 if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
njn1d0825f2006-03-27 11:37:07 +0000914 keep = True; // Found a non-stale byte, so keep
915 break;
916 }
917 }
918 }
919
920 if ( keep ) {
921 // Insert a copy of the node into the new table.
922 SecVBitNode* n2 =
njne2a9ad32007-09-17 05:30:48 +0000923 VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
njn1d0825f2006-03-27 11:37:07 +0000924 *n2 = *n;
njne2a9ad32007-09-17 05:30:48 +0000925 VG_(OSetGen_Insert)(secVBitTable2, n2);
njn1d0825f2006-03-27 11:37:07 +0000926 }
927 }
928
929 // Get the before and after sizes.
njne2a9ad32007-09-17 05:30:48 +0000930 n_nodes = VG_(OSetGen_Size)(secVBitTable);
931 n_survivors = VG_(OSetGen_Size)(secVBitTable2);
njn1d0825f2006-03-27 11:37:07 +0000932
933 // Destroy the old table, and put the new one in its place.
njne2a9ad32007-09-17 05:30:48 +0000934 VG_(OSetGen_Destroy)(secVBitTable);
njn1d0825f2006-03-27 11:37:07 +0000935 secVBitTable = secVBitTable2;
936
937 if (VG_(clo_verbosity) > 1) {
938 Char percbuf[6];
939 VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
940 VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
941 n_nodes, n_survivors, percbuf);
942 }
943
944 // Increase table size if necessary.
945 if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
946 secVBitLimit *= TABLE_GROWTH_FACTOR;
947 if (VG_(clo_verbosity) > 1)
948 VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
949 secVBitLimit);
950 }
951}
952
953static UWord get_sec_vbits8(Addr a)
954{
955 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
956 Int amod = a % BYTES_PER_SEC_VBIT_NODE;
njne2a9ad32007-09-17 05:30:48 +0000957 SecVBitNode* n = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
njn1d0825f2006-03-27 11:37:07 +0000958 UChar vbits8;
959 tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
960 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
961 // make it to the secondary V bits table.
962 vbits8 = n->vbits8[amod];
963 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
964 return vbits8;
965}
966
967static void set_sec_vbits8(Addr a, UWord vbits8)
968{
969 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
970 Int i, amod = a % BYTES_PER_SEC_VBIT_NODE;
njne2a9ad32007-09-17 05:30:48 +0000971 SecVBitNode* n = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
njn1d0825f2006-03-27 11:37:07 +0000972 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
973 // make it to the secondary V bits table.
974 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
975 if (n) {
976 n->vbits8[amod] = vbits8; // update
977 n->last_touched = GCs_done;
978 sec_vbits_updates++;
979 } else {
980 // New node: assign the specific byte, make the rest invalid (they
981 // should never be read as-is, but be cautious).
njne2a9ad32007-09-17 05:30:48 +0000982 n = VG_(OSetGen_AllocNode)(secVBitTable, sizeof(SecVBitNode));
njn1d0825f2006-03-27 11:37:07 +0000983 n->a = aAligned;
984 for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
985 n->vbits8[i] = V_BITS8_UNDEFINED;
986 }
987 n->vbits8[amod] = vbits8;
988 n->last_touched = GCs_done;
989
990 // Do a table GC if necessary. Nb: do this before inserting the new
991 // node, to avoid erroneously GC'ing the new node.
njne2a9ad32007-09-17 05:30:48 +0000992 if (secVBitLimit == VG_(OSetGen_Size)(secVBitTable)) {
njn1d0825f2006-03-27 11:37:07 +0000993 gcSecVBitTable();
994 }
995
996 // Insert the new node.
njne2a9ad32007-09-17 05:30:48 +0000997 VG_(OSetGen_Insert)(secVBitTable, n);
njn1d0825f2006-03-27 11:37:07 +0000998 sec_vbits_new_nodes++;
999
njne2a9ad32007-09-17 05:30:48 +00001000 n_secVBit_nodes = VG_(OSetGen_Size)(secVBitTable);
njn1d0825f2006-03-27 11:37:07 +00001001 if (n_secVBit_nodes > max_secVBit_nodes)
1002 max_secVBit_nodes = n_secVBit_nodes;
1003 }
1004}
sewardj45d94cc2005-04-20 14:44:11 +00001005
1006/* --------------- Endianness helpers --------------- */
1007
/* Returns the offset in memory of the byteno-th least significant byte
   in a wordszB-sized word, given the specified endianness.  (On a
   big-endian target the least significant byte lives at the highest
   address, hence the reversal.) */
static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}
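
/* Eg. for a 4-byte word, byteno 0 (the least significant byte of the
   value) is at offset 0 on a little-endian target but at offset 3 on a
   big-endian target. */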
1014
sewardj05a46732006-10-17 01:28:10 +00001015
1016/* --------------- Ignored address ranges --------------- */
1017
1018#define M_IGNORE_RANGES 4
1019
1020typedef
1021 struct {
1022 Int used;
1023 Addr start[M_IGNORE_RANGES];
1024 Addr end[M_IGNORE_RANGES];
1025 }
1026 IgnoreRanges;
1027
1028static IgnoreRanges ignoreRanges;
1029
1030static INLINE Bool in_ignored_range ( Addr a )
1031{
1032 Int i;
bart5dd8e6a2008-03-22 08:04:29 +00001033 if (LIKELY(ignoreRanges.used == 0))
sewardj05a46732006-10-17 01:28:10 +00001034 return False;
1035 for (i = 0; i < ignoreRanges.used; i++) {
1036 if (a >= ignoreRanges.start[i] && a < ignoreRanges.end[i])
1037 return True;
1038 }
1039 return False;
1040}
1041
1042
1043/* Parse a 32- or 64-bit hex number, including leading 0x, from string
1044 starting at *ppc, putting result in *result, and return True. Or
1045 fail, in which case *ppc and *result are undefined, and return
1046 False. */
1047
1048static Bool isHex ( UChar c )
1049{
1050 return ((c >= '0' && c <= '9')
1051 || (c >= 'a' && c <= 'f')
1052 || (c >= 'A' && c <= 'F'));
1053}
1054
1055static UInt fromHex ( UChar c )
1056{
1057 if (c >= '0' && c <= '9')
1058 return (UInt)c - (UInt)'0';
1059 if (c >= 'a' && c <= 'f')
1060 return 10 + (UInt)c - (UInt)'a';
1061 if (c >= 'A' && c <= 'F')
1062 return 10 + (UInt)c - (UInt)'A';
1063 /*NOTREACHED*/
1064 tl_assert(0);
1065 return 0;
1066}
1067
1068static Bool parse_Addr ( UChar** ppc, Addr* result )
1069{
1070 Int used, limit = 2 * sizeof(Addr);
1071 if (**ppc != '0')
1072 return False;
1073 (*ppc)++;
1074 if (**ppc != 'x')
1075 return False;
1076 (*ppc)++;
1077 *result = 0;
1078 used = 0;
1079 while (isHex(**ppc)) {
1080 UInt d = fromHex(**ppc);
1081 tl_assert(d < 16);
1082 *result = ((*result) << 4) | fromHex(**ppc);
1083 (*ppc)++;
1084 used++;
1085 if (used > limit) return False;
1086 }
1087 if (used == 0)
1088 return False;
1089 return True;
1090}
1091
1092/* Parse two such numbers separated by a dash, or fail. */
1093
1094static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
1095{
1096 Bool ok = parse_Addr(ppc, result1);
1097 if (!ok)
1098 return False;
1099 if (**ppc != '-')
1100 return False;
1101 (*ppc)++;
1102 ok = parse_Addr(ppc, result2);
1103 if (!ok)
1104 return False;
1105 return True;
1106}
1107
1108/* Parse a set of ranges separated by commas into 'ignoreRanges', or
1109 fail. */
1110
1111static Bool parse_ignore_ranges ( UChar* str0 )
1112{
1113 Addr start, end;
1114 Bool ok;
1115 UChar* str = str0;
1116 UChar** ppc = &str;
1117 ignoreRanges.used = 0;
1118 while (1) {
1119 ok = parse_range(ppc, &start, &end);
1120 if (!ok)
1121 return False;
1122 if (ignoreRanges.used >= M_IGNORE_RANGES)
1123 return False;
1124 ignoreRanges.start[ignoreRanges.used] = start;
1125 ignoreRanges.end[ignoreRanges.used] = end;
1126 ignoreRanges.used++;
1127 if (**ppc == 0)
1128 return True;
1129 if (**ppc != ',')
1130 return False;
1131 (*ppc)++;
1132 }
1133 /*NOTREACHED*/
1134 return False;
1135}
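
/* Example of an input string this parser accepts (presumably supplied
   through a command-line option handled elsewhere):

      0x58000000-0x58400000,0x7f000000-0x7f001000

   ie. up to M_IGNORE_RANGES (4) comma-separated ranges, each a pair of
   0x-prefixed hex addresses separated by a dash. */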
1136
1137
sewardj45d94cc2005-04-20 14:44:11 +00001138/* --------------- Load/store slow cases. --------------- */
1139
njn1d0825f2006-03-27 11:37:07 +00001140// Forward declarations
1141static void mc_record_address_error ( ThreadId tid, Addr a,
1142 Int size, Bool isWrite );
njn718d3b12006-12-16 00:54:12 +00001143static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* s );
sewardj7cf4e6b2008-05-01 20:24:26 +00001144static void mc_record_regparam_error ( ThreadId tid, Char* msg, UInt otag );
njn718d3b12006-12-16 00:54:12 +00001145static void mc_record_memparam_error ( ThreadId tid, Addr a,
sewardj7cf4e6b2008-05-01 20:24:26 +00001146 Bool isAddrErr, Char* msg, UInt otag );
njn1d0825f2006-03-27 11:37:07 +00001147static void mc_record_jump_error ( ThreadId tid, Addr a );
1148
sewardj45d94cc2005-04-20 14:44:11 +00001149static
njn1d0825f2006-03-27 11:37:07 +00001150#ifndef PERF_FAST_LOADV
1151INLINE
1152#endif
njn45e81252006-03-28 12:35:08 +00001153ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +00001154{
njn1d0825f2006-03-27 11:37:07 +00001155 /* Make up a 64-bit result V word, which contains the loaded data for
sewardjf3d57dd2005-04-22 20:23:27 +00001156 valid addresses and Defined for invalid addresses. Iterate over
1157 the bytes in the word, from the most significant down to the
1158 least. */
njn1d0825f2006-03-27 11:37:07 +00001159 ULong vbits64 = V_BITS64_UNDEFINED;
njn45e81252006-03-28 12:35:08 +00001160 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +00001161 SSizeT i = szB-1; // Must be signed
sewardj45d94cc2005-04-20 14:44:11 +00001162 SizeT n_addrs_bad = 0;
1163 Addr ai;
njn1d0825f2006-03-27 11:37:07 +00001164 Bool partial_load_exemption_applies;
1165 UChar vbits8;
1166 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +00001167
sewardjc1a2cda2005-04-21 17:34:00 +00001168 PROF_EVENT(30, "mc_LOADVn_slow");
sewardj05a46732006-10-17 01:28:10 +00001169
1170 /* ------------ BEGIN semi-fast cases ------------ */
1171 /* These deal quickly-ish with the common auxiliary primary map
1172 cases on 64-bit platforms. Are merely a speedup hack; can be
1173 omitted without loss of correctness/functionality. Note that in
1174 both cases the "sizeof(void*) == 8" causes these cases to be
1175 folded out by compilers on 32-bit platforms. These are derived
1176 from LOADV64 and LOADV32.
1177 */
bart5dd8e6a2008-03-22 08:04:29 +00001178 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001179 && nBits == 64 && VG_IS_8_ALIGNED(a))) {
1180 SecMap* sm = get_secmap_for_reading(a);
1181 UWord sm_off16 = SM_OFF_16(a);
1182 UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
bart5dd8e6a2008-03-22 08:04:29 +00001183 if (LIKELY(vabits16 == VA_BITS16_DEFINED))
sewardj05a46732006-10-17 01:28:10 +00001184 return V_BITS64_DEFINED;
bart5dd8e6a2008-03-22 08:04:29 +00001185 if (LIKELY(vabits16 == VA_BITS16_UNDEFINED))
sewardj05a46732006-10-17 01:28:10 +00001186 return V_BITS64_UNDEFINED;
1187 /* else fall into the slow case */
1188 }
bart5dd8e6a2008-03-22 08:04:29 +00001189 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001190 && nBits == 32 && VG_IS_4_ALIGNED(a))) {
1191 SecMap* sm = get_secmap_for_reading(a);
1192 UWord sm_off = SM_OFF(a);
1193 UWord vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00001194 if (LIKELY(vabits8 == VA_BITS8_DEFINED))
sewardj05a46732006-10-17 01:28:10 +00001195 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
bart5dd8e6a2008-03-22 08:04:29 +00001196 if (LIKELY(vabits8 == VA_BITS8_UNDEFINED))
sewardj05a46732006-10-17 01:28:10 +00001197 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
1198 /* else fall into slow case */
1199 }
1200 /* ------------ END semi-fast cases ------------ */
1201
njn45e81252006-03-28 12:35:08 +00001202 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +00001203
njn1d0825f2006-03-27 11:37:07 +00001204 for (i = szB-1; i >= 0; i--) {
sewardjc1a2cda2005-04-21 17:34:00 +00001205 PROF_EVENT(31, "mc_LOADVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +00001206 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +00001207 ok = get_vbits8(ai, &vbits8);
1208 if (!ok) n_addrs_bad++;
1209 vbits64 <<= 8;
1210 vbits64 |= vbits8;
sewardj45d94cc2005-04-20 14:44:11 +00001211 }
1212
sewardj0ded7a42005-11-08 02:25:37 +00001213 /* This is a hack which avoids producing errors for code which
1214 insists in stepping along byte strings in aligned word-sized
1215 chunks, and there is a partially defined word at the end. (eg,
1216 optimised strlen). Such code is basically broken at least WRT
1217 semantics of ANSI C, but sometimes users don't have the option
1218 to fix it, and so this option is provided. Note it is now
1219 defaulted to not-engaged.
1220
1221 A load from a partially-addressible place is allowed if:
1222 - the command-line flag is set
1223 - it's a word-sized, word-aligned load
1224 - at least one of the addresses in the word *is* valid
1225 */
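   /* Concrete case: an optimised strlen() doing word-sized loads may read
      the word containing the terminating NUL even though its final bytes
      lie outside the buffer.  With the exemption enabled, such a load
      yields Defined V bits for the inaccessible bytes (see get_vbits8
      above) and no addressing error is reported, provided the load is
      word-sized, word-aligned, and at least one byte of the word is
      addressable. */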
1226 partial_load_exemption_applies
njn1d0825f2006-03-27 11:37:07 +00001227 = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
sewardj0ded7a42005-11-08 02:25:37 +00001228 && VG_IS_WORD_ALIGNED(a)
1229 && n_addrs_bad < VG_WORDSIZE;
1230
1231 if (n_addrs_bad > 0 && !partial_load_exemption_applies)
njn1d0825f2006-03-27 11:37:07 +00001232 mc_record_address_error( VG_(get_running_tid)(), a, szB, False );
sewardj45d94cc2005-04-20 14:44:11 +00001233
njn1d0825f2006-03-27 11:37:07 +00001234 return vbits64;
sewardj45d94cc2005-04-20 14:44:11 +00001235}
1236
1237
njn1d0825f2006-03-27 11:37:07 +00001238static
1239#ifndef PERF_FAST_STOREV
1240INLINE
1241#endif
njn45e81252006-03-28 12:35:08 +00001242void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +00001243{
njn45e81252006-03-28 12:35:08 +00001244 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +00001245 SizeT i, n_addrs_bad = 0;
1246 UChar vbits8;
sewardj45d94cc2005-04-20 14:44:11 +00001247 Addr ai;
njn1d0825f2006-03-27 11:37:07 +00001248 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +00001249
sewardjc1a2cda2005-04-21 17:34:00 +00001250 PROF_EVENT(35, "mc_STOREVn_slow");
sewardj05a46732006-10-17 01:28:10 +00001251
1252 /* ------------ BEGIN semi-fast cases ------------ */
1253 /* These deal quickly-ish with the common auxiliary primary map
1254 cases on 64-bit platforms. Are merely a speedup hack; can be
1255 omitted without loss of correctness/functionality. Note that in
1256 both cases the "sizeof(void*) == 8" causes these cases to be
1257 folded out by compilers on 32-bit platforms. These are derived
1258 from STOREV64 and STOREV32.
1259 */
bart5dd8e6a2008-03-22 08:04:29 +00001260 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001261 && nBits == 64 && VG_IS_8_ALIGNED(a))) {
1262 SecMap* sm = get_secmap_for_reading(a);
1263 UWord sm_off16 = SM_OFF_16(a);
1264 UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
bart5dd8e6a2008-03-22 08:04:29 +00001265 if (LIKELY( !is_distinguished_sm(sm) &&
sewardj05a46732006-10-17 01:28:10 +00001266 (VA_BITS16_DEFINED == vabits16 ||
1267 VA_BITS16_UNDEFINED == vabits16) )) {
1268 /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
1270 // Convert full V-bits in register to compact 2-bit form.
bart5dd8e6a2008-03-22 08:04:29 +00001271 if (LIKELY(V_BITS64_DEFINED == vbytes)) {
sewardj05a46732006-10-17 01:28:10 +00001272 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
1273 return;
1274 } else if (V_BITS64_UNDEFINED == vbytes) {
1275 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
1276 return;
1277 }
1278 /* else fall into the slow case */
1279 }
1280 /* else fall into the slow case */
1281 }
bart5dd8e6a2008-03-22 08:04:29 +00001282 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001283 && nBits == 32 && VG_IS_4_ALIGNED(a))) {
1284 SecMap* sm = get_secmap_for_reading(a);
1285 UWord sm_off = SM_OFF(a);
1286 UWord vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00001287 if (LIKELY( !is_distinguished_sm(sm) &&
sewardj05a46732006-10-17 01:28:10 +00001288 (VA_BITS8_DEFINED == vabits8 ||
1289 VA_BITS8_UNDEFINED == vabits8) )) {
1290 /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
1292 // Convert full V-bits in register to compact 2-bit form.
bart5dd8e6a2008-03-22 08:04:29 +00001293 if (LIKELY(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
sewardj05a46732006-10-17 01:28:10 +00001294 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
1295 return;
1296 } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
1297 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
1298 return;
1299 }
1300 /* else fall into the slow case */
1301 }
1302 /* else fall into the slow case */
1303 }
1304 /* ------------ END semi-fast cases ------------ */
1305
njn45e81252006-03-28 12:35:08 +00001306 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +00001307
   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the location. */
sewardj45d94cc2005-04-20 14:44:11 +00001310 for (i = 0; i < szB; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001311 PROF_EVENT(36, "mc_STOREVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +00001312 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +00001313 vbits8 = vbytes & 0xff;
1314 ok = set_vbits8(ai, vbits8);
1315 if (!ok) n_addrs_bad++;
sewardj45d94cc2005-04-20 14:44:11 +00001316 vbytes >>= 8;
1317 }
1318
1319 /* If an address error has happened, report it. */
1320 if (n_addrs_bad > 0)
njn1d0825f2006-03-27 11:37:07 +00001321 mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
sewardj45d94cc2005-04-20 14:44:11 +00001322}
1323
1324
njn25e49d8e72002-09-23 09:36:25 +00001325/*------------------------------------------------------------*/
1326/*--- Setting permissions over address ranges. ---*/
1327/*------------------------------------------------------------*/
1328
njn1d0825f2006-03-27 11:37:07 +00001329static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
1330 UWord dsm_num )
sewardj23eb2fd2005-04-22 16:29:19 +00001331{
njn1d0825f2006-03-27 11:37:07 +00001332 UWord sm_off, sm_off16;
1333 UWord vabits2 = vabits16 & 0x3;
1334 SizeT lenA, lenB, len_to_next_secmap;
1335 Addr aNext;
sewardjae986ca2005-10-12 12:53:20 +00001336 SecMap* sm;
njn1d0825f2006-03-27 11:37:07 +00001337 SecMap** sm_ptr;
sewardjae986ca2005-10-12 12:53:20 +00001338 SecMap* example_dsm;
1339
sewardj23eb2fd2005-04-22 16:29:19 +00001340 PROF_EVENT(150, "set_address_range_perms");
1341
njn1d0825f2006-03-27 11:37:07 +00001342 /* Check the V+A bits make sense. */
njndbf7ca72006-03-31 11:57:59 +00001343 tl_assert(VA_BITS16_NOACCESS == vabits16 ||
1344 VA_BITS16_UNDEFINED == vabits16 ||
1345 VA_BITS16_DEFINED == vabits16);
sewardj23eb2fd2005-04-22 16:29:19 +00001346
njn1d0825f2006-03-27 11:37:07 +00001347 // This code should never write PDBs; ensure this. (See comment above
1348 // set_vabits2().)
njndbf7ca72006-03-31 11:57:59 +00001349 tl_assert(VA_BITS2_PARTDEFINED != vabits2);
njn1d0825f2006-03-27 11:37:07 +00001350
1351 if (lenT == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001352 return;
1353
njn1d0825f2006-03-27 11:37:07 +00001354 if (lenT > 100 * 1000 * 1000) {
1355 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
1356 Char* s = "unknown???";
njndbf7ca72006-03-31 11:57:59 +00001357 if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
1358 if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
1359 if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
njn1d0825f2006-03-27 11:37:07 +00001360 VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
1361 "large range %lu (%s)", lenT, s);
sewardj23eb2fd2005-04-22 16:29:19 +00001362 }
1363 }
1364
njn1d0825f2006-03-27 11:37:07 +00001365#ifndef PERF_FAST_SARP
sewardj23eb2fd2005-04-22 16:29:19 +00001366 /*------------------ debug-only case ------------------ */
njn1d0825f2006-03-27 11:37:07 +00001367 {
1368 // Endianness doesn't matter here because all bytes are being set to
1369 // the same value.
1370 // Nb: We don't have to worry about updating the sec-V-bits table
1371 // after these set_vabits2() calls because this code never writes
njndbf7ca72006-03-31 11:57:59 +00001372 // VA_BITS2_PARTDEFINED values.
njn1d0825f2006-03-27 11:37:07 +00001373 SizeT i;
1374 for (i = 0; i < lenT; i++) {
1375 set_vabits2(a + i, vabits2);
1376 }
1377 return;
njn25e49d8e72002-09-23 09:36:25 +00001378 }
njn1d0825f2006-03-27 11:37:07 +00001379#endif
sewardj23eb2fd2005-04-22 16:29:19 +00001380
1381 /*------------------ standard handling ------------------ */
sewardj23eb2fd2005-04-22 16:29:19 +00001382
njn1d0825f2006-03-27 11:37:07 +00001383 /* Get the distinguished secondary that we might want
sewardj23eb2fd2005-04-22 16:29:19 +00001384 to use (part of the space-compression scheme). */
njn1d0825f2006-03-27 11:37:07 +00001385 example_dsm = &sm_distinguished[dsm_num];
1386
1387 // We have to handle ranges covering various combinations of partial and
1388 // whole sec-maps. Here is how parts 1, 2 and 3 are used in each case.
1389 // Cases marked with a '*' are common.
1390 //
1391 // TYPE PARTS USED
1392 // ---- ----------
1393 // * one partial sec-map (p) 1
1394 // - one whole sec-map (P) 2
1395 //
1396 // * two partial sec-maps (pp) 1,3
1397 // - one partial, one whole sec-map (pP) 1,2
1398 // - one whole, one partial sec-map (Pp) 2,3
1399 // - two whole sec-maps (PP) 2,2
1400 //
1401 // * one partial, one whole, one partial (pPp) 1,2,3
1402 // - one partial, two whole (pPP) 1,2,2
1403 // - two whole, one partial (PPp) 2,2,3
1404 // - three whole (PPP) 2,2,2
1405 //
1406 // * one partial, N-2 whole, one partial (pP...Pp) 1,2...2,3
1407 // - one partial, N-1 whole (pP...PP) 1,2...2,2
1408 // - N-1 whole, one partial (PP...Pp) 2,2...2,3
1409 // - N whole (PP...PP) 2,2...2,2
1410
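   // A rough worked example of the three parts, assuming the 64KB
   // sec-map size used in Part 2 below (the addresses are hypothetical):
   // a call covering [0x10100, 0x38100) starts mid sec-map, so Part 1
   // handles the 0xFF00 bytes up to the 0x20000 boundary (lenA), Part 2
   // swings the whole sec-map [0x20000, 0x30000) over to the example
   // DSM, and Part 3 finishes off the final 0x8100 bytes (the leftover
   // lenB).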
1411 // Break up total length (lenT) into two parts: length in the first
1412 // sec-map (lenA), and the rest (lenB); lenT == lenA + lenB.
1413 aNext = start_of_this_sm(a) + SM_SIZE;
1414 len_to_next_secmap = aNext - a;
1415 if ( lenT <= len_to_next_secmap ) {
1416 // Range entirely within one sec-map. Covers almost all cases.
1417 PROF_EVENT(151, "set_address_range_perms-single-secmap");
1418 lenA = lenT;
1419 lenB = 0;
1420 } else if (is_start_of_sm(a)) {
1421 // Range spans at least one whole sec-map, and starts at the beginning
1422 // of a sec-map; skip to Part 2.
1423 PROF_EVENT(152, "set_address_range_perms-startof-secmap");
1424 lenA = 0;
1425 lenB = lenT;
1426 goto part2;
sewardj23eb2fd2005-04-22 16:29:19 +00001427 } else {
njn1d0825f2006-03-27 11:37:07 +00001428 // Range spans two or more sec-maps, first one is partial.
1429 PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
1430 lenA = len_to_next_secmap;
1431 lenB = lenT - lenA;
1432 }
1433
1434 //------------------------------------------------------------------------
1435 // Part 1: Deal with the first sec_map. Most of the time the range will be
1436 // entirely within a sec_map and this part alone will suffice. Also,
1437 // doing it this way lets us avoid repeatedly testing for the crossing of
1438 // a sec-map boundary within these loops.
1439 //------------------------------------------------------------------------
1440
1441 // If it's distinguished, make it undistinguished if necessary.
1442 sm_ptr = get_secmap_ptr(a);
1443 if (is_distinguished_sm(*sm_ptr)) {
1444 if (*sm_ptr == example_dsm) {
1445 // Sec-map already has the V+A bits that we want, so skip.
1446 PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
1447 a = aNext;
1448 lenA = 0;
sewardj23eb2fd2005-04-22 16:29:19 +00001449 } else {
njn1d0825f2006-03-27 11:37:07 +00001450 PROF_EVENT(155, "set_address_range_perms-dist-sm1");
1451 *sm_ptr = copy_for_writing(*sm_ptr);
sewardj23eb2fd2005-04-22 16:29:19 +00001452 }
1453 }
njn1d0825f2006-03-27 11:37:07 +00001454 sm = *sm_ptr;
sewardj23eb2fd2005-04-22 16:29:19 +00001455
njn1d0825f2006-03-27 11:37:07 +00001456 // 1 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001457 while (True) {
sewardj23eb2fd2005-04-22 16:29:19 +00001458 if (VG_IS_8_ALIGNED(a)) break;
njn1d0825f2006-03-27 11:37:07 +00001459 if (lenA < 1) break;
1460 PROF_EVENT(156, "set_address_range_perms-loop1a");
1461 sm_off = SM_OFF(a);
1462 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1463 a += 1;
1464 lenA -= 1;
1465 }
1466 // 8-aligned, 8 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001467 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001468 if (lenA < 8) break;
1469 PROF_EVENT(157, "set_address_range_perms-loop8a");
1470 sm_off16 = SM_OFF_16(a);
1471 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1472 a += 8;
1473 lenA -= 8;
1474 }
1475 // 1 byte steps
1476 while (True) {
1477 if (lenA < 1) break;
1478 PROF_EVENT(158, "set_address_range_perms-loop1b");
1479 sm_off = SM_OFF(a);
1480 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1481 a += 1;
1482 lenA -= 1;
sewardj23eb2fd2005-04-22 16:29:19 +00001483 }
1484
njn1d0825f2006-03-27 11:37:07 +00001485 // We've finished the first sec-map. Is that it?
1486 if (lenB == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001487 return;
1488
njn1d0825f2006-03-27 11:37:07 +00001489 //------------------------------------------------------------------------
1490 // Part 2: Fast-set entire sec-maps at a time.
1491 //------------------------------------------------------------------------
1492 part2:
1493 // 64KB-aligned, 64KB steps.
1494 // Nb: we can reach here with lenB < SM_SIZE
sewardj23eb2fd2005-04-22 16:29:19 +00001495 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001496 if (lenB < SM_SIZE) break;
1497 tl_assert(is_start_of_sm(a));
1498 PROF_EVENT(159, "set_address_range_perms-loop64K");
1499 sm_ptr = get_secmap_ptr(a);
1500 if (!is_distinguished_sm(*sm_ptr)) {
1501 PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
1502 // Free the non-distinguished sec-map that we're replacing. This
1503 // case happens moderately often, enough to be worthwhile.
1504 VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
1505 }
1506 update_SM_counts(*sm_ptr, example_dsm);
1507 // Make the sec-map entry point to the example DSM
1508 *sm_ptr = example_dsm;
1509 lenB -= SM_SIZE;
1510 a += SM_SIZE;
1511 }
sewardj23eb2fd2005-04-22 16:29:19 +00001512
njn1d0825f2006-03-27 11:37:07 +00001513 // We've finished with the whole sec-maps. Is that it?
1514 if (lenB == 0)
1515 return;
1516
1517 //------------------------------------------------------------------------
1518 // Part 3: Finish off the final partial sec-map, if necessary.
1519 //------------------------------------------------------------------------
1520
1521 tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);
1522
1523 // If it's distinguished, make it undistinguished if necessary.
1524 sm_ptr = get_secmap_ptr(a);
1525 if (is_distinguished_sm(*sm_ptr)) {
1526 if (*sm_ptr == example_dsm) {
1527 // Sec-map already has the V+A bits that we want, so stop.
1528 PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
1529 return;
1530 } else {
1531 PROF_EVENT(162, "set_address_range_perms-dist-sm2");
1532 *sm_ptr = copy_for_writing(*sm_ptr);
1533 }
1534 }
1535 sm = *sm_ptr;
1536
1537 // 8-aligned, 8 byte steps
1538 while (True) {
1539 if (lenB < 8) break;
1540 PROF_EVENT(163, "set_address_range_perms-loop8b");
1541 sm_off16 = SM_OFF_16(a);
1542 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1543 a += 8;
1544 lenB -= 8;
1545 }
1546 // 1 byte steps
1547 while (True) {
1548 if (lenB < 1) return;
1549 PROF_EVENT(164, "set_address_range_perms-loop1c");
1550 sm_off = SM_OFF(a);
1551 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1552 a += 1;
1553 lenB -= 1;
1554 }
sewardj23eb2fd2005-04-22 16:29:19 +00001555}
sewardj45d94cc2005-04-20 14:44:11 +00001556
sewardjc859fbf2005-04-22 21:10:28 +00001557
1558/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +00001559
njndbf7ca72006-03-31 11:57:59 +00001560void MC_(make_mem_noaccess) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001561{
njndbf7ca72006-03-31 11:57:59 +00001562 PROF_EVENT(40, "MC_(make_mem_noaccess)");
1563 DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
njn1d0825f2006-03-27 11:37:07 +00001564 set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
sewardj7cf4e6b2008-05-01 20:24:26 +00001565 if (UNLIKELY( MC_(clo_mc_level) == 3 ))
1566 ocache_sarp_Clear_Origins ( a, len );
njn25e49d8e72002-09-23 09:36:25 +00001567}
1568
sewardj7cf4e6b2008-05-01 20:24:26 +00001569static void make_mem_undefined ( Addr a, SizeT len )
1570{
1571 PROF_EVENT(41, "make_mem_undefined");
1572 DEBUG("make_mem_undefined(%p, %lu)\n", a, len);
1573 set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
1574}
1575
1576void MC_(make_mem_undefined_w_otag) ( Addr a, SizeT len, UInt otag )
njn25e49d8e72002-09-23 09:36:25 +00001577{
njndbf7ca72006-03-31 11:57:59 +00001578 PROF_EVENT(41, "MC_(make_mem_undefined_w_otag)");
1579 DEBUG("MC_(make_mem_undefined_w_otag)(%p, %lu)\n", a, len);
1580 set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
sewardj7cf4e6b2008-05-01 20:24:26 +00001581 if (UNLIKELY( MC_(clo_mc_level) == 3 ))
1582 ocache_sarp_Set_Origins ( a, len, otag );
njn25e49d8e72002-09-23 09:36:25 +00001583}
1584
sewardj7cf4e6b2008-05-01 20:24:26 +00001585static
1586void make_mem_undefined_w_tid_and_okind ( Addr a, SizeT len,
1587 ThreadId tid, UInt okind )
1588{
1589 UInt ecu;
1590 ExeContext* here;
1591 /* VG_(record_ExeContext) checks for validity of tid, and asserts
1592 if it is invalid. So no need to do it here. */
1593 tl_assert(okind <= 3);
1594 here = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
1595 tl_assert(here);
1596 ecu = VG_(get_ECU_from_ExeContext)(here);
1597 tl_assert(VG_(is_plausible_ECU)(ecu));
1598 MC_(make_mem_undefined_w_otag) ( a, len, ecu | okind );
1599}
1600
1601static
1602void make_mem_undefined_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1603 make_mem_undefined_w_tid_and_okind ( a, len, tid, MC_OKIND_UNKNOWN );
1604}
1605
1606
njndbf7ca72006-03-31 11:57:59 +00001607void MC_(make_mem_defined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001608{
njndbf7ca72006-03-31 11:57:59 +00001609 PROF_EVENT(42, "MC_(make_mem_defined)");
1610 DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
1611 set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
sewardj7cf4e6b2008-05-01 20:24:26 +00001612 if (UNLIKELY( MC_(clo_mc_level) == 3 ))
1613 ocache_sarp_Clear_Origins ( a, len );
njn25e49d8e72002-09-23 09:36:25 +00001614}
1615
sewardjfb1e9ad2006-03-10 13:41:58 +00001616/* For each byte in [a,a+len), if the byte is addressable, make it be
1617 defined, but if it isn't addressable, leave it alone. In other
njndbf7ca72006-03-31 11:57:59 +00001618 words a version of MC_(make_mem_defined) that doesn't mess with
sewardjfb1e9ad2006-03-10 13:41:58 +00001619 addressability. Low-performance implementation. */
njndbf7ca72006-03-31 11:57:59 +00001620static void make_mem_defined_if_addressable ( Addr a, SizeT len )
sewardjfb1e9ad2006-03-10 13:41:58 +00001621{
1622 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001623 UChar vabits2;
njndbf7ca72006-03-31 11:57:59 +00001624 DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
sewardjfb1e9ad2006-03-10 13:41:58 +00001625 for (i = 0; i < len; i++) {
njn1d0825f2006-03-27 11:37:07 +00001626 vabits2 = get_vabits2( a+i );
bart5dd8e6a2008-03-22 08:04:29 +00001627 if (LIKELY(VA_BITS2_NOACCESS != vabits2)) {
njndbf7ca72006-03-31 11:57:59 +00001628 set_vabits2(a+i, VA_BITS2_DEFINED);
sewardj7cf4e6b2008-05-01 20:24:26 +00001629 if (UNLIKELY(MC_(clo_mc_level) >= 3)) {
1630 MC_(helperc_b_store1)( a+i, 0 ); /* clear the origin tag */
1631 }
njn1d0825f2006-03-27 11:37:07 +00001632 }
sewardjfb1e9ad2006-03-10 13:41:58 +00001633 }
1634}
1635
njn9b007f62003-04-07 14:40:25 +00001636
sewardj45f4e7c2005-09-27 19:20:21 +00001637/* --- Block-copy permissions (needed for implementing realloc() and
1638 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +00001639
njn1d0825f2006-03-27 11:37:07 +00001640void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
sewardjc859fbf2005-04-22 21:10:28 +00001641{
sewardj45f4e7c2005-09-27 19:20:21 +00001642 SizeT i, j;
sewardjf2184912006-05-03 22:13:57 +00001643 UChar vabits2, vabits8;
1644 Bool aligned, nooverlap;
sewardjc859fbf2005-04-22 21:10:28 +00001645
njn1d0825f2006-03-27 11:37:07 +00001646 DEBUG("MC_(copy_address_range_state)\n");
1647 PROF_EVENT(50, "MC_(copy_address_range_state)");
sewardj45f4e7c2005-09-27 19:20:21 +00001648
sewardjf2184912006-05-03 22:13:57 +00001649 if (len == 0 || src == dst)
sewardj45f4e7c2005-09-27 19:20:21 +00001650 return;
1651
sewardjf2184912006-05-03 22:13:57 +00001652 aligned = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
1653 nooverlap = src+len <= dst || dst+len <= src;
sewardj45f4e7c2005-09-27 19:20:21 +00001654
sewardjf2184912006-05-03 22:13:57 +00001655 if (nooverlap && aligned) {
1656
1657 /* Vectorised fast case, when no overlap and suitably aligned */
1658 /* vector loop */
1659 i = 0;
1660 while (len >= 4) {
1661 vabits8 = get_vabits8_for_aligned_word32( src+i );
1662 set_vabits8_for_aligned_word32( dst+i, vabits8 );
bart5dd8e6a2008-03-22 08:04:29 +00001663 if (LIKELY(VA_BITS8_DEFINED == vabits8
sewardjf2184912006-05-03 22:13:57 +00001664 || VA_BITS8_UNDEFINED == vabits8
1665 || VA_BITS8_NOACCESS == vabits8)) {
1666 /* do nothing */
1667 } else {
1668 /* have to copy secondary map info */
1669 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
1670 set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
1671 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
1672 set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
1673 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
1674 set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
1675 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
1676 set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
1677 }
1678 i += 4;
1679 len -= 4;
1680 }
1681 /* fixup loop */
1682 while (len >= 1) {
njn1d0825f2006-03-27 11:37:07 +00001683 vabits2 = get_vabits2( src+i );
1684 set_vabits2( dst+i, vabits2 );
njndbf7ca72006-03-31 11:57:59 +00001685 if (VA_BITS2_PARTDEFINED == vabits2) {
njn1d0825f2006-03-27 11:37:07 +00001686 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1687 }
sewardjf2184912006-05-03 22:13:57 +00001688 i++;
1689 len--;
1690 }
1691
1692 } else {
1693
1694 /* We have to do things the slow way */
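      /* A short note on the copy direction, as a reading of the loops
         below for the overlapping case: when src < dst the tail of the
         source overlaps the head of the destination, so copying
         low-to-high would clobber source bytes before they are read;
         hence the backwards loop.  When src > dst, low-to-high is safe. */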
1695 if (src < dst) {
1696 for (i = 0, j = len-1; i < len; i++, j--) {
1697 PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
1698 vabits2 = get_vabits2( src+j );
1699 set_vabits2( dst+j, vabits2 );
1700 if (VA_BITS2_PARTDEFINED == vabits2) {
1701 set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
1702 }
1703 }
1704 }
1705
1706 if (src > dst) {
1707 for (i = 0; i < len; i++) {
1708 PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
1709 vabits2 = get_vabits2( src+i );
1710 set_vabits2( dst+i, vabits2 );
1711 if (VA_BITS2_PARTDEFINED == vabits2) {
1712 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1713 }
1714 }
sewardj45f4e7c2005-09-27 19:20:21 +00001715 }
sewardjc859fbf2005-04-22 21:10:28 +00001716 }
sewardjf2184912006-05-03 22:13:57 +00001717
sewardjc859fbf2005-04-22 21:10:28 +00001718}
1719
1720
sewardj7cf4e6b2008-05-01 20:24:26 +00001721/*------------------------------------------------------------*/
1722/*--- Origin tracking stuff - cache basics ---*/
1723/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001724
sewardj7cf4e6b2008-05-01 20:24:26 +00001725/* Some background comments on the origin tracking implementation
1726 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1727
1728 Note that this implementation draws inspiration from the "origin
1729 tracking by value piggybacking" scheme described in "Tracking Bad
1730 Apples: Reporting the Origin of Null and Undefined Value Errors"
1731 (Michael Bond, Nicholas Nethercote, Stephen Kent, Samuel Guyer,
1732 Kathryn McKinley, OOPSLA07, Montreal, Oct 2007) but in fact it is
1733 implemented completely differently.
1734
1735 This implementation tracks the defining point of all values using
1736 so called "origin tags", which are 32-bit integers, rather than
1737 using the values themselves to encode the origins. The latter,
1738 so-called "value piggybacking", is what the OOPSLA07 paper
1739 describes.
1740
1741 Origin tags, as tracked by the machinery below, are 32-bit unsigned
1742 ints (UInts), regardless of the machine's word size.
1743
1744 > Question: why is otag a UInt? Wouldn't a UWord be better? Isn't
1745 > it really just the address of the relevant ExeContext?
1746
1747 Well, it's not the address, but a value which has a 1-1 mapping
1748 with ExeContexts, and is guaranteed not to be zero, since zero
1749 denotes (to memcheck) "unknown origin or defined value". So these
1750 UInts are just numbers starting at 1; each ExeContext is given a
1751 number when it is created.
1752
1753 Making these otags 32-bit regardless of the machine's word size
1754 makes the 64-bit implementation easier (next para). And it doesn't
1755 really limit us in any way, since for the tags to overflow would
1756 require that the program somehow caused 2^32-1 different
1757 ExeContexts to be created, in which case it is probably in deep
1758 trouble. Not to mention V will have soaked up many tens of
1759 gigabytes of memory merely to store them all.
1760
1761 So having 64-bit origins doesn't really buy you anything, and has
1762 the following downsides:
1763
1764 Suppose that instead, an otag is a UWord. This would mean that, on
1765 a 64-bit target,
1766
1767 1. It becomes hard to shadow any element of guest state which is
1768 smaller than 8 bytes. To do so means you'd need to find some
1769 8-byte-sized hole in the guest state which you don't want to
1770 shadow, and use that instead to hold the otag. On ppc64, the
1771 condition code register(s) are split into 20 UChar sized pieces,
1772 all of which need to be tracked (guest_XER_SO .. guest_CR7_0)
1773 and so that would entail finding 160 bytes somewhere else in the
1774 guest state.
1775
1776 Even on x86, I want to track origins for %AH .. %DH (bits 15:8
1777 of %EAX .. %EDX) that are separate from %AL .. %DL (bits 7:0 of
1778 same) and so I had to look for 4 untracked otag-sized areas in
1779 the guest state to make that possible.
1780
1781 The same problem exists of course when origin tags are only 32
1782 bits, but it's less extreme.
1783
1784 2. (More compelling) it doubles the size of the origin shadow
1785 memory. Given that the shadow memory is organised as a fixed
1786 size cache, and that accuracy of tracking is limited by origins
1787 falling out the cache due to space conflicts, this isn't good.
1788
1789 > Another question: is the origin tracking perfect, or are there
1790 > cases where it fails to determine an origin?
1791
1792 It is imperfect for at least the following reasons, and
1793 probably more:
1794
1795 * Insufficient capacity in the origin cache. When a line is
1796 evicted from the cache it is gone forever, and so subsequent
1797 queries for the line produce zero, indicating no origin
1798 information. Interestingly, a line containing all zeroes can be
1799 evicted "free" from the cache, since it contains no useful
1800 information, so there is scope perhaps for some cleverer cache
1801 management schemes.
1802
1803 * The origin cache only stores one otag per 32-bits of address
1804 space, plus 4 bits indicating which of the 4 bytes has that tag
1805 and which are considered defined. The result is that if two
1806 undefined bytes in the same word are stored in memory, the first
1807 stored byte's origin will be lost and replaced by the origin for
1808 the second byte.
1809
1810 * Nonzero origin tags for defined values. Consider a binary
1811 operator application op(x,y). Suppose y is undefined (and so has
1812 a valid nonzero origin tag), and x is defined, but erroneously
1813 has a nonzero origin tag (defined values should have tag zero).
1814 If the erroneous tag has a numeric value greater than y's tag,
1815 then the rule for propagating origin tags through binary
1816 operations, which is simply to take the unsigned max of the two
1817 tags, will erroneously propagate x's tag rather than y's.
1818
1819 * Some obscure uses of x86/amd64 byte registers can cause lossage
1820 or confusion of origins. %AH .. %DH are treated as different
1821 from, and unrelated to, their parent registers, %EAX .. %EDX.
1822 So some weird sequences like
1823
1824 movb undefined-value, %AH
1825 movb defined-value, %AL
1826 .. use %AX or %EAX ..
1827
1828 will cause the origin attributed to %AH to be ignored, since %AL,
1829 %AX, %EAX are treated as the same register, and %AH as a
1830 completely separate one.
1831
1832 But having said all that, it actually seems to work fairly well in
1833 practice.
1834*/
1835
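/* A small orienting note, deduced from the helpers further below
   (make_mem_undefined_w_tid_and_okind and friends): an otag is composed
   as "ecu | okind", where the ECU comes from
   VG_(get_ECU_from_ExeContext) and okind is a small kind code
   (okind <= 3, e.g. MC_OKIND_STACK), so the bottom two bits of an otag
   carry the origin kind and the remaining bits identify the
   ExeContext. */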
1836static UWord stats_ocacheL1_find = 0;
1837static UWord stats_ocacheL1_found_at_1 = 0;
1838static UWord stats_ocacheL1_found_at_N = 0;
1839static UWord stats_ocacheL1_misses = 0;
1840static UWord stats_ocacheL1_lossage = 0;
1841static UWord stats_ocacheL1_movefwds = 0;
1842
1843static UWord stats__ocacheL2_refs = 0;
1844static UWord stats__ocacheL2_misses = 0;
1845static UWord stats__ocacheL2_n_nodes_max = 0;
1846
1847/* Cache of 32-bit values, one every 32 bits of address space */
1848
1849#define OC_BITS_PER_LINE 5
1850#define OC_W32S_PER_LINE (1 << (OC_BITS_PER_LINE - 2))
1851
1852static INLINE UWord oc_line_offset ( Addr a ) {
1853 return (a >> 2) & (OC_W32S_PER_LINE - 1);
1854}
1855static INLINE Bool is_valid_oc_tag ( Addr tag ) {
1856 return 0 == (tag & ((1 << OC_BITS_PER_LINE) - 1));
1857}
1858
1859#define OC_LINES_PER_SET 2
1860
1861#define OC_N_SET_BITS 20
1862#define OC_N_SETS (1 << OC_N_SET_BITS)
1863
1864/* These settings give:
1865 64 bit host: ocache: 100,663,296 sizeB 67,108,864 useful
1866 32 bit host: ocache: 92,274,688 sizeB 67,108,864 useful
1867*/
1868
1869#define OC_MOVE_FORWARDS_EVERY_BITS 7
1870
1871
1872typedef
1873 struct {
1874 Addr tag;
1875 UInt w32[OC_W32S_PER_LINE];
1876 UChar descr[OC_W32S_PER_LINE];
1877 }
1878 OCacheLine;
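
/* Rough size arithmetic behind the figures quoted above, assuming no
   struct padding: with OC_BITS_PER_LINE == 5 a line covers 32 bytes of
   address space and holds OC_W32S_PER_LINE == 8 otags plus 8 descr
   bytes (descr[i] uses its low 4 bits to say which bytes of the i'th
   word the otag applies to).  On a 64-bit host a line is therefore
   8+32+8 == 48 bytes, and 48 * OC_LINES_PER_SET * OC_N_SETS ==
   48 * 2 * 2^20 == 100,663,296 bytes; on a 32-bit host the 4-byte tag
   gives 44 * 2 * 2^20 == 92,274,688.  The 67,108,864 "useful" bytes
   count just the w32 payload: 32 * 2 * 2^20. */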
1879
1880/* Classify and also sanity-check 'line'. Return 'e' (empty) if not
1881 in use, 'n' (nonzero) if it contains at least one valid origin tag,
1882 and 'z' if all the represented tags are zero. */
1883static UChar classify_OCacheLine ( OCacheLine* line )
1884{
1885 UWord i;
1886 if (line->tag == 1/*invalid*/)
1887 return 'e'; /* EMPTY */
1888 tl_assert(is_valid_oc_tag(line->tag));
1889 for (i = 0; i < OC_W32S_PER_LINE; i++) {
1890 tl_assert(0 == ((~0xF) & line->descr[i]));
1891 if (line->w32[i] > 0 && line->descr[i] > 0)
1892 return 'n'; /* NONZERO - contains useful info */
1893 }
1894 return 'z'; /* ZERO - no useful info */
1895}
1896
1897typedef
1898 struct {
1899 OCacheLine line[OC_LINES_PER_SET];
1900 }
1901 OCacheSet;
1902
1903typedef
1904 struct {
1905 OCacheSet set[OC_N_SETS];
1906 }
1907 OCache;
1908
1909static OCache ocache;
1910static UWord ocache_event_ctr = 0;
1911
1912static void init_ocacheL2 ( void ); /* fwds */
1913static void init_OCache ( void )
1914{
1915 UWord line, set;
1916 for (set = 0; set < OC_N_SETS; set++) {
1917 for (line = 0; line < OC_LINES_PER_SET; line++) {
1918 ocache.set[set].line[line].tag = 1/*invalid*/;
1919 }
1920 }
1921 init_ocacheL2();
1922}
1923
1924static void moveLineForwards ( OCacheSet* set, UWord lineno )
1925{
1926 OCacheLine tmp;
1927 stats_ocacheL1_movefwds++;
1928 tl_assert(lineno > 0 && lineno < OC_LINES_PER_SET);
1929 tmp = set->line[lineno-1];
1930 set->line[lineno-1] = set->line[lineno];
1931 set->line[lineno] = tmp;
1932}
1933
1934static void zeroise_OCacheLine ( OCacheLine* line, Addr tag ) {
1935 UWord i;
1936 for (i = 0; i < OC_W32S_PER_LINE; i++) {
1937 line->w32[i] = 0; /* NO ORIGIN */
1938 line->descr[i] = 0; /* REALLY REALLY NO ORIGIN! */
1939 }
1940 line->tag = tag;
1941}
1942
1943//////////////////////////////////////////////////////////////
1944//// OCache backing store
1945
1946static OSet* ocacheL2 = NULL;
1947
1948static void* ocacheL2_malloc ( SizeT szB ) {
1949 return VG_(malloc)(szB);
1950}
1951static void ocacheL2_free ( void* v ) {
1952 VG_(free)( v );
1953}
1954
1955/* Stats: # nodes currently in tree */
1956static UWord stats__ocacheL2_n_nodes = 0;
1957
1958static void init_ocacheL2 ( void )
1959{
1960 tl_assert(!ocacheL2);
1961 tl_assert(sizeof(Word) == sizeof(Addr)); /* since OCacheLine.tag :: Addr */
1962 tl_assert(0 == offsetof(OCacheLine,tag));
1963 ocacheL2
1964 = VG_(OSetGen_Create)( offsetof(OCacheLine,tag),
1965 NULL, /* fast cmp */
1966 ocacheL2_malloc, ocacheL2_free );
1967 tl_assert(ocacheL2);
1968 stats__ocacheL2_n_nodes = 0;
1969}
1970
1971/* Find line with the given tag in the tree, or NULL if not found. */
1972static OCacheLine* ocacheL2_find_tag ( Addr tag )
1973{
1974 OCacheLine* line;
1975 tl_assert(is_valid_oc_tag(tag));
1976 stats__ocacheL2_refs++;
1977 line = VG_(OSetGen_Lookup)( ocacheL2, &tag );
1978 return line;
1979}
1980
1981/* Delete the line with the given tag from the tree, if it is present, and
1982 free up the associated memory. */
1983static void ocacheL2_del_tag ( Addr tag )
1984{
1985 OCacheLine* line;
1986 tl_assert(is_valid_oc_tag(tag));
1987 stats__ocacheL2_refs++;
1988 line = VG_(OSetGen_Remove)( ocacheL2, &tag );
1989 if (line) {
1990 VG_(OSetGen_FreeNode)(ocacheL2, line);
1991 tl_assert(stats__ocacheL2_n_nodes > 0);
1992 stats__ocacheL2_n_nodes--;
1993 }
1994}
1995
1996/* Add a copy of the given line to the tree. It must not already be
1997 present. */
1998static void ocacheL2_add_line ( OCacheLine* line )
1999{
2000 OCacheLine* copy;
2001 tl_assert(is_valid_oc_tag(line->tag));
2002 copy = VG_(OSetGen_AllocNode)( ocacheL2, sizeof(OCacheLine) );
2003 tl_assert(copy);
2004 *copy = *line;
2005 stats__ocacheL2_refs++;
2006 VG_(OSetGen_Insert)( ocacheL2, copy );
2007 stats__ocacheL2_n_nodes++;
2008 if (stats__ocacheL2_n_nodes > stats__ocacheL2_n_nodes_max)
2009 stats__ocacheL2_n_nodes_max = stats__ocacheL2_n_nodes;
2010}
2011
2012////
2013//////////////////////////////////////////////////////////////
2014
2015__attribute__((noinline))
2016static OCacheLine* find_OCacheLine_SLOW ( Addr a )
2017{
2018 OCacheLine *victim, *inL2;
2019 UChar c;
2020 UWord line;
2021 UWord setno = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
2022 UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
2023 UWord tag = a & tagmask;
2024 tl_assert(setno >= 0 && setno < OC_N_SETS);
2025
2026 /* we already tried line == 0; skip therefore. */
2027 for (line = 1; line < OC_LINES_PER_SET; line++) {
2028 if (ocache.set[setno].line[line].tag == tag) {
2029 if (line == 1) {
2030 stats_ocacheL1_found_at_1++;
2031 } else {
2032 stats_ocacheL1_found_at_N++;
2033 }
2034 if (UNLIKELY(0 == (ocache_event_ctr++
2035 & ((1<<OC_MOVE_FORWARDS_EVERY_BITS)-1)))) {
2036 moveLineForwards( &ocache.set[setno], line );
2037 line--;
2038 }
2039 return &ocache.set[setno].line[line];
2040 }
2041 }
2042
2043 /* A miss. Use the last slot. Implicitly this means we're
2044 ejecting the line in the last slot. */
2045 stats_ocacheL1_misses++;
2046 tl_assert(line == OC_LINES_PER_SET);
2047 line--;
2048 tl_assert(line > 0);
2049
2050 /* First, move the to-be-ejected line to the L2 cache. */
2051 victim = &ocache.set[setno].line[line];
2052 c = classify_OCacheLine(victim);
2053 switch (c) {
2054 case 'e':
2055 /* the line is empty (has invalid tag); ignore it. */
2056 break;
2057 case 'z':
2058 /* line contains zeroes. We must ensure the backing store is
2059 updated accordingly, either by copying the line there
2060 verbatim, or by ensuring it isn't present there. We
2061 choose the latter on the basis that it reduces the size of
2062 the backing store. */
2063 ocacheL2_del_tag( victim->tag );
2064 break;
2065 case 'n':
2066 /* line contains at least one real, useful origin. Copy it
2067 to the backing store. */
2068 stats_ocacheL1_lossage++;
2069 inL2 = ocacheL2_find_tag( victim->tag );
2070 if (inL2) {
2071 *inL2 = *victim;
2072 } else {
2073 ocacheL2_add_line( victim );
2074 }
2075 break;
2076 default:
2077 tl_assert(0);
2078 }
2079
2080 /* Now we must reload the L1 cache from the backing tree, if
2081 possible. */
2082 tl_assert(tag != victim->tag); /* stay sane */
2083 inL2 = ocacheL2_find_tag( tag );
2084 if (inL2) {
2085 /* We're in luck. It's in the L2. */
2086 ocache.set[setno].line[line] = *inL2;
2087 } else {
2088 /* Missed at both levels of the cache hierarchy. We have to
2089 declare it as full of zeroes (unknown origins). */
2090 stats__ocacheL2_misses++;
2091 zeroise_OCacheLine( &ocache.set[setno].line[line], tag );
2092 }
2093
2094 /* Move it one forwards */
2095 moveLineForwards( &ocache.set[setno], line );
2096 line--;
2097
2098 return &ocache.set[setno].line[line];
2099}
2100
2101static INLINE OCacheLine* find_OCacheLine ( Addr a )
2102{
2103 UWord setno = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
2104 UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
2105 UWord tag = a & tagmask;
2106
2107 stats_ocacheL1_find++;
2108
2109 if (OC_ENABLE_ASSERTIONS) {
2110 tl_assert(setno >= 0 && setno < OC_N_SETS);
2111 tl_assert(0 == (tag & (4 * OC_W32S_PER_LINE - 1)));
2112 }
2113
2114 if (LIKELY(ocache.set[setno].line[0].tag == tag)) {
2115 return &ocache.set[setno].line[0];
2116 }
2117
2118 return find_OCacheLine_SLOW( a );
2119}
2120
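/* A worked example of the decomposition used above, for a hypothetical
   address a == 0x1234: the line tag is a & ~31 == 0x1220, the set
   number is (a >> OC_BITS_PER_LINE) & (OC_N_SETS-1) == 0x91, and
   oc_line_offset(a) == (a >> 2) & 7 == 5; so the otag for the 32-bit
   word at 0x1234 lives in w32[5] of whichever line in set 0x91 carries
   tag 0x1220 (in the L1 set if present, else in the ocacheL2 backing
   tree). */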
2121static INLINE void set_aligned_word64_Origin_to_undef ( Addr a, UInt otag )
2122{
2123 //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
2124 //// Set the origins for a+0 .. a+7
2125 { OCacheLine* line;
2126 UWord lineoff = oc_line_offset(a);
2127 if (OC_ENABLE_ASSERTIONS) {
2128 tl_assert(lineoff >= 0
2129 && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
2130 }
2131 line = find_OCacheLine( a );
2132 line->descr[lineoff+0] = 0xF;
2133 line->descr[lineoff+1] = 0xF;
2134 line->w32[lineoff+0] = otag;
2135 line->w32[lineoff+1] = otag;
2136 }
2137 //// END inlined, specialised version of MC_(helperc_b_store8)
2138}
2139
2140
2141/*------------------------------------------------------------*/
2142/*--- Aligned fast case permission setters, ---*/
2143/*--- for dealing with stacks ---*/
2144/*------------------------------------------------------------*/
2145
2146/*--------------------- 32-bit ---------------------*/
2147
2148/* Nb: by "aligned" here we mean 4-byte aligned */
2149
2150static INLINE void make_aligned_word32_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00002151{
njn1d0825f2006-03-27 11:37:07 +00002152 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00002153 SecMap* sm;
2154
njndbf7ca72006-03-31 11:57:59 +00002155 PROF_EVENT(300, "make_aligned_word32_undefined");
sewardj5d28efc2005-04-21 22:16:29 +00002156
njn1d0825f2006-03-27 11:37:07 +00002157#ifndef PERF_FAST_STACK2
sewardj7cf4e6b2008-05-01 20:24:26 +00002158 make_mem_undefined(a, 4);
njn1d0825f2006-03-27 11:37:07 +00002159#else
bart5dd8e6a2008-03-22 08:04:29 +00002160 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00002161 PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
sewardj7cf4e6b2008-05-01 20:24:26 +00002162 make_mem_undefined(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00002163 return;
2164 }
2165
njna7c7ebd2006-03-28 12:51:02 +00002166 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00002167 sm_off = SM_OFF(a);
njndbf7ca72006-03-31 11:57:59 +00002168 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00002169#endif
njn9b007f62003-04-07 14:40:25 +00002170}
2171
sewardj7cf4e6b2008-05-01 20:24:26 +00002172static INLINE
2173void make_aligned_word32_undefined_w_otag ( Addr a, UInt otag )
2174{
2175 make_aligned_word32_undefined(a);
2176 //// BEGIN inlined, specialised version of MC_(helperc_b_store4)
2177 //// Set the origins for a+0 .. a+3
2178 { OCacheLine* line;
2179 UWord lineoff = oc_line_offset(a);
2180 if (OC_ENABLE_ASSERTIONS) {
2181 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
2182 }
2183 line = find_OCacheLine( a );
2184 line->descr[lineoff] = 0xF;
2185 line->w32[lineoff] = otag;
2186 }
2187 //// END inlined, specialised version of MC_(helperc_b_store4)
2188}
sewardj5d28efc2005-04-21 22:16:29 +00002189
njn1d0825f2006-03-27 11:37:07 +00002190static INLINE
2191void make_aligned_word32_noaccess ( Addr a )
sewardj5d28efc2005-04-21 22:16:29 +00002192{
njn1d0825f2006-03-27 11:37:07 +00002193 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00002194 SecMap* sm;
2195
sewardj5d28efc2005-04-21 22:16:29 +00002196 PROF_EVENT(310, "make_aligned_word32_noaccess");
2197
njn1d0825f2006-03-27 11:37:07 +00002198#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00002199 MC_(make_mem_noaccess)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00002200#else
bart5dd8e6a2008-03-22 08:04:29 +00002201 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
sewardj5d28efc2005-04-21 22:16:29 +00002202 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00002203 MC_(make_mem_noaccess)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00002204 return;
2205 }
2206
njna7c7ebd2006-03-28 12:51:02 +00002207 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00002208 sm_off = SM_OFF(a);
2209 sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
sewardj7cf4e6b2008-05-01 20:24:26 +00002210
2211 //// BEGIN inlined, specialised version of MC_(helperc_b_store4)
2212 //// Set the origins for a+0 .. a+3.
2213 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
2214 OCacheLine* line;
2215 UWord lineoff = oc_line_offset(a);
2216 if (OC_ENABLE_ASSERTIONS) {
2217 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
2218 }
2219 line = find_OCacheLine( a );
2220 line->descr[lineoff] = 0;
2221 }
2222 //// END inlined, specialised version of MC_(helperc_b_store4)
njn1d0825f2006-03-27 11:37:07 +00002223#endif
sewardj5d28efc2005-04-21 22:16:29 +00002224}
2225
sewardj7cf4e6b2008-05-01 20:24:26 +00002226/*--------------------- 64-bit ---------------------*/
sewardj5d28efc2005-04-21 22:16:29 +00002227
njn9b007f62003-04-07 14:40:25 +00002228/* Nb: by "aligned" here we mean 8-byte aligned */
sewardj7cf4e6b2008-05-01 20:24:26 +00002229
2230static INLINE void make_aligned_word64_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00002231{
njn1d0825f2006-03-27 11:37:07 +00002232 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00002233 SecMap* sm;
2234
njndbf7ca72006-03-31 11:57:59 +00002235 PROF_EVENT(320, "make_aligned_word64_undefined");
sewardj23eb2fd2005-04-22 16:29:19 +00002236
njn1d0825f2006-03-27 11:37:07 +00002237#ifndef PERF_FAST_STACK2
sewardj7cf4e6b2008-05-01 20:24:26 +00002238 make_mem_undefined(a, 8);
njn1d0825f2006-03-27 11:37:07 +00002239#else
bart5dd8e6a2008-03-22 08:04:29 +00002240 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00002241 PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
sewardj7cf4e6b2008-05-01 20:24:26 +00002242 make_mem_undefined(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00002243 return;
2244 }
2245
njna7c7ebd2006-03-28 12:51:02 +00002246 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00002247 sm_off16 = SM_OFF_16(a);
njndbf7ca72006-03-31 11:57:59 +00002248 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00002249#endif
njn9b007f62003-04-07 14:40:25 +00002250}
2251
sewardj7cf4e6b2008-05-01 20:24:26 +00002252static INLINE
2253void make_aligned_word64_undefined_w_otag ( Addr a, UInt otag )
2254{
2255 make_aligned_word64_undefined(a);
2256 //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
2257 //// Set the origins for a+0 .. a+7
2258 { OCacheLine* line;
2259 UWord lineoff = oc_line_offset(a);
2260 tl_assert(lineoff >= 0
2261 && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
2262 line = find_OCacheLine( a );
2263 line->descr[lineoff+0] = 0xF;
2264 line->descr[lineoff+1] = 0xF;
2265 line->w32[lineoff+0] = otag;
2266 line->w32[lineoff+1] = otag;
2267 }
2268 //// END inlined, specialised version of MC_(helperc_b_store8)
2269}
sewardj23eb2fd2005-04-22 16:29:19 +00002270
njn1d0825f2006-03-27 11:37:07 +00002271static INLINE
2272void make_aligned_word64_noaccess ( Addr a )
njn9b007f62003-04-07 14:40:25 +00002273{
njn1d0825f2006-03-27 11:37:07 +00002274 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00002275 SecMap* sm;
2276
sewardj23eb2fd2005-04-22 16:29:19 +00002277 PROF_EVENT(330, "make_aligned_word64_noaccess");
2278
njn1d0825f2006-03-27 11:37:07 +00002279#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00002280 MC_(make_mem_noaccess)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00002281#else
bart5dd8e6a2008-03-22 08:04:29 +00002282 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +00002283 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00002284 MC_(make_mem_noaccess)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00002285 return;
2286 }
2287
njna7c7ebd2006-03-28 12:51:02 +00002288 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00002289 sm_off16 = SM_OFF_16(a);
2290 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
sewardj7cf4e6b2008-05-01 20:24:26 +00002291
2292 //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
2293 //// Clear the origins for a+0 .. a+7.
2294 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
2295 OCacheLine* line;
2296 UWord lineoff = oc_line_offset(a);
2297 tl_assert(lineoff >= 0
2298 && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
2299 line = find_OCacheLine( a );
2300 line->descr[lineoff+0] = 0;
2301 line->descr[lineoff+1] = 0;
2302 }
2303 //// END inlined, specialised version of MC_(helperc_b_store8)
njn1d0825f2006-03-27 11:37:07 +00002304#endif
njn9b007f62003-04-07 14:40:25 +00002305}
2306
sewardj23eb2fd2005-04-22 16:29:19 +00002307
njn1d0825f2006-03-27 11:37:07 +00002308/*------------------------------------------------------------*/
2309/*--- Stack pointer adjustment ---*/
2310/*------------------------------------------------------------*/
2311
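/* All the specialised handlers below follow the same convention: when
   the stack grows by N bytes (new_mem_stack_N) the bytes at
   [new_SP, new_SP+N) become undefined, and when it shrinks
   (die_mem_stack_N) the bytes at [new_SP-N, new_SP) become
   inaccessible.  Every address is additionally biased by
   -VG_STACK_REDZONE_SZB, so that an ABI-mandated redzone below the
   stack pointer (if any) gets the same treatment as the stack proper.
   The alignment tests simply pick the widest aligned helpers that fit. */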
sewardj7cf4e6b2008-05-01 20:24:26 +00002312/*--------------- adjustment by 4 bytes ---------------*/
2313
2314static void VG_REGPARM(2) mc_new_mem_stack_4_w_ECU(Addr new_SP, UInt ecu)
2315{
2316 UInt otag = ecu | MC_OKIND_STACK;
2317 PROF_EVENT(110, "new_mem_stack_4");
2318 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2319 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
2320 } else {
2321 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 4, otag );
2322 }
2323}
2324
njn1d0825f2006-03-27 11:37:07 +00002325static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
2326{
2327 PROF_EVENT(110, "new_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00002328 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002329 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00002330 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002331 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
njn1d0825f2006-03-27 11:37:07 +00002332 }
2333}
2334
2335static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
2336{
2337 PROF_EVENT(120, "die_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00002338 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002339 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00002340 } else {
njndbf7ca72006-03-31 11:57:59 +00002341 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
njn1d0825f2006-03-27 11:37:07 +00002342 }
2343}
2344
sewardj7cf4e6b2008-05-01 20:24:26 +00002345/*--------------- adjustment by 8 bytes ---------------*/
2346
2347static void VG_REGPARM(2) mc_new_mem_stack_8_w_ECU(Addr new_SP, UInt ecu)
2348{
2349 UInt otag = ecu | MC_OKIND_STACK;
2350 PROF_EVENT(111, "new_mem_stack_8");
2351 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2352 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
2353 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2354 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2355 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4, otag );
2356 } else {
2357 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 8, otag );
2358 }
2359}
2360
njn1d0825f2006-03-27 11:37:07 +00002361static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
2362{
2363 PROF_EVENT(111, "new_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00002364 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002365 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
sewardj05a46732006-10-17 01:28:10 +00002366 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj7cf4e6b2008-05-01 20:24:26 +00002367 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njndbf7ca72006-03-31 11:57:59 +00002368 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00002369 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002370 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
njn1d0825f2006-03-27 11:37:07 +00002371 }
2372}
2373
2374static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
2375{
2376 PROF_EVENT(121, "die_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00002377 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002378 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00002379 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002380 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
2381 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00002382 } else {
njndbf7ca72006-03-31 11:57:59 +00002383 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
njn1d0825f2006-03-27 11:37:07 +00002384 }
2385}
2386
sewardj7cf4e6b2008-05-01 20:24:26 +00002387/*--------------- adjustment by 12 bytes ---------------*/
2388
2389static void VG_REGPARM(2) mc_new_mem_stack_12_w_ECU(Addr new_SP, UInt ecu)
2390{
2391 UInt otag = ecu | MC_OKIND_STACK;
2392 PROF_EVENT(112, "new_mem_stack_12");
2393 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2394 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2395 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
2396 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2397 /* from previous test we don't have 8-alignment at offset +0,
2398 hence must have 8 alignment at offsets +4/-4. Hence safe to
2399 do 4 at +0 and then 8 at +4. */
2400 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2401 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4, otag );
2402 } else {
2403 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 12, otag );
2404 }
2405}
2406
njn1d0825f2006-03-27 11:37:07 +00002407static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
2408{
2409 PROF_EVENT(112, "new_mem_stack_12");
sewardj05a46732006-10-17 01:28:10 +00002410 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj7cf4e6b2008-05-01 20:24:26 +00002411 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njndbf7ca72006-03-31 11:57:59 +00002412 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00002413 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002414 /* from previous test we don't have 8-alignment at offset +0,
2415 hence must have 8 alignment at offsets +4/-4. Hence safe to
2416 do 4 at +0 and then 8 at +4. */
sewardj7cf4e6b2008-05-01 20:24:26 +00002417 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njndbf7ca72006-03-31 11:57:59 +00002418 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00002419 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002420 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
njn1d0825f2006-03-27 11:37:07 +00002421 }
2422}
2423
2424static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
2425{
2426 PROF_EVENT(122, "die_mem_stack_12");
2427 /* Note the -12 in the test */
sewardj43fcfd92006-10-17 23:14:42 +00002428 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
2429 /* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
2430 -4. */
njndbf7ca72006-03-31 11:57:59 +00002431 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
2432 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
sewardj05a46732006-10-17 01:28:10 +00002433 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002434 /* We have 4-alignment at +0, but we don't have 8-alignment at
2435 -12. So we must have 8-alignment at -8. Hence do 4 at -12
2436 and then 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00002437 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
2438 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00002439 } else {
njndbf7ca72006-03-31 11:57:59 +00002440 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
njn1d0825f2006-03-27 11:37:07 +00002441 }
2442}
2443
sewardj7cf4e6b2008-05-01 20:24:26 +00002444/*--------------- adjustment by 16 bytes ---------------*/
2445
2446static void VG_REGPARM(2) mc_new_mem_stack_16_w_ECU(Addr new_SP, UInt ecu)
2447{
2448 UInt otag = ecu | MC_OKIND_STACK;
2449 PROF_EVENT(113, "new_mem_stack_16");
2450 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2451 /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
2452 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2453 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
2454 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2455 /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
2456 Hence do 4 at +0, 8 at +4, 4 at +12. */
2457 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2458 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4 , otag );
2459 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+12, otag );
2460 } else {
2461 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 16, otag );
2462 }
2463}
2464
njn1d0825f2006-03-27 11:37:07 +00002465static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
2466{
2467 PROF_EVENT(113, "new_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00002468 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002469 /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
sewardj7cf4e6b2008-05-01 20:24:26 +00002470 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njndbf7ca72006-03-31 11:57:59 +00002471 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00002472 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002473 /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
2474 Hence do 4 at +0, 8 at +4, 4 at +12. */
sewardj7cf4e6b2008-05-01 20:24:26 +00002475 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njndbf7ca72006-03-31 11:57:59 +00002476 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
2477 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
njn1d0825f2006-03-27 11:37:07 +00002478 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002479 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
njn1d0825f2006-03-27 11:37:07 +00002480 }
2481}
2482
2483static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
2484{
2485 PROF_EVENT(123, "die_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00002486 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002487 /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00002488 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2489 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00002490 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002491 /* 8 alignment must be at -12. Do 4 at -16, 8 at -12, 4 at -4. */
njndbf7ca72006-03-31 11:57:59 +00002492 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2493 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
2494 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00002495 } else {
njndbf7ca72006-03-31 11:57:59 +00002496 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
njn1d0825f2006-03-27 11:37:07 +00002497 }
2498}
2499
sewardj7cf4e6b2008-05-01 20:24:26 +00002500/*--------------- adjustment by 32 bytes ---------------*/
2501
2502static void VG_REGPARM(2) mc_new_mem_stack_32_w_ECU(Addr new_SP, UInt ecu)
2503{
2504 UInt otag = ecu | MC_OKIND_STACK;
2505 PROF_EVENT(114, "new_mem_stack_32");
2506 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2507 /* Straightforward */
2508 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2509 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
2510 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
2511 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
2512 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2513 /* 8 alignment must be at +4. Hence do 8 at +4,+12,+20 and 4 at
2514 +0,+28. */
2515 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2516 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4 , otag );
2517 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+12, otag );
2518 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+20, otag );
2519 make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+28, otag );
2520 } else {
2521 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 32, otag );
2522 }
2523}
2524
njn1d0825f2006-03-27 11:37:07 +00002525static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
2526{
2527 PROF_EVENT(114, "new_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00002528 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002529 /* Straightforward */
sewardj7cf4e6b2008-05-01 20:24:26 +00002530 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2531 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
njndbf7ca72006-03-31 11:57:59 +00002532 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2533 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
sewardj05a46732006-10-17 01:28:10 +00002534 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002535 /* 8 alignment must be at +4. Hence do 8 at +4,+12,+20 and 4 at
2536 +0,+28. */
sewardj7cf4e6b2008-05-01 20:24:26 +00002537 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2538 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njndbf7ca72006-03-31 11:57:59 +00002539 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
2540 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
2541 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
njn1d0825f2006-03-27 11:37:07 +00002542 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002543 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
njn1d0825f2006-03-27 11:37:07 +00002544 }
2545}
2546
2547static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
2548{
2549 PROF_EVENT(124, "die_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00002550 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002551 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00002552 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2553 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2554 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2555 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
sewardj05a46732006-10-17 01:28:10 +00002556 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00002557 /* 8 alignment must be at -4 etc. Hence do 8 at -12,-20,-28 and
2558 4 at -32,-4. */
njndbf7ca72006-03-31 11:57:59 +00002559 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2560 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
2561 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
2562 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
2563 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00002564 } else {
njndbf7ca72006-03-31 11:57:59 +00002565 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
njn1d0825f2006-03-27 11:37:07 +00002566 }
2567}
2568
sewardj7cf4e6b2008-05-01 20:24:26 +00002569/*--------------- adjustment by 112 bytes ---------------*/
2570
2571static void VG_REGPARM(2) mc_new_mem_stack_112_w_ECU(Addr new_SP, UInt ecu)
2572{
2573 UInt otag = ecu | MC_OKIND_STACK;
2574 PROF_EVENT(115, "new_mem_stack_112");
2575 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2576 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2577 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
2578 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
2579 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
2580 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
2581 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
2582 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
2583 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
2584 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
2585 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
2586 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
2587 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
2588 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
2589 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
2590 } else {
2591 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 112, otag );
2592 }
2593}
2594
njn1d0825f2006-03-27 11:37:07 +00002595static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
2596{
2597 PROF_EVENT(115, "new_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00002598 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj7cf4e6b2008-05-01 20:24:26 +00002599 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2600 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
njndbf7ca72006-03-31 11:57:59 +00002601 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2602 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2603 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2604 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2605 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2606 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2607 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2608 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2609 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2610 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2611 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
sewardj7cf4e6b2008-05-01 20:24:26 +00002612 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
njn1d0825f2006-03-27 11:37:07 +00002613 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002614 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
njn1d0825f2006-03-27 11:37:07 +00002615 }
2616}
2617
2618static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
2619{
2620 PROF_EVENT(125, "die_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00002621 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002622 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2623 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2624 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2625 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2626 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2627 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2628 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2629 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2630 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2631 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2632 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2633 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2634 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2635 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002636 } else {
njndbf7ca72006-03-31 11:57:59 +00002637 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
njn1d0825f2006-03-27 11:37:07 +00002638 }
2639}
2640
sewardj7cf4e6b2008-05-01 20:24:26 +00002641/*--------------- adjustment by 128 bytes ---------------*/
2642
2643static void VG_REGPARM(2) mc_new_mem_stack_128_w_ECU(Addr new_SP, UInt ecu)
2644{
2645 UInt otag = ecu | MC_OKIND_STACK;
2646 PROF_EVENT(116, "new_mem_stack_128");
2647 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2648 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP , otag );
2649 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
2650 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
2651 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
2652 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
2653 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
2654 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
2655 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
2656 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
2657 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
2658 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
2659 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
2660 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
2661 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
2662 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+112, otag );
2663 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+120, otag );
2664 } else {
2665 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 128, otag );
2666 }
2667}
2668
njn1d0825f2006-03-27 11:37:07 +00002669static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
2670{
2671 PROF_EVENT(116, "new_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00002672 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj7cf4e6b2008-05-01 20:24:26 +00002673 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2674 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
njndbf7ca72006-03-31 11:57:59 +00002675 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2676 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2677 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2678 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2679 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2680 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2681 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2682 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2683 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2684 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2685 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
sewardj7cf4e6b2008-05-01 20:24:26 +00002686 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
2687 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112 );
2688 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120 );
njn1d0825f2006-03-27 11:37:07 +00002689 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002690 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
njn1d0825f2006-03-27 11:37:07 +00002691 }
2692}
2693
2694static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
2695{
2696 PROF_EVENT(126, "die_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00002697 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002698 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2699 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2700 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2701 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2702 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2703 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2704 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2705 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2706 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2707 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2708 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2709 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2710 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2711 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2712 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2713 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002714 } else {
njndbf7ca72006-03-31 11:57:59 +00002715 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
njn1d0825f2006-03-27 11:37:07 +00002716 }
2717}
2718
sewardj7cf4e6b2008-05-01 20:24:26 +00002719/*--------------- adjustment by 144 bytes ---------------*/
2720
2721static void VG_REGPARM(2) mc_new_mem_stack_144_w_ECU(Addr new_SP, UInt ecu)
2722{
2723 UInt otag = ecu | MC_OKIND_STACK;
2724 PROF_EVENT(117, "new_mem_stack_144");
2725 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2726 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
2727 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
2728 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
2729 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
2730 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
2731 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
2732 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
2733 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
2734 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
2735 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
2736 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
2737 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
2738 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
2739 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
2740 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+112, otag );
2741 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+120, otag );
2742 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+128, otag );
2743 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+136, otag );
2744 } else {
2745 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 144, otag );
2746 }
2747}
2748
njn1d0825f2006-03-27 11:37:07 +00002749static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
2750{
2751 PROF_EVENT(117, "new_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002752 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj7cf4e6b2008-05-01 20:24:26 +00002753 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2754 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
njndbf7ca72006-03-31 11:57:59 +00002755 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2756 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2757 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2758 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2759 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2760 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2761 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2762 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2763 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2764 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2765 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
sewardj7cf4e6b2008-05-01 20:24:26 +00002766 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
2767 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112 );
2768 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120 );
2769 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128 );
2770 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136 );
njn1d0825f2006-03-27 11:37:07 +00002771 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002772 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
njn1d0825f2006-03-27 11:37:07 +00002773 }
2774}
2775
2776static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
2777{
2778 PROF_EVENT(127, "die_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002779 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002780 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2781 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2782 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2783 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2784 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2785 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2786 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2787 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2788 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2789 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2790 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2791 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2792 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2793 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2794 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2795 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2796 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2797 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002798 } else {
njndbf7ca72006-03-31 11:57:59 +00002799 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
njn1d0825f2006-03-27 11:37:07 +00002800 }
2801}
2802
sewardj7cf4e6b2008-05-01 20:24:26 +00002803/*--------------- adjustment by 160 bytes ---------------*/
2804
2805static void VG_REGPARM(2) mc_new_mem_stack_160_w_ECU(Addr new_SP, UInt ecu)
2806{
2807 UInt otag = ecu | MC_OKIND_STACK;
2808 PROF_EVENT(118, "new_mem_stack_160");
2809 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
2810 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
2811 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
2812 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
2813 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
2814 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
2815 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
2816 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
2817 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
2818 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
2819 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
2820 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
2821 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
2822 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
2823 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
2824 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+112, otag );
2825 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+120, otag );
2826 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+128, otag );
2827 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+136, otag );
2828 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+144, otag );
2829 make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+152, otag );
2830 } else {
2831 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 160, otag );
2832 }
2833}
2834
njn1d0825f2006-03-27 11:37:07 +00002835static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
2836{
2837 PROF_EVENT(118, "new_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002838 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj7cf4e6b2008-05-01 20:24:26 +00002839 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2840 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
njndbf7ca72006-03-31 11:57:59 +00002841 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2842 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2843 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2844 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2845 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2846 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2847 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2848 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2849 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2850 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2851 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
sewardj7cf4e6b2008-05-01 20:24:26 +00002852 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
2853 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112 );
2854 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120 );
2855 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128 );
2856 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136 );
2857 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144 );
2858 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152 );
njn1d0825f2006-03-27 11:37:07 +00002859 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00002860 make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
njn1d0825f2006-03-27 11:37:07 +00002861 }
2862}
2863
2864static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
2865{
2866 PROF_EVENT(128, "die_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002867 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002868 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
2869 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
2870 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2871 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2872 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2873 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2874 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2875 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2876 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2877 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2878 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2879 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2880 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2881 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2882 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2883 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2884 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2885 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2886 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2887 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002888 } else {
njndbf7ca72006-03-31 11:57:59 +00002889 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
njn1d0825f2006-03-27 11:37:07 +00002890 }
2891}
2892
sewardj7cf4e6b2008-05-01 20:24:26 +00002893/*--------------- adjustment by N bytes ---------------*/
2894
2895static void mc_new_mem_stack_w_ECU ( Addr a, SizeT len, UInt ecu )
2896{
2897 UInt otag = ecu | MC_OKIND_STACK;
2898 PROF_EVENT(115, "new_mem_stack_w_otag");
2899 MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + a, len, otag );
2900}
2901
njn1d0825f2006-03-27 11:37:07 +00002902static void mc_new_mem_stack ( Addr a, SizeT len )
2903{
2904 PROF_EVENT(115, "new_mem_stack");
sewardj7cf4e6b2008-05-01 20:24:26 +00002905 make_mem_undefined ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002906}
2907
2908static void mc_die_mem_stack ( Addr a, SizeT len )
2909{
2910 PROF_EVENT(125, "die_mem_stack");
njndbf7ca72006-03-31 11:57:59 +00002911 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002912}
njn9b007f62003-04-07 14:40:25 +00002913
sewardj45d94cc2005-04-20 14:44:11 +00002914
njn1d0825f2006-03-27 11:37:07 +00002915/* The AMD64 ABI says:
2916
2917 "The 128-byte area beyond the location pointed to by %rsp is considered
2918 to be reserved and shall not be modified by signal or interrupt
2919 handlers. Therefore, functions may use this area for temporary data
2920 that is not needed across function calls. In particular, leaf functions
2921 may use this area for their entire stack frame, rather than adjusting
2922 the stack pointer in the prologue and epilogue. This area is known as
2923 red zone [sic]."
2924
2925 So after any call or return we need to mark this redzone as containing
2926 undefined values.
2927
2928 Consider this: we're in function f. f calls g. g moves rsp down
2929 modestly (say 16 bytes) and writes stuff all over the red zone, making it
2930 defined. g returns. f is buggy and reads from parts of the red zone
2931 that it didn't write on. But because g filled that area in, f is going
2932 to be picking up defined V bits and so any errors from reading bits of
2933 the red zone it didn't write, will be missed. The only solution I could
2934 think of was to make the red zone undefined when g returns to f.
2935
2936 This is in accordance with the ABI, which makes it clear the redzone
2937 is volatile across function calls.
2938
2939 The problem occurs the other way round too: f could fill the RZ up
2940 with defined values and g could mistakenly read them. So the RZ
2941 also needs to be nuked on function calls.
2942*/
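/* A hedged, illustrative sketch (hypothetical client code, not part of
   Memcheck and not compiled here): the kind of leaf function the ABI
   text above has in mind.  An optimising amd64 compiler may keep 'tmp'
   entirely in the 128-byte red zone below %rsp, without adjusting %rsp
   at all.  Once such a function returns those bytes are dead, which is
   why the red zone has to be re-marked as undefined around calls and
   returns. */
# if 0
static long sum_of_squares ( long a, long b )
{
   long tmp[2];            /* may be placed wholly in the red zone */
   tmp[0] = a * a;
   tmp[1] = b * b;
   return tmp[0] + tmp[1];
}
# endif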
sewardj7cf4e6b2008-05-01 20:24:26 +00002943
2944
2945/* Here's a simple cache to hold nia -> ECU mappings. It could be
2946 improved so as to have a lower miss rate. */
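/* Each cache set holds two (nia, ecu) pairs; a hit on the second pair
   promotes it to the first, so the whole thing behaves as a small
   2-way, most-recently-used-first cache (see convert_nia_to_ecu). */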
2947
2948static UWord stats__nia_cache_queries = 0;
2949static UWord stats__nia_cache_misses = 0;
2950
2951typedef
2952 struct { UWord nia0; UWord ecu0; /* nia0 maps to ecu0 */
2953 UWord nia1; UWord ecu1; } /* nia1 maps to ecu1 */
2954 WCacheEnt;
2955
2956#define N_NIA_TO_ECU_CACHE 511
2957
2958static WCacheEnt nia_to_ecu_cache[N_NIA_TO_ECU_CACHE];
2959
2960static void init_nia_to_ecu_cache ( void )
sewardj826ec492005-05-12 18:05:00 +00002961{
sewardj7cf4e6b2008-05-01 20:24:26 +00002962 UWord i;
2963 Addr zero_addr = 0;
2964 ExeContext* zero_ec;
2965 UInt zero_ecu;
2966 /* Fill all the slots with an entry for address zero, and the
2967 relevant otags accordingly. Hence the cache is initially filled
2968 with valid data. */
2969 zero_ec = VG_(make_depth_1_ExeContext_from_Addr)(zero_addr);
2970 tl_assert(zero_ec);
2971 zero_ecu = VG_(get_ECU_from_ExeContext)(zero_ec);
2972 tl_assert(VG_(is_plausible_ECU)(zero_ecu));
2973 for (i = 0; i < N_NIA_TO_ECU_CACHE; i++) {
2974 nia_to_ecu_cache[i].nia0 = zero_addr;
2975 nia_to_ecu_cache[i].ecu0 = zero_ecu;
2976 nia_to_ecu_cache[i].nia1 = zero_addr;
2977 nia_to_ecu_cache[i].ecu1 = zero_ecu;
2978 }
2979}
2980
2981static inline UInt convert_nia_to_ecu ( Addr nia )
2982{
2983 UWord i;
2984 UInt ecu;
2985 ExeContext* ec;
2986
2987 tl_assert( sizeof(nia_to_ecu_cache[0].nia1) == sizeof(nia) );
2988
2989 stats__nia_cache_queries++;
2990 i = nia % N_NIA_TO_ECU_CACHE;
2991 tl_assert(i >= 0 && i < N_NIA_TO_ECU_CACHE);
2992
2993 if (LIKELY( nia_to_ecu_cache[i].nia0 == nia ))
2994 return nia_to_ecu_cache[i].ecu0;
2995
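   /* Hit in the second way of this set: swap it into the first way, so
      that repeated lookups of the same nia take the fast path above. */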
2996 if (LIKELY( nia_to_ecu_cache[i].nia1 == nia )) {
2997# define SWAP(_w1,_w2) { UWord _t = _w1; _w1 = _w2; _w2 = _t; }
2998 SWAP( nia_to_ecu_cache[i].nia0, nia_to_ecu_cache[i].nia1 );
2999 SWAP( nia_to_ecu_cache[i].ecu0, nia_to_ecu_cache[i].ecu1 );
3000# undef SWAP
3001 return nia_to_ecu_cache[i].ecu0;
3002 }
3003
3004 stats__nia_cache_misses++;
3005 ec = VG_(make_depth_1_ExeContext_from_Addr)(nia);
3006 tl_assert(ec);
3007 ecu = VG_(get_ECU_from_ExeContext)(ec);
3008 tl_assert(VG_(is_plausible_ECU)(ecu));
3009
3010 nia_to_ecu_cache[i].nia1 = nia_to_ecu_cache[i].nia0;
3011 nia_to_ecu_cache[i].ecu1 = nia_to_ecu_cache[i].ecu0;
3012
3013 nia_to_ecu_cache[i].nia0 = nia;
3014 nia_to_ecu_cache[i].ecu0 = (UWord)ecu;
3015 return ecu;
3016}
3017
3018
3019/* Note that this serves both the origin-tracking and
3020 no-origin-tracking modes. We assume that calls to it are
3021 sufficiently infrequent that it isn't worth specialising for the
3022 with/without origin-tracking cases. */
3023void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len, Addr nia )
3024{
3025 UInt otag;
sewardj826ec492005-05-12 18:05:00 +00003026 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +00003027 if (0)
sewardj7cf4e6b2008-05-01 20:24:26 +00003028 VG_(printf)("helperc_MAKE_STACK_UNINIT (%p,%lu,nia=%p)\n",
3029 base, len, nia );
3030
3031 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
3032 UInt ecu = convert_nia_to_ecu ( nia );
3033 tl_assert(VG_(is_plausible_ECU)(ecu));
3034 otag = ecu | MC_OKIND_STACK;
3035 } else {
3036 tl_assert(nia == 0);
3037 otag = 0;
3038 }
sewardj2a3a1a72005-05-12 23:25:43 +00003039
3040# if 0
3041 /* Really slow version */
sewardj7cf4e6b2008-05-01 20:24:26 +00003042 MC_(make_mem_undefined)(base, len, otag);
sewardj2a3a1a72005-05-12 23:25:43 +00003043# endif
3044
3045# if 0
3046 /* Slow(ish) version, which is fairly easily seen to be correct.
3047 */
bart5dd8e6a2008-03-22 08:04:29 +00003048 if (LIKELY( VG_IS_8_ALIGNED(base) && len==128 )) {
sewardj7cf4e6b2008-05-01 20:24:26 +00003049 make_aligned_word64_undefined(base + 0, otag);
3050 make_aligned_word64_undefined(base + 8, otag);
3051 make_aligned_word64_undefined(base + 16, otag);
3052 make_aligned_word64_undefined(base + 24, otag);
sewardj2a3a1a72005-05-12 23:25:43 +00003053
sewardj7cf4e6b2008-05-01 20:24:26 +00003054 make_aligned_word64_undefined(base + 32, otag);
3055 make_aligned_word64_undefined(base + 40, otag);
3056 make_aligned_word64_undefined(base + 48, otag);
3057 make_aligned_word64_undefined(base + 56, otag);
sewardj2a3a1a72005-05-12 23:25:43 +00003058
sewardj7cf4e6b2008-05-01 20:24:26 +00003059 make_aligned_word64_undefined(base + 64, otag);
3060 make_aligned_word64_undefined(base + 72, otag);
3061 make_aligned_word64_undefined(base + 80, otag);
3062 make_aligned_word64_undefined(base + 88, otag);
sewardj2a3a1a72005-05-12 23:25:43 +00003063
sewardj7cf4e6b2008-05-01 20:24:26 +00003064 make_aligned_word64_undefined(base + 96, otag);
3065 make_aligned_word64_undefined(base + 104, otag);
3066 make_aligned_word64_undefined(base + 112, otag);
3067 make_aligned_word64_undefined(base + 120, otag);
sewardj2a3a1a72005-05-12 23:25:43 +00003068 } else {
sewardj7cf4e6b2008-05-01 20:24:26 +00003069 MC_(make_mem_undefined)(base, len, otag);
sewardj2a3a1a72005-05-12 23:25:43 +00003070 }
3071# endif
3072
3073 /* Idea is: go fast when
3074 * 8-aligned and length is 128
3075 * the sm is available in the main primary map
njn1d0825f2006-03-27 11:37:07 +00003076 * the address range falls entirely within a single secondary map
3077 If all those conditions hold, just update the V+A bits by writing
3078 directly into the vabits array. (If the sm was distinguished, this
3079 will make a copy and then write to it.)
sewardj2a3a1a72005-05-12 23:25:43 +00003080 */
sewardj7cf4e6b2008-05-01 20:24:26 +00003081
bart5dd8e6a2008-03-22 08:04:29 +00003082 if (LIKELY( len == 128 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00003083 /* Now we know the address range is suitably sized and aligned. */
3084 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00003085 UWord a_hi = (UWord)(base + 128 - 1);
njn1d0825f2006-03-27 11:37:07 +00003086 tl_assert(a_lo < a_hi); // paranoia: detect overflow
3087 if (a_hi < MAX_PRIMARY_ADDRESS) {
3088 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00003089 SecMap* sm = get_secmap_for_writing_low(a_lo);
3090 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2a3a1a72005-05-12 23:25:43 +00003091 /* Now we know that the entire address range falls within a
3092 single secondary map, and that that secondary 'lives' in
3093 the main primary map. */
bart5dd8e6a2008-03-22 08:04:29 +00003094 if (LIKELY(sm == sm_hi)) {
njn1d0825f2006-03-27 11:37:07 +00003095 // Finally, we know that the range is entirely within one secmap.
3096 UWord v_off = SM_OFF(a_lo);
3097 UShort* p = (UShort*)(&sm->vabits8[v_off]);
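         /* Each UShort holds the V+A bits (2 per byte) for 8 bytes of
            address space, so the 16 stores below mark all 128 bytes as
            undefined in one pass. */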
njndbf7ca72006-03-31 11:57:59 +00003098 p[ 0] = VA_BITS16_UNDEFINED;
3099 p[ 1] = VA_BITS16_UNDEFINED;
3100 p[ 2] = VA_BITS16_UNDEFINED;
3101 p[ 3] = VA_BITS16_UNDEFINED;
3102 p[ 4] = VA_BITS16_UNDEFINED;
3103 p[ 5] = VA_BITS16_UNDEFINED;
3104 p[ 6] = VA_BITS16_UNDEFINED;
3105 p[ 7] = VA_BITS16_UNDEFINED;
3106 p[ 8] = VA_BITS16_UNDEFINED;
3107 p[ 9] = VA_BITS16_UNDEFINED;
3108 p[10] = VA_BITS16_UNDEFINED;
3109 p[11] = VA_BITS16_UNDEFINED;
3110 p[12] = VA_BITS16_UNDEFINED;
3111 p[13] = VA_BITS16_UNDEFINED;
3112 p[14] = VA_BITS16_UNDEFINED;
3113 p[15] = VA_BITS16_UNDEFINED;
sewardj7cf4e6b2008-05-01 20:24:26 +00003114 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
3115 set_aligned_word64_Origin_to_undef( base + 8 * 0, otag );
3116 set_aligned_word64_Origin_to_undef( base + 8 * 1, otag );
3117 set_aligned_word64_Origin_to_undef( base + 8 * 2, otag );
3118 set_aligned_word64_Origin_to_undef( base + 8 * 3, otag );
3119 set_aligned_word64_Origin_to_undef( base + 8 * 4, otag );
3120 set_aligned_word64_Origin_to_undef( base + 8 * 5, otag );
3121 set_aligned_word64_Origin_to_undef( base + 8 * 6, otag );
3122 set_aligned_word64_Origin_to_undef( base + 8 * 7, otag );
3123 set_aligned_word64_Origin_to_undef( base + 8 * 8, otag );
3124 set_aligned_word64_Origin_to_undef( base + 8 * 9, otag );
3125 set_aligned_word64_Origin_to_undef( base + 8 * 10, otag );
3126 set_aligned_word64_Origin_to_undef( base + 8 * 11, otag );
3127 set_aligned_word64_Origin_to_undef( base + 8 * 12, otag );
3128 set_aligned_word64_Origin_to_undef( base + 8 * 13, otag );
3129 set_aligned_word64_Origin_to_undef( base + 8 * 14, otag );
3130 set_aligned_word64_Origin_to_undef( base + 8 * 15, otag );
3131 }
sewardj2a3a1a72005-05-12 23:25:43 +00003132 return;
njn1d0825f2006-03-27 11:37:07 +00003133 }
sewardj2a3a1a72005-05-12 23:25:43 +00003134 }
3135 }
3136
sewardj2e1a6772006-01-18 04:16:27 +00003137 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
bart5dd8e6a2008-03-22 08:04:29 +00003138 if (LIKELY( len == 288 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00003139 /* Now we know the address range is suitably sized and aligned. */
3140 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00003141 UWord a_hi = (UWord)(base + 288 - 1);
njn1d0825f2006-03-27 11:37:07 +00003142 tl_assert(a_lo < a_hi); // paranoia: detect overflow
3143 if (a_hi < MAX_PRIMARY_ADDRESS) {
3144 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00003145 SecMap* sm = get_secmap_for_writing_low(a_lo);
3146 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2e1a6772006-01-18 04:16:27 +00003147 /* Now we know that the entire address range falls within a
3148 single secondary map, and that that secondary 'lives' in
3149 the main primary map. */
bart5dd8e6a2008-03-22 08:04:29 +00003150 if (LIKELY(sm == sm_hi)) {
njn1d0825f2006-03-27 11:37:07 +00003151 // Finally, we know that the range is entirely within one secmap.
3152 UWord v_off = SM_OFF(a_lo);
3153 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00003154 p[ 0] = VA_BITS16_UNDEFINED;
3155 p[ 1] = VA_BITS16_UNDEFINED;
3156 p[ 2] = VA_BITS16_UNDEFINED;
3157 p[ 3] = VA_BITS16_UNDEFINED;
3158 p[ 4] = VA_BITS16_UNDEFINED;
3159 p[ 5] = VA_BITS16_UNDEFINED;
3160 p[ 6] = VA_BITS16_UNDEFINED;
3161 p[ 7] = VA_BITS16_UNDEFINED;
3162 p[ 8] = VA_BITS16_UNDEFINED;
3163 p[ 9] = VA_BITS16_UNDEFINED;
3164 p[10] = VA_BITS16_UNDEFINED;
3165 p[11] = VA_BITS16_UNDEFINED;
3166 p[12] = VA_BITS16_UNDEFINED;
3167 p[13] = VA_BITS16_UNDEFINED;
3168 p[14] = VA_BITS16_UNDEFINED;
3169 p[15] = VA_BITS16_UNDEFINED;
3170 p[16] = VA_BITS16_UNDEFINED;
3171 p[17] = VA_BITS16_UNDEFINED;
3172 p[18] = VA_BITS16_UNDEFINED;
3173 p[19] = VA_BITS16_UNDEFINED;
3174 p[20] = VA_BITS16_UNDEFINED;
3175 p[21] = VA_BITS16_UNDEFINED;
3176 p[22] = VA_BITS16_UNDEFINED;
3177 p[23] = VA_BITS16_UNDEFINED;
3178 p[24] = VA_BITS16_UNDEFINED;
3179 p[25] = VA_BITS16_UNDEFINED;
3180 p[26] = VA_BITS16_UNDEFINED;
3181 p[27] = VA_BITS16_UNDEFINED;
3182 p[28] = VA_BITS16_UNDEFINED;
3183 p[29] = VA_BITS16_UNDEFINED;
3184 p[30] = VA_BITS16_UNDEFINED;
3185 p[31] = VA_BITS16_UNDEFINED;
3186 p[32] = VA_BITS16_UNDEFINED;
3187 p[33] = VA_BITS16_UNDEFINED;
3188 p[34] = VA_BITS16_UNDEFINED;
3189 p[35] = VA_BITS16_UNDEFINED;
sewardj7cf4e6b2008-05-01 20:24:26 +00003190 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
3191 set_aligned_word64_Origin_to_undef( base + 8 * 0, otag );
3192 set_aligned_word64_Origin_to_undef( base + 8 * 1, otag );
3193 set_aligned_word64_Origin_to_undef( base + 8 * 2, otag );
3194 set_aligned_word64_Origin_to_undef( base + 8 * 3, otag );
3195 set_aligned_word64_Origin_to_undef( base + 8 * 4, otag );
3196 set_aligned_word64_Origin_to_undef( base + 8 * 5, otag );
3197 set_aligned_word64_Origin_to_undef( base + 8 * 6, otag );
3198 set_aligned_word64_Origin_to_undef( base + 8 * 7, otag );
3199 set_aligned_word64_Origin_to_undef( base + 8 * 8, otag );
3200 set_aligned_word64_Origin_to_undef( base + 8 * 9, otag );
3201 set_aligned_word64_Origin_to_undef( base + 8 * 10, otag );
3202 set_aligned_word64_Origin_to_undef( base + 8 * 11, otag );
3203 set_aligned_word64_Origin_to_undef( base + 8 * 12, otag );
3204 set_aligned_word64_Origin_to_undef( base + 8 * 13, otag );
3205 set_aligned_word64_Origin_to_undef( base + 8 * 14, otag );
3206 set_aligned_word64_Origin_to_undef( base + 8 * 15, otag );
3207 set_aligned_word64_Origin_to_undef( base + 8 * 16, otag );
3208 set_aligned_word64_Origin_to_undef( base + 8 * 17, otag );
3209 set_aligned_word64_Origin_to_undef( base + 8 * 18, otag );
3210 set_aligned_word64_Origin_to_undef( base + 8 * 19, otag );
3211 set_aligned_word64_Origin_to_undef( base + 8 * 20, otag );
3212 set_aligned_word64_Origin_to_undef( base + 8 * 21, otag );
3213 set_aligned_word64_Origin_to_undef( base + 8 * 22, otag );
3214 set_aligned_word64_Origin_to_undef( base + 8 * 23, otag );
3215 set_aligned_word64_Origin_to_undef( base + 8 * 24, otag );
3216 set_aligned_word64_Origin_to_undef( base + 8 * 25, otag );
3217 set_aligned_word64_Origin_to_undef( base + 8 * 26, otag );
3218 set_aligned_word64_Origin_to_undef( base + 8 * 27, otag );
3219 set_aligned_word64_Origin_to_undef( base + 8 * 28, otag );
3220 set_aligned_word64_Origin_to_undef( base + 8 * 29, otag );
3221 set_aligned_word64_Origin_to_undef( base + 8 * 30, otag );
3222 set_aligned_word64_Origin_to_undef( base + 8 * 31, otag );
3223 set_aligned_word64_Origin_to_undef( base + 8 * 32, otag );
3224 set_aligned_word64_Origin_to_undef( base + 8 * 33, otag );
3225 set_aligned_word64_Origin_to_undef( base + 8 * 34, otag );
3226 set_aligned_word64_Origin_to_undef( base + 8 * 35, otag );
3227 }
sewardj2e1a6772006-01-18 04:16:27 +00003228 return;
njn1d0825f2006-03-27 11:37:07 +00003229 }
sewardj2e1a6772006-01-18 04:16:27 +00003230 }
3231 }
3232
sewardj2a3a1a72005-05-12 23:25:43 +00003233 /* else fall into slow case */
sewardj7cf4e6b2008-05-01 20:24:26 +00003234 MC_(make_mem_undefined_w_otag)(base, len, otag);
sewardj826ec492005-05-12 18:05:00 +00003235}
3236
3237
nethercote8b76fe52004-11-08 19:20:09 +00003238/*------------------------------------------------------------*/
3239/*--- Checking memory ---*/
3240/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003241
sewardje4ccc012005-05-02 12:53:38 +00003242typedef
3243 enum {
3244 MC_Ok = 5,
3245 MC_AddrErr = 6,
3246 MC_ValueErr = 7
3247 }
3248 MC_ReadResult;
3249
3250
njn25e49d8e72002-09-23 09:36:25 +00003251/* Check permissions for address range. If inadequate permissions
3252 exist, *bad_addr is set to the offending address, so the caller can
3253 know what it is. */
3254
sewardjecf8e102003-07-12 12:11:39 +00003255/* Returns True if [a .. a+len) is not addressable. Otherwise,
3256 returns False, and if bad_addr is non-NULL, sets *bad_addr to
3257 indicate the lowest failing address. Functions below are
3258 similar. */
njndbf7ca72006-03-31 11:57:59 +00003259Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00003260{
nethercote451eae92004-11-02 13:06:32 +00003261 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00003262 UWord vabits2;
3263
njndbf7ca72006-03-31 11:57:59 +00003264 PROF_EVENT(60, "check_mem_is_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00003265 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00003266 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
njn1d0825f2006-03-27 11:37:07 +00003267 vabits2 = get_vabits2(a);
3268 if (VA_BITS2_NOACCESS != vabits2) {
3269 if (bad_addr != NULL) *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00003270 return False;
3271 }
3272 a++;
3273 }
3274 return True;
3275}
3276
sewardj7cf4e6b2008-05-01 20:24:26 +00003277static Bool is_mem_addressable ( Addr a, SizeT len,
3278 /*OUT*/Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00003279{
nethercote451eae92004-11-02 13:06:32 +00003280 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00003281 UWord vabits2;
3282
njndbf7ca72006-03-31 11:57:59 +00003283 PROF_EVENT(62, "is_mem_addressable");
njn25e49d8e72002-09-23 09:36:25 +00003284 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00003285 PROF_EVENT(63, "is_mem_addressable(loop)");
njn1d0825f2006-03-27 11:37:07 +00003286 vabits2 = get_vabits2(a);
3287 if (VA_BITS2_NOACCESS == vabits2) {
njn25e49d8e72002-09-23 09:36:25 +00003288 if (bad_addr != NULL) *bad_addr = a;
3289 return False;
3290 }
3291 a++;
3292 }
3293 return True;
3294}
3295
sewardj7cf4e6b2008-05-01 20:24:26 +00003296static MC_ReadResult is_mem_defined ( Addr a, SizeT len,
3297 /*OUT*/Addr* bad_addr,
3298 /*OUT*/UInt* otag )
njn25e49d8e72002-09-23 09:36:25 +00003299{
nethercote451eae92004-11-02 13:06:32 +00003300 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00003301 UWord vabits2;
njn25e49d8e72002-09-23 09:36:25 +00003302
njndbf7ca72006-03-31 11:57:59 +00003303 PROF_EVENT(64, "is_mem_defined");
3304 DEBUG("is_mem_defined\n");
sewardj7cf4e6b2008-05-01 20:24:26 +00003305
3306 if (otag) *otag = 0;
3307 if (bad_addr) *bad_addr = 0;
njn25e49d8e72002-09-23 09:36:25 +00003308 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00003309 PROF_EVENT(65, "is_mem_defined(loop)");
njn1d0825f2006-03-27 11:37:07 +00003310 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00003311 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00003312 // Error! Nb: Report addressability errors in preference to
3313 // definedness errors. And don't report definedness errors unless
3314 // --undef-value-errors=yes.
sewardj7cf4e6b2008-05-01 20:24:26 +00003315 if (bad_addr) {
3316 *bad_addr = a;
3317 }
3318 if (VA_BITS2_NOACCESS == vabits2) {
3319 return MC_AddrErr;
3320 }
3321 if (MC_(clo_mc_level) >= 2) {
3322 if (otag && MC_(clo_mc_level) == 3) {
3323 *otag = MC_(helperc_b_load1)( a );
3324 }
3325 return MC_ValueErr;
3326 }
njn25e49d8e72002-09-23 09:36:25 +00003327 }
3328 a++;
3329 }
nethercote8b76fe52004-11-08 19:20:09 +00003330 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00003331}
3332
3333
3334/* Check a zero-terminated ascii string. Tricky -- don't want to
3335 examine the actual bytes, to find the end, until we're sure it is
3336 safe to do so. */
3337
sewardj7cf4e6b2008-05-01 20:24:26 +00003338static MC_ReadResult mc_is_defined_asciiz ( Addr a, Addr* bad_addr, UInt* otag )
njn25e49d8e72002-09-23 09:36:25 +00003339{
njn1d0825f2006-03-27 11:37:07 +00003340 UWord vabits2;
3341
njndbf7ca72006-03-31 11:57:59 +00003342 PROF_EVENT(66, "mc_is_defined_asciiz");
3343 DEBUG("mc_is_defined_asciiz\n");
sewardj7cf4e6b2008-05-01 20:24:26 +00003344
3345 if (otag) *otag = 0;
3346 if (bad_addr) *bad_addr = 0;
njn25e49d8e72002-09-23 09:36:25 +00003347 while (True) {
njndbf7ca72006-03-31 11:57:59 +00003348 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
njn1d0825f2006-03-27 11:37:07 +00003349 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00003350 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00003351 // Error! Nb: Report addressability errors in preference to
3352 // definedness errors. And don't report definedness errors unless
3353 // --undef-value-errors=yes.
sewardj7cf4e6b2008-05-01 20:24:26 +00003354 if (bad_addr) {
3355 *bad_addr = a;
3356 }
3357 if (VA_BITS2_NOACCESS == vabits2) {
3358 return MC_AddrErr;
3359 }
3360 if (MC_(clo_mc_level) >= 2) {
3361 if (otag && MC_(clo_mc_level) == 3) {
3362 *otag = MC_(helperc_b_load1)( a );
3363 }
3364 return MC_ValueErr;
3365 }
njn25e49d8e72002-09-23 09:36:25 +00003366 }
3367 /* Ok, a is safe to read. */
njn1d0825f2006-03-27 11:37:07 +00003368 if (* ((UChar*)a) == 0) {
sewardj45d94cc2005-04-20 14:44:11 +00003369 return MC_Ok;
njn1d0825f2006-03-27 11:37:07 +00003370 }
njn25e49d8e72002-09-23 09:36:25 +00003371 a++;
3372 }
3373}
3374
3375
3376/*------------------------------------------------------------*/
3377/*--- Memory event handlers ---*/
3378/*------------------------------------------------------------*/
3379
njn25e49d8e72002-09-23 09:36:25 +00003380static
njndbf7ca72006-03-31 11:57:59 +00003381void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
3382 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00003383{
njn25e49d8e72002-09-23 09:36:25 +00003384 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00003385 Bool ok = is_mem_addressable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00003386
njn25e49d8e72002-09-23 09:36:25 +00003387 if (!ok) {
3388 switch (part) {
3389 case Vg_CoreSysCall:
sewardj7cf4e6b2008-05-01 20:24:26 +00003390 mc_record_memparam_error ( tid, bad_addr,
3391 /*isAddrErr*/True, s, 0/*otag*/ );
njn25e49d8e72002-09-23 09:36:25 +00003392 break;
3393
njn25e49d8e72002-09-23 09:36:25 +00003394 case Vg_CoreSignal:
njn718d3b12006-12-16 00:54:12 +00003395 mc_record_core_mem_error( tid, /*isAddrErr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00003396 break;
3397
3398 default:
njndbf7ca72006-03-31 11:57:59 +00003399 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00003400 }
3401 }
njn25e49d8e72002-09-23 09:36:25 +00003402}
3403
3404static
njndbf7ca72006-03-31 11:57:59 +00003405void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00003406 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00003407{
sewardj7cf4e6b2008-05-01 20:24:26 +00003408 UInt otag = 0;
njn25e49d8e72002-09-23 09:36:25 +00003409 Addr bad_addr;
sewardj7cf4e6b2008-05-01 20:24:26 +00003410 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr, &otag );
sewardj45f4e7c2005-09-27 19:20:21 +00003411
nethercote8b76fe52004-11-08 19:20:09 +00003412 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00003413 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00003414
njn25e49d8e72002-09-23 09:36:25 +00003415 switch (part) {
3416 case Vg_CoreSysCall:
sewardj7cf4e6b2008-05-01 20:24:26 +00003417 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s,
3418 isAddrErr ? 0 : otag );
njn25e49d8e72002-09-23 09:36:25 +00003419 break;
3420
njn25e49d8e72002-09-23 09:36:25 +00003421 /* If we're being asked to jump to a silly address, record an error
3422 message before potentially crashing the entire system. */
3423 case Vg_CoreTranslate:
njn1d0825f2006-03-27 11:37:07 +00003424 mc_record_jump_error( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00003425 break;
3426
3427 default:
njndbf7ca72006-03-31 11:57:59 +00003428 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00003429 }
3430 }
njn25e49d8e72002-09-23 09:36:25 +00003431}
3432
3433static
njndbf7ca72006-03-31 11:57:59 +00003434void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00003435 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00003436{
nethercote8b76fe52004-11-08 19:20:09 +00003437 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00003438 Addr bad_addr = 0; // shut GCC up
sewardj7cf4e6b2008-05-01 20:24:26 +00003439 UInt otag = 0;
njn25e49d8e72002-09-23 09:36:25 +00003440
njnca82cc02004-11-22 17:18:48 +00003441 tl_assert(part == Vg_CoreSysCall);
sewardj7cf4e6b2008-05-01 20:24:26 +00003442 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr, &otag );
nethercote8b76fe52004-11-08 19:20:09 +00003443 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00003444 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
sewardj7cf4e6b2008-05-01 20:24:26 +00003445 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s,
3446 isAddrErr ? 0 : otag );
njn25e49d8e72002-09-23 09:36:25 +00003447 }
njn25e49d8e72002-09-23 09:36:25 +00003448}
3449
njn25e49d8e72002-09-23 09:36:25 +00003450static
nethercote451eae92004-11-02 13:06:32 +00003451void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00003452{
njndbf7ca72006-03-31 11:57:59 +00003453 /* Ignore the permissions, just make it defined. Seems to work... */
njnba7b4582006-09-21 15:59:30 +00003454 // This works because: code is defined; initialised variables get put in
3455 // the data segment and are defined; and uninitialised variables get put
3456 // in the bss segment and are auto-zeroed (and so defined).
3457 //
3458 // It's possible that there will be padding between global variables.
3459 // This will also be auto-zeroed, and marked as defined by Memcheck. If
3460 // a program uses it, Memcheck will not complain. This is arguably a
3461 // false negative, but it's a grey area -- the behaviour is defined (the
3462 // padding is zeroed) but it's probably not what the user intended. And
3463 // we can't avoid it.
nethercote451eae92004-11-02 13:06:32 +00003464 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
njndbf7ca72006-03-31 11:57:59 +00003465 a, (ULong)len, rr, ww, xx);
3466 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00003467}
3468
3469static
njnb8dca862005-03-14 02:42:44 +00003470void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00003471{
njndbf7ca72006-03-31 11:57:59 +00003472 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00003473}
3474
njncf45fd42004-11-24 16:30:22 +00003475static
3476void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
3477{
njndbf7ca72006-03-31 11:57:59 +00003478 MC_(make_mem_defined)(a, len);
njncf45fd42004-11-24 16:30:22 +00003479}
njn25e49d8e72002-09-23 09:36:25 +00003480
sewardj45d94cc2005-04-20 14:44:11 +00003481
njn25e49d8e72002-09-23 09:36:25 +00003482/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00003483/*--- Register event handlers ---*/
3484/*------------------------------------------------------------*/
3485
sewardj7cf4e6b2008-05-01 20:24:26 +00003486/* Try and get a nonzero origin for the guest state section of thread
3487 tid characterised by (offset,size). Return 0 if nothing to show
3488 for it. */
3489static UInt mb_get_origin_for_guest_offset ( ThreadId tid,
3490 Int offset, SizeT size )
3491{
3492 Int sh2off;
3493 UChar area[6];
3494 UInt otag;
3495 sh2off = MC_(get_otrack_shadow_offset)( offset, size );
3496 if (sh2off == -1)
3497 return 0; /* This piece of guest state is not tracked */
3498 tl_assert(sh2off >= 0);
3499 tl_assert(0 == (sh2off % 4));
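   /* area[0] and area[5] are sentinel bytes; the asserts below check
      that VG_(get_shadow_regs_area) stayed within the 4 bytes between
      them. */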
3500 area[0] = 0x31;
3501 area[5] = 0x27;
3502 VG_(get_shadow_regs_area)( tid, &area[1], 2/*shadowno*/,sh2off,4 );
3503 tl_assert(area[0] == 0x31);
3504 tl_assert(area[5] == 0x27);
3505 otag = *(UInt*)&area[1];
3506 return otag;
3507}
3508
3509
sewardj45d94cc2005-04-20 14:44:11 +00003510/* When some chunk of guest state is written, mark the corresponding
3511 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00003512 chunks of guest state, hence the _SIZE value, which has to be as
3513 big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00003514*/
3515static void mc_post_reg_write ( CorePart part, ThreadId tid,
3516 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00003517{
sewardj05a46732006-10-17 01:28:10 +00003518# define MAX_REG_WRITE_SIZE 1408
cerion21082042005-12-06 19:07:08 +00003519 UChar area[MAX_REG_WRITE_SIZE];
3520 tl_assert(size <= MAX_REG_WRITE_SIZE);
njn1d0825f2006-03-27 11:37:07 +00003521 VG_(memset)(area, V_BITS8_DEFINED, size);
sewardj7cf4e6b2008-05-01 20:24:26 +00003522 VG_(set_shadow_regs_area)( tid, 1/*shadowNo*/,offset,size, area );
cerion21082042005-12-06 19:07:08 +00003523# undef MAX_REG_WRITE_SIZE
njnd3040452003-05-19 15:04:06 +00003524}
3525
sewardj45d94cc2005-04-20 14:44:11 +00003526static
3527void mc_post_reg_write_clientcall ( ThreadId tid,
3528 OffT offset, SizeT size,
3529 Addr f)
njnd3040452003-05-19 15:04:06 +00003530{
njncf45fd42004-11-24 16:30:22 +00003531 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00003532}
3533
sewardj45d94cc2005-04-20 14:44:11 +00003534/* Look at the definedness of the guest's shadow state for
3535 [offset, offset+len). If any part of that is undefined, record
3536 a parameter error.
3537*/
3538static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
3539 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00003540{
sewardj45d94cc2005-04-20 14:44:11 +00003541 Int i;
3542 Bool bad;
sewardj7cf4e6b2008-05-01 20:24:26 +00003543 UInt otag;
sewardj45d94cc2005-04-20 14:44:11 +00003544
3545 UChar area[16];
3546 tl_assert(size <= 16);
3547
sewardj7cf4e6b2008-05-01 20:24:26 +00003548 VG_(get_shadow_regs_area)( tid, area, 1/*shadowNo*/,offset,size );
sewardj45d94cc2005-04-20 14:44:11 +00003549
3550 bad = False;
3551 for (i = 0; i < size; i++) {
njn1d0825f2006-03-27 11:37:07 +00003552 if (area[i] != V_BITS8_DEFINED) {
sewardj2c27f702005-05-03 18:19:05 +00003553 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00003554 break;
3555 }
nethercote8b76fe52004-11-08 19:20:09 +00003556 }
3557
sewardj7cf4e6b2008-05-01 20:24:26 +00003558 if (!bad)
3559 return;
3560
3561 /* We've found some undefinedness. See if we can also find an
3562 origin for it. */
3563 otag = mb_get_origin_for_guest_offset( tid, offset, size );
3564 mc_record_regparam_error ( tid, s, otag );
nethercote8b76fe52004-11-08 19:20:09 +00003565}
njnd3040452003-05-19 15:04:06 +00003566
njn25e49d8e72002-09-23 09:36:25 +00003567
sewardj6cf40ff2005-04-20 22:31:26 +00003568/*------------------------------------------------------------*/
njn718d3b12006-12-16 00:54:12 +00003569/*--- Error types ---*/
njn1d0825f2006-03-27 11:37:07 +00003570/*------------------------------------------------------------*/
3571
sewardj7cf4e6b2008-05-01 20:24:26 +00003572/* Did we show the user any errors for which an uninitialised
3573 value origin could have been collected (but wasn't)? If yes,
3574 then, at the end of the run, print a one-line message advising that
3575 a rerun with --track-origins=yes might help. */
3576static Bool any_value_errors = False;
3577
3578
njn718d3b12006-12-16 00:54:12 +00003579// Different kinds of blocks.
3580typedef enum {
3581 Block_Mallocd = 111,
3582 Block_Freed,
3583 Block_Mempool,
3584 Block_MempoolChunk,
3585 Block_UserG
3586} BlockKind;
3587
3588/* ------------------ Addresses -------------------- */
3589
njn1d0825f2006-03-27 11:37:07 +00003590/* The classification of a faulting address. */
3591typedef
3592 enum {
sewardjb8b79ad2008-03-03 01:35:41 +00003593 Addr_Undescribed, // as-yet unclassified
3594 Addr_Unknown, // classification yielded nothing useful
3595 Addr_Block, // in malloc'd/free'd block
3596 Addr_Stack, // on a thread's stack
3597 Addr_DataSym, // in a global data sym
3598 Addr_Variable, // variable described by the debug info
3599 Addr_SectKind // last-ditch classification attempt
njn1d0825f2006-03-27 11:37:07 +00003600 }
njn718d3b12006-12-16 00:54:12 +00003601 AddrTag;
njn1d0825f2006-03-27 11:37:07 +00003602
njn1d0825f2006-03-27 11:37:07 +00003603typedef
njn718d3b12006-12-16 00:54:12 +00003604 struct _AddrInfo
njn1d0825f2006-03-27 11:37:07 +00003605 AddrInfo;
3606
njn718d3b12006-12-16 00:54:12 +00003607struct _AddrInfo {
3608 AddrTag tag;
3609 union {
3610 // As-yet unclassified.
3611 struct { } Undescribed;
njn1d0825f2006-03-27 11:37:07 +00003612
njn718d3b12006-12-16 00:54:12 +00003613 // On a stack.
3614 struct {
3615 ThreadId tid; // Which thread's stack?
3616 } Stack;
njn1d0825f2006-03-27 11:37:07 +00003617
njn718d3b12006-12-16 00:54:12 +00003618 // This covers heap blocks (normal and from mempools) and user-defined
3619 // blocks.
3620 struct {
3621 BlockKind block_kind;
3622 Char* block_desc; // "block", "mempool" or user-defined
3623 SizeT block_szB;
3624 OffT rwoffset;
3625 ExeContext* lastchange;
3626 } Block;
njn1d0825f2006-03-27 11:37:07 +00003627
sewardjb8b79ad2008-03-03 01:35:41 +00003628 // In a global .data symbol. This holds the first 63 chars of
3629 // the variable's name (zero terminated), plus an offset.
3630 struct {
3631 Char name[128];
3632 OffT offset;
3633 } DataSym;
3634
3635 // Is described by Dwarf debug info. Arbitrary strings. Must
3636 // be the same length.
3637 struct {
3638 Char descr1[96];
3639 Char descr2[96];
3640 } Variable;
3641
3642 // Could only narrow it down to be the PLT/GOT/etc of a given
3643 // object. Better than nothing, perhaps.
3644 struct {
3645 Char objname[128];
3646 VgSectKind kind;
3647 } SectKind;
3648
njn718d3b12006-12-16 00:54:12 +00003649 // Classification yielded nothing useful.
3650 struct { } Unknown;
3651
3652 } Addr;
3653};
3654
3655/* ------------------ Errors ----------------------- */
njn1d0825f2006-03-27 11:37:07 +00003656
3657/* What kind of error it is. */
3658typedef
njn718d3b12006-12-16 00:54:12 +00003659 enum {
3660 Err_Value,
3661 Err_Cond,
3662 Err_CoreMem,
3663 Err_Addr,
3664 Err_Jump,
3665 Err_RegParam,
3666 Err_MemParam,
3667 Err_User,
3668 Err_Free,
3669 Err_FreeMismatch,
3670 Err_Overlap,
3671 Err_Leak,
3672 Err_IllegalMempool,
njn1d0825f2006-03-27 11:37:07 +00003673 }
njn718d3b12006-12-16 00:54:12 +00003674 MC_ErrorTag;
njn1d0825f2006-03-27 11:37:07 +00003675
njn1d0825f2006-03-27 11:37:07 +00003676
njn718d3b12006-12-16 00:54:12 +00003677typedef struct _MC_Error MC_Error;
3678
3679struct _MC_Error {
3680 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
3681 //MC_ErrorTag tag;
3682
3683 union {
3684 // Use of an undefined value:
3685 // - as a pointer in a load or store
3686 // - as a jump target
3687 struct {
sewardj7cf4e6b2008-05-01 20:24:26 +00003688 SizeT szB; // size of value in bytes
3689 // Origin info
3690 UInt otag; // origin tag
3691 ExeContext* origin_ec; // filled in later
njn718d3b12006-12-16 00:54:12 +00003692 } Value;
3693
3694 // Use of an undefined value in a conditional branch or move.
3695 struct {
sewardj7cf4e6b2008-05-01 20:24:26 +00003696 // Origin info
3697 UInt otag; // origin tag
3698 ExeContext* origin_ec; // filled in later
njn718d3b12006-12-16 00:54:12 +00003699 } Cond;
3700
3701 // Addressability error in core (signal-handling) operation.
3702 // It would be good to get rid of this error kind by merging it
3703 // with another one somehow.
3704 struct {
3705 } CoreMem;
3706
3707 // Use of an unaddressable memory location in a load or store.
3708 struct {
3709 Bool isWrite; // read or write?
3710 SizeT szB; // not used for exec (jump) errors
3711 Bool maybe_gcc; // True if just below %esp -- could be a gcc bug
3712 AddrInfo ai;
3713 } Addr;
3714
3715 // Jump to an unaddressable memory location.
3716 struct {
3717 AddrInfo ai;
3718 } Jump;
3719
3720 // System call register input contains undefined bytes.
3721 struct {
sewardj7cf4e6b2008-05-01 20:24:26 +00003722 // Origin info
3723 UInt otag; // origin tag
3724 ExeContext* origin_ec; // filled in later
njn718d3b12006-12-16 00:54:12 +00003725 } RegParam;
3726
3727 // System call memory input contains undefined/unaddressable bytes
3728 struct {
3729 Bool isAddrErr; // Addressability or definedness error?
3730 AddrInfo ai;
sewardj7cf4e6b2008-05-01 20:24:26 +00003731 // Origin info
3732 UInt otag; // origin tag
3733 ExeContext* origin_ec; // filled in later
njn718d3b12006-12-16 00:54:12 +00003734 } MemParam;
3735
3736 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
3737 struct {
3738 Bool isAddrErr; // Addressability or definedness error?
3739 AddrInfo ai;
sewardj7cf4e6b2008-05-01 20:24:26 +00003740 // Origin info
3741 UInt otag; // origin tag
3742 ExeContext* origin_ec; // filled in later
njn718d3b12006-12-16 00:54:12 +00003743 } User;
3744
3745 // Program tried to free() something that's not a heap block (this
3746 // covers double-frees).
3747 struct {
3748 AddrInfo ai;
3749 } Free;
3750
3751 // Program allocates a heap block with one function
3752 // (malloc/new/new[]/custom) and deallocates it with a non-matching one.
3753 struct {
3754 AddrInfo ai;
3755 } FreeMismatch;
3756
3757 // Call to strcpy, memcpy, etc, with overlapping blocks.
3758 struct {
3759 Addr src; // Source block
3760 Addr dst; // Destination block
3761 Int szB; // Size in bytes; 0 if unused.
3762 } Overlap;
3763
3764 // A memory leak.
3765 struct {
3766 UInt n_this_record;
3767 UInt n_total_records;
3768 LossRecord* lossRecord;
3769 } Leak;
3770
3771 // A memory pool error.
3772 struct {
3773 AddrInfo ai;
3774 } IllegalMempool;
3775
3776 } Err;
3777};
3778
njn1d0825f2006-03-27 11:37:07 +00003779
3780/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00003781/*--- Printing errors ---*/
3782/*------------------------------------------------------------*/
3783
njn718d3b12006-12-16 00:54:12 +00003784static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
njn1d0825f2006-03-27 11:37:07 +00003785{
3786 HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
3787 HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";
3788
njn718d3b12006-12-16 00:54:12 +00003789 switch (ai->tag) {
3790 case Addr_Unknown:
3791 if (maybe_gcc) {
njn1d0825f2006-03-27 11:37:07 +00003792 VG_(message)(Vg_UserMsg,
3793 "%sAddress 0x%llx is just below the stack ptr. "
3794 "To suppress, use: --workaround-gcc296-bugs=yes%s",
3795 xpre, (ULong)a, xpost
3796 );
3797 } else {
3798 VG_(message)(Vg_UserMsg,
3799 "%sAddress 0x%llx "
3800 "is not stack'd, malloc'd or (recently) free'd%s",
3801 xpre, (ULong)a, xpost);
3802 }
3803 break;
njn718d3b12006-12-16 00:54:12 +00003804
3805 case Addr_Stack:
3806 VG_(message)(Vg_UserMsg,
3807 "%sAddress 0x%llx is on thread %d's stack%s",
3808 xpre, (ULong)a, ai->Addr.Stack.tid, xpost);
3809 break;
3810
3811 case Addr_Block: {
3812 SizeT block_szB = ai->Addr.Block.block_szB;
3813 OffT rwoffset = ai->Addr.Block.rwoffset;
njn1d0825f2006-03-27 11:37:07 +00003814 SizeT delta;
3815 const Char* relative;
njn1d0825f2006-03-27 11:37:07 +00003816
njn718d3b12006-12-16 00:54:12 +00003817 if (rwoffset < 0) {
3818 delta = (SizeT)(-rwoffset);
njn1d0825f2006-03-27 11:37:07 +00003819 relative = "before";
njn718d3b12006-12-16 00:54:12 +00003820 } else if (rwoffset >= block_szB) {
3821 delta = rwoffset - block_szB;
njn1d0825f2006-03-27 11:37:07 +00003822 relative = "after";
3823 } else {
njn718d3b12006-12-16 00:54:12 +00003824 delta = rwoffset;
njn1d0825f2006-03-27 11:37:07 +00003825 relative = "inside";
3826 }
3827 VG_(message)(Vg_UserMsg,
3828 "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
3829 xpre,
njn718d3b12006-12-16 00:54:12 +00003830 a, delta, relative, ai->Addr.Block.block_desc,
3831 block_szB,
3832 ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
3833 : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
3834 : "client-defined",
njn1d0825f2006-03-27 11:37:07 +00003835 xpost);
njn718d3b12006-12-16 00:54:12 +00003836 VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
njn1d0825f2006-03-27 11:37:07 +00003837 break;
3838 }
njn718d3b12006-12-16 00:54:12 +00003839
sewardjb8b79ad2008-03-03 01:35:41 +00003840 case Addr_DataSym:
3841 VG_(message)(Vg_UserMsg,
3842 "%sAddress 0x%llx is %llu bytes "
3843 "inside data symbol \"%t\"%s",
3844 xpre,
3845 (ULong)a,
3846 (ULong)ai->Addr.DataSym.offset,
3847 ai->Addr.DataSym.name,
3848 xpost);
3849 break;
3850
3851 case Addr_Variable:
3852 if (ai->Addr.Variable.descr1[0] != '\0')
3853 VG_(message)(Vg_UserMsg, "%s%s%s",
3854 xpre, ai->Addr.Variable.descr1, xpost);
3855 if (ai->Addr.Variable.descr2[0] != '\0')
3856 VG_(message)(Vg_UserMsg, "%s%s%s",
3857 xpre, ai->Addr.Variable.descr2, xpost);
3858 break;
3859
3860 case Addr_SectKind:
3861 VG_(message)(Vg_UserMsg,
3862 "%sAddress 0x%llx is in the %t segment of %t%s",
3863 xpre,
3864 (ULong)a,
3865 VG_(pp_SectKind)(ai->Addr.SectKind.kind),
3866 ai->Addr.SectKind.objname,
3867 xpost);
3868 break;
3869
njn1d0825f2006-03-27 11:37:07 +00003870 default:
3871 VG_(tool_panic)("mc_pp_AddrInfo");
3872 }
3873}
3874
njn718d3b12006-12-16 00:54:12 +00003875static const HChar* str_leak_lossmode ( Reachedness lossmode )
njn9e63cb62005-05-08 18:34:59 +00003876{
njn718d3b12006-12-16 00:54:12 +00003877 const HChar *loss = "?";
3878 switch (lossmode) {
3879 case Unreached: loss = "definitely lost"; break;
3880 case IndirectLeak: loss = "indirectly lost"; break;
3881 case Interior: loss = "possibly lost"; break;
3882 case Proper: loss = "still reachable"; break;
3883 }
3884 return loss;
3885}
njn9e63cb62005-05-08 18:34:59 +00003886
njn718d3b12006-12-16 00:54:12 +00003887static const HChar* xml_leak_kind ( Reachedness lossmode )
3888{
3889 const HChar *loss = "?";
3890 switch (lossmode) {
3891 case Unreached: loss = "Leak_DefinitelyLost"; break;
3892 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
3893 case Interior: loss = "Leak_PossiblyLost"; break;
3894 case Proper: loss = "Leak_StillReachable"; break;
3895 }
3896 return loss;
3897}
3898
3899static void mc_pp_msg( Char* xml_name, Error* err, const HChar* format, ... )
3900{
sewardj71bc3cb2005-05-19 00:25:45 +00003901 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
3902 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
njn718d3b12006-12-16 00:54:12 +00003903 Char buf[256];
3904 va_list vargs;
3905
3906 if (VG_(clo_xml))
3907 VG_(message)(Vg_UserMsg, " <kind>%s</kind>", xml_name);
3908 // Stick xpre and xpost on the front and back of the format string.
3909 VG_(snprintf)(buf, 256, "%s%s%s", xpre, format, xpost);
3910 va_start(vargs, format);
3911 VG_(vmessage) ( Vg_UserMsg, buf, vargs );
3912 va_end(vargs);
3913 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
3914}
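
/* As a concrete sketch of what this produces: a call like
      mc_pp_msg("InvalidWrite", err, "Invalid write of size %d", 4)
   prints, in plain-text mode,
      Invalid write of size 4
   followed by the error's stack trace, whereas with --xml=yes it prints
      <kind>InvalidWrite</kind>
      <what>Invalid write of size 4</what>
   (indented) followed by the stack trace. */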
3915
sewardj7cf4e6b2008-05-01 20:24:26 +00003916static void mc_pp_origin ( ExeContext* ec, UInt okind )
3917{
3918 HChar* src = NULL;
3919 HChar* xpre = VG_(clo_xml) ? " <what>" : " ";
3920 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
3921 tl_assert(ec);
3922
3923 switch (okind) {
3924 case MC_OKIND_STACK: src = " by a stack allocation"; break;
3925 case MC_OKIND_HEAP: src = " by a heap allocation"; break;
3926 case MC_OKIND_USER: src = " by a client request"; break;
3927 case MC_OKIND_UNKNOWN: src = ""; break;
3928 }
3929 tl_assert(src); /* guards against invalid 'okind' */
3930
3931 if (VG_(clo_xml)) {
3932 VG_(message)(Vg_UserMsg, " <origin>");
3933 }
3934
3935 VG_(message)(Vg_UserMsg, "%sUninitialised value was created%s%s",
3936 xpre, src, xpost);
3937 VG_(pp_ExeContext)( ec );
3938 if (VG_(clo_xml)) {
3939 VG_(message)(Vg_UserMsg, " </origin>");
3940 }
3941}
3942
njn718d3b12006-12-16 00:54:12 +00003943static void mc_pp_Error ( Error* err )
3944{
3945 MC_Error* extra = VG_(get_error_extra)(err);
sewardj71bc3cb2005-05-19 00:25:45 +00003946
njn9e63cb62005-05-08 18:34:59 +00003947 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00003948 case Err_CoreMem: {
3949 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
3950 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
3951 signal handler frame. --njn */
3952 mc_pp_msg("CoreMemError", err,
3953 "%s contains unaddressable byte(s)",
3954 VG_(get_error_string)(err));
njn9e63cb62005-05-08 18:34:59 +00003955 break;
njn9e63cb62005-05-08 18:34:59 +00003956 }
3957
njn718d3b12006-12-16 00:54:12 +00003958 case Err_Value:
sewardj7cf4e6b2008-05-01 20:24:26 +00003959 any_value_errors = True;
3960 if (1 || extra->Err.Value.otag == 0) {
3961 mc_pp_msg("UninitValue", err,
3962 "Use of uninitialised value of size %d",
3963 extra->Err.Value.szB);
3964 } else {
3965 mc_pp_msg("UninitValue", err,
3966 "Use of uninitialised value of size %d (otag %u)",
3967 extra->Err.Value.szB, extra->Err.Value.otag);
3968 }
3969 if (extra->Err.Value.origin_ec)
3970 mc_pp_origin( extra->Err.Value.origin_ec,
3971 extra->Err.Value.otag & 3 );
njn718d3b12006-12-16 00:54:12 +00003972 break;
3973
3974 case Err_Cond:
sewardj7cf4e6b2008-05-01 20:24:26 +00003975 any_value_errors = True;
3976 if (1 || extra->Err.Cond.otag == 0) {
3977 mc_pp_msg("UninitCondition", err,
3978 "Conditional jump or move depends"
3979 " on uninitialised value(s)");
3980 } else {
3981 mc_pp_msg("UninitCondition", err,
3982 "Conditional jump or move depends"
3983 " on uninitialised value(s) (otag %u)",
3984 extra->Err.Cond.otag);
3985 }
3986 if (extra->Err.Cond.origin_ec)
3987 mc_pp_origin( extra->Err.Cond.origin_ec,
3988 extra->Err.Cond.otag & 3 );
njn718d3b12006-12-16 00:54:12 +00003989 break;
3990
3991 case Err_RegParam:
sewardj7cf4e6b2008-05-01 20:24:26 +00003992 any_value_errors = True;
njn718d3b12006-12-16 00:54:12 +00003993 mc_pp_msg("SyscallParam", err,
3994 "Syscall param %s contains uninitialised byte(s)",
3995 VG_(get_error_string)(err));
sewardj7cf4e6b2008-05-01 20:24:26 +00003996 if (extra->Err.RegParam.origin_ec)
3997 mc_pp_origin( extra->Err.RegParam.origin_ec,
3998 extra->Err.RegParam.otag & 3 );
njn718d3b12006-12-16 00:54:12 +00003999 break;
4000
4001 case Err_MemParam:
sewardj7cf4e6b2008-05-01 20:24:26 +00004002 if (!extra->Err.MemParam.isAddrErr)
4003 any_value_errors = True;
njn718d3b12006-12-16 00:54:12 +00004004 mc_pp_msg("SyscallParam", err,
4005 "Syscall param %s points to %s byte(s)",
4006 VG_(get_error_string)(err),
4007 ( extra->Err.MemParam.isAddrErr
4008 ? "unaddressable" : "uninitialised" ));
4009 mc_pp_AddrInfo(VG_(get_error_address)(err),
4010 &extra->Err.MemParam.ai, False);
sewardj7cf4e6b2008-05-01 20:24:26 +00004011 if (extra->Err.MemParam.origin_ec && !extra->Err.MemParam.isAddrErr)
4012 mc_pp_origin( extra->Err.MemParam.origin_ec,
4013 extra->Err.MemParam.otag & 3 );
njn718d3b12006-12-16 00:54:12 +00004014 break;
4015
4016 case Err_User:
sewardj7cf4e6b2008-05-01 20:24:26 +00004017 if (!extra->Err.User.isAddrErr)
4018 any_value_errors = True;
njn718d3b12006-12-16 00:54:12 +00004019 mc_pp_msg("ClientCheck", err,
4020 "%s byte(s) found during client check request",
4021 ( extra->Err.User.isAddrErr
4022 ? "Unaddressable" : "Uninitialised" ));
4023 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
4024 False);
sewardj7cf4e6b2008-05-01 20:24:26 +00004025 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
4026 mc_pp_origin( extra->Err.User.origin_ec,
4027 extra->Err.User.otag & 3 );
njn718d3b12006-12-16 00:54:12 +00004028 break;
4029
4030 case Err_Free:
4031 mc_pp_msg("InvalidFree", err,
4032 "Invalid free() / delete / delete[]");
4033 mc_pp_AddrInfo(VG_(get_error_address)(err),
4034 &extra->Err.Free.ai, False);
4035 break;
4036
4037 case Err_FreeMismatch:
4038 mc_pp_msg("MismatchedFree", err,
4039 "Mismatched free() / delete / delete []");
4040 mc_pp_AddrInfo(VG_(get_error_address)(err),
4041 &extra->Err.FreeMismatch.ai, False);
4042 break;
4043
4044 case Err_Addr:
4045 if (extra->Err.Addr.isWrite) {
4046 mc_pp_msg("InvalidWrite", err,
4047 "Invalid write of size %d",
4048 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00004049 } else {
njn718d3b12006-12-16 00:54:12 +00004050 mc_pp_msg("InvalidRead", err,
4051 "Invalid read of size %d",
4052 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00004053 }
njn718d3b12006-12-16 00:54:12 +00004054 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Addr.ai,
4055 extra->Err.Addr.maybe_gcc);
njn9e63cb62005-05-08 18:34:59 +00004056 break;
4057
njn718d3b12006-12-16 00:54:12 +00004058 case Err_Jump:
4059 mc_pp_msg("InvalidJump", err,
4060 "Jump to the invalid address stated on the next line");
4061 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Jump.ai,
4062 False);
njn9e63cb62005-05-08 18:34:59 +00004063 break;
njn1d0825f2006-03-27 11:37:07 +00004064
njn718d3b12006-12-16 00:54:12 +00004065 case Err_Overlap:
4066 if (extra->Err.Overlap.szB == 0)
4067 mc_pp_msg("Overlap", err,
4068 "Source and destination overlap in %s(%p, %p)",
4069 VG_(get_error_string)(err),
4070 extra->Err.Overlap.dst, extra->Err.Overlap.src);
njn1d0825f2006-03-27 11:37:07 +00004071 else
njn718d3b12006-12-16 00:54:12 +00004072 mc_pp_msg("Overlap", err,
4073 "Source and destination overlap in %s(%p, %p, %d)",
4074 VG_(get_error_string)(err),
4075 extra->Err.Overlap.dst, extra->Err.Overlap.src,
4076 extra->Err.Overlap.szB);
njn1d0825f2006-03-27 11:37:07 +00004077 break;
njn1d0825f2006-03-27 11:37:07 +00004078
njn718d3b12006-12-16 00:54:12 +00004079 case Err_IllegalMempool:
4080 mc_pp_msg("InvalidMemPool", err,
4081 "Illegal memory pool address");
4082 mc_pp_AddrInfo(VG_(get_error_address)(err),
4083 &extra->Err.IllegalMempool.ai, False);
njn1d0825f2006-03-27 11:37:07 +00004084 break;
4085
njn718d3b12006-12-16 00:54:12 +00004086 case Err_Leak: {
4087 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
4088 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
4089 UInt n_this_record = extra->Err.Leak.n_this_record;
4090 UInt n_total_records = extra->Err.Leak.n_total_records;
4091 LossRecord* l = extra->Err.Leak.lossRecord;
4092
4093 if (VG_(clo_xml)) {
4094 VG_(message)(Vg_UserMsg, " <kind>%t</kind>",
4095 xml_leak_kind(l->loss_mode));
4096 } else {
4097 VG_(message)(Vg_UserMsg, "");
4098 }
4099
4100 if (l->indirect_bytes) {
4101 VG_(message)(Vg_UserMsg,
4102 "%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
4103 " are %s in loss record %,u of %,u%s",
4104 xpre,
4105 l->total_bytes + l->indirect_bytes,
4106 l->total_bytes, l->indirect_bytes, l->num_blocks,
4107 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
4108 xpost
4109 );
4110 if (VG_(clo_xml)) {
4111 // Nb: don't put commas in these XML numbers
4112 VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
4113 l->total_bytes + l->indirect_bytes);
4114 VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
4115 l->num_blocks);
4116 }
4117 } else {
4118 VG_(message)(
4119 Vg_UserMsg,
4120 "%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
4121 xpre,
4122 l->total_bytes, l->num_blocks,
4123 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
4124 xpost
4125 );
4126 if (VG_(clo_xml)) {
4127 VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
4128 l->total_bytes);
4129 VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
4130 l->num_blocks);
4131 }
4132 }
4133 VG_(pp_ExeContext)(l->allocated_at);
4134 break;
4135 }
4136
njn1d0825f2006-03-27 11:37:07 +00004137 default:
4138 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
4139 VG_(get_error_kind)(err));
4140 VG_(tool_panic)("unknown error code in mc_pp_Error)");
njn9e63cb62005-05-08 18:34:59 +00004141 }
4142}
4143
4144/*------------------------------------------------------------*/
4145/*--- Recording errors ---*/
4146/*------------------------------------------------------------*/
4147
njn1d0825f2006-03-27 11:37:07 +00004148/* This many bytes below %ESP are considered addressable if we're
4149 doing the --workaround-gcc296-bugs hack. */
4150#define VG_GCC296_BUG_STACK_SLOP 1024
4151
4152/* Is this address within some small distance below %ESP? Used only
4153 for the --workaround-gcc296-bugs kludge. */
4154static Bool is_just_below_ESP( Addr esp, Addr aa )
4155{
4156 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
4157 return True;
4158 else
4159 return False;
4160}
4161
njn718d3b12006-12-16 00:54:12 +00004162/* --- Called from generated and non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00004163
njn718d3b12006-12-16 00:54:12 +00004164static void mc_record_address_error ( ThreadId tid, Addr a, Int szB,
njn1d0825f2006-03-27 11:37:07 +00004165 Bool isWrite )
4166{
njn718d3b12006-12-16 00:54:12 +00004167 MC_Error extra;
sewardj05a46732006-10-17 01:28:10 +00004168 Bool just_below_esp;
4169
4170 if (in_ignored_range(a))
4171 return;
4172
4173# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
4174 /* AIX zero-page handling. On AIX, reads from page zero are,
4175 bizarrely enough, legitimate. Writes to page zero aren't,
4176 though. Since memcheck can't distinguish reads from writes, the
4177 best we can do is to 'act normal' and mark the A bits in the
4178 normal way as noaccess, but then hide any reads from that page
4179 that get reported here. */
njn718d3b12006-12-16 00:54:12 +00004180 if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
sewardj05a46732006-10-17 01:28:10 +00004181 return;
4182
4183 /* Appalling AIX hack. It suppresses reads done by glink
4184 fragments. Getting rid of this would require figuring out
4185 somehow where the referenced data areas are (and their
4186 sizes). */
njn718d3b12006-12-16 00:54:12 +00004187 if ((!isWrite) && szB == sizeof(Word)) {
sewardj05a46732006-10-17 01:28:10 +00004188 UInt i1, i2;
4189 UInt* pc = (UInt*)VG_(get_IP)(tid);
4190 if (sizeof(Word) == 4) {
4191 i1 = 0x800c0000; /* lwz r0,0(r12) */
4192 i2 = 0x804c0004; /* lwz r2,4(r12) */
4193 } else {
4194 i1 = 0xe80c0000; /* ld r0,0(r12) */
4195 i2 = 0xe84c0008; /* ld r2,8(r12) */
4196 }
4197 if (pc[0] == i1 && pc[1] == i2) return;
4198 if (pc[0] == i2 && pc[-1] == i1) return;
4199 }
4200# endif
njn1d0825f2006-03-27 11:37:07 +00004201
4202 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
4203
4204 /* If this is caused by an access immediately below %ESP, and the
4205 user asks nicely, we just ignore it. */
4206 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
4207 return;
4208
njn718d3b12006-12-16 00:54:12 +00004209 extra.Err.Addr.isWrite = isWrite;
4210 extra.Err.Addr.szB = szB;
4211 extra.Err.Addr.maybe_gcc = just_below_esp;
4212 extra.Err.Addr.ai.tag = Addr_Undescribed;
4213 VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00004214}
4215
sewardj7cf4e6b2008-05-01 20:24:26 +00004216static void mc_record_value_error ( ThreadId tid, Int szB, UInt otag )
njn718d3b12006-12-16 00:54:12 +00004217{
4218 MC_Error extra;
sewardj7cf4e6b2008-05-01 20:24:26 +00004219 tl_assert( MC_(clo_mc_level) >= 2 );
4220 if (otag > 0)
4221 tl_assert( MC_(clo_mc_level) == 3 );
4222 extra.Err.Value.szB = szB;
4223 extra.Err.Value.otag = otag;
4224 extra.Err.Value.origin_ec = NULL; /* Filled in later */
njn718d3b12006-12-16 00:54:12 +00004225 VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
4226}
4227
sewardj7cf4e6b2008-05-01 20:24:26 +00004228static void mc_record_cond_error ( ThreadId tid, UInt otag )
njn718d3b12006-12-16 00:54:12 +00004229{
sewardj7cf4e6b2008-05-01 20:24:26 +00004230 MC_Error extra;
4231 tl_assert( MC_(clo_mc_level) >= 2 );
4232 if (otag > 0)
4233 tl_assert( MC_(clo_mc_level) == 3 );
4234 extra.Err.Cond.otag = otag;
4235 extra.Err.Cond.origin_ec = NULL; /* Filled in later */
4236 VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
njn718d3b12006-12-16 00:54:12 +00004237}
4238
4239/* --- Called from non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00004240
4241/* This is for memory errors in pthread functions, as opposed to pthread API
4242 errors which are found by the core. */
njn718d3b12006-12-16 00:54:12 +00004243static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* msg )
njn1d0825f2006-03-27 11:37:07 +00004244{
njn718d3b12006-12-16 00:54:12 +00004245 VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
njn1d0825f2006-03-27 11:37:07 +00004246}
4247
sewardj7cf4e6b2008-05-01 20:24:26 +00004248static void mc_record_regparam_error ( ThreadId tid, Char* msg, UInt otag )
njn1d0825f2006-03-27 11:37:07 +00004249{
sewardj7cf4e6b2008-05-01 20:24:26 +00004250 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00004251 tl_assert(VG_INVALID_THREADID != tid);
sewardj7cf4e6b2008-05-01 20:24:26 +00004252 if (otag > 0)
4253 tl_assert( MC_(clo_mc_level) == 3 );
4254 extra.Err.RegParam.otag = otag;
4255 extra.Err.RegParam.origin_ec = NULL; /* Filled in later */
4256 VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
njn718d3b12006-12-16 00:54:12 +00004257}
4258
4259static void mc_record_memparam_error ( ThreadId tid, Addr a,
sewardj7cf4e6b2008-05-01 20:24:26 +00004260 Bool isAddrErr, Char* msg, UInt otag )
njn718d3b12006-12-16 00:54:12 +00004261{
4262 MC_Error extra;
4263 tl_assert(VG_INVALID_THREADID != tid);
4264 if (!isAddrErr)
sewardj7cf4e6b2008-05-01 20:24:26 +00004265 tl_assert( MC_(clo_mc_level) >= 2 );
4266 if (otag != 0) {
4267 tl_assert( MC_(clo_mc_level) == 3 );
4268 tl_assert( !isAddrErr );
4269 }
njn718d3b12006-12-16 00:54:12 +00004270 extra.Err.MemParam.isAddrErr = isAddrErr;
4271 extra.Err.MemParam.ai.tag = Addr_Undescribed;
sewardj7cf4e6b2008-05-01 20:24:26 +00004272 extra.Err.MemParam.otag = otag;
4273 extra.Err.MemParam.origin_ec = NULL; /* Filled in later */
njn718d3b12006-12-16 00:54:12 +00004274 VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
njn1d0825f2006-03-27 11:37:07 +00004275}
4276
4277static void mc_record_jump_error ( ThreadId tid, Addr a )
4278{
njn718d3b12006-12-16 00:54:12 +00004279 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00004280 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00004281 extra.Err.Jump.ai.tag = Addr_Undescribed;
4282 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00004283}
4284
4285void MC_(record_free_error) ( ThreadId tid, Addr a )
4286{
njn718d3b12006-12-16 00:54:12 +00004287 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00004288 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00004289 extra.Err.Free.ai.tag = Addr_Undescribed;
4290 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
4291}
4292
4293void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
4294{
4295 MC_Error extra;
4296 AddrInfo* ai = &extra.Err.FreeMismatch.ai;
4297 tl_assert(VG_INVALID_THREADID != tid);
4298 ai->tag = Addr_Block;
4299 ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed'
4300 ai->Addr.Block.block_desc = "block";
4301 ai->Addr.Block.block_szB = mc->szB;
4302 ai->Addr.Block.rwoffset = 0;
4303 ai->Addr.Block.lastchange = mc->where;
4304 VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
4305 &extra );
njn1d0825f2006-03-27 11:37:07 +00004306}
4307
4308void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
4309{
njn718d3b12006-12-16 00:54:12 +00004310 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00004311 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00004312 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
4313 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00004314}
4315
njn718d3b12006-12-16 00:54:12 +00004316static void mc_record_overlap_error ( ThreadId tid, Char* function,
4317 Addr src, Addr dst, SizeT szB )
njn1d0825f2006-03-27 11:37:07 +00004318{
njn718d3b12006-12-16 00:54:12 +00004319 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00004320 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00004321 extra.Err.Overlap.src = src;
4322 extra.Err.Overlap.dst = dst;
4323 extra.Err.Overlap.szB = szB;
njn1d0825f2006-03-27 11:37:07 +00004324 VG_(maybe_record_error)(
njn718d3b12006-12-16 00:54:12 +00004325 tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
njn1d0825f2006-03-27 11:37:07 +00004326}
4327
njn718d3b12006-12-16 00:54:12 +00004328Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
4329 UInt n_total_records, LossRecord* lossRecord,
4330 Bool print_record )
njn1d0825f2006-03-27 11:37:07 +00004331{
njn718d3b12006-12-16 00:54:12 +00004332 MC_Error extra;
4333 extra.Err.Leak.n_this_record = n_this_record;
4334 extra.Err.Leak.n_total_records = n_total_records;
4335 extra.Err.Leak.lossRecord = lossRecord;
njn1d0825f2006-03-27 11:37:07 +00004336 return
njn718d3b12006-12-16 00:54:12 +00004337 VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
4338 lossRecord->allocated_at, print_record,
njn1d0825f2006-03-27 11:37:07 +00004339 /*allow_GDB_attach*/False, /*count_error*/False );
4340}
4341
sewardj7cf4e6b2008-05-01 20:24:26 +00004342static void mc_record_user_error ( ThreadId tid, Addr a,
4343 Bool isAddrErr, UInt otag )
njn9e63cb62005-05-08 18:34:59 +00004344{
njn718d3b12006-12-16 00:54:12 +00004345 MC_Error extra;
sewardj7cf4e6b2008-05-01 20:24:26 +00004346 if (otag != 0) {
4347 tl_assert(!isAddrErr);
4348 tl_assert( MC_(clo_mc_level) == 3 );
4349 }
4350 if (!isAddrErr) {
4351 tl_assert( MC_(clo_mc_level) >= 2 );
4352 }
njn9e63cb62005-05-08 18:34:59 +00004353 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00004354 extra.Err.User.isAddrErr = isAddrErr;
4355 extra.Err.User.ai.tag = Addr_Undescribed;
sewardj7cf4e6b2008-05-01 20:24:26 +00004356 extra.Err.User.otag = otag;
4357 extra.Err.User.origin_ec = NULL; /* Filled in later */
njn718d3b12006-12-16 00:54:12 +00004358 VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
njn9e63cb62005-05-08 18:34:59 +00004359}
4360
njn718d3b12006-12-16 00:54:12 +00004361/*------------------------------------------------------------*/
4362/*--- Other error operations ---*/
4363/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004364
4365/* Compare error contexts, to detect duplicates. Note that if they
4366 are otherwise the same, the faulting addrs and associated rwoffsets
4367 are allowed to be different. */
4368static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
4369{
njn718d3b12006-12-16 00:54:12 +00004370 MC_Error* extra1 = VG_(get_error_extra)(e1);
4371 MC_Error* extra2 = VG_(get_error_extra)(e2);
njn1d0825f2006-03-27 11:37:07 +00004372
4373 /* Guaranteed by calling function */
4374 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
4375
4376 switch (VG_(get_error_kind)(e1)) {
njn718d3b12006-12-16 00:54:12 +00004377 case Err_CoreMem: {
njn1d0825f2006-03-27 11:37:07 +00004378 Char *e1s, *e2s;
njn1d0825f2006-03-27 11:37:07 +00004379 e1s = VG_(get_error_string)(e1);
4380 e2s = VG_(get_error_string)(e2);
njn718d3b12006-12-16 00:54:12 +00004381 if (e1s == e2s) return True;
4382 if (VG_STREQ(e1s, e2s)) return True;
njn1d0825f2006-03-27 11:37:07 +00004383 return False;
4384 }
4385
njn718d3b12006-12-16 00:54:12 +00004386 case Err_RegParam:
4387 return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
4388
njn1d0825f2006-03-27 11:37:07 +00004389 // Perhaps we should also check the addrinfo.akinds for equality.
4390 // That would result in more error reports, but only in cases where
4391 // a register contains uninitialised bytes and points to memory
4392 // containing uninitialised bytes. Currently, the 2nd of those to be
4393 // detected won't be reported. That is (nearly?) always the memory
4394 // error, which is good.
njn718d3b12006-12-16 00:54:12 +00004395 case Err_MemParam:
4396 if (!VG_STREQ(VG_(get_error_string)(e1),
4397 VG_(get_error_string)(e2))) return False;
njn1d0825f2006-03-27 11:37:07 +00004398 // fall through
njn718d3b12006-12-16 00:54:12 +00004399 case Err_User:
4400 return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
4401 ? True : False );
4402
4403 case Err_Free:
4404 case Err_FreeMismatch:
4405 case Err_Jump:
4406 case Err_IllegalMempool:
4407 case Err_Overlap:
4408 case Err_Cond:
njn1d0825f2006-03-27 11:37:07 +00004409 return True;
4410
njn718d3b12006-12-16 00:54:12 +00004411 case Err_Addr:
4412 return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
4413 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00004414
njn718d3b12006-12-16 00:54:12 +00004415 case Err_Value:
4416 return ( extra1->Err.Value.szB == extra2->Err.Value.szB
4417 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00004418
njn718d3b12006-12-16 00:54:12 +00004419 case Err_Leak:
4420 VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
njn1d0825f2006-03-27 11:37:07 +00004421 "since it's handled with VG_(unique_error)()!");
4422
njn1d0825f2006-03-27 11:37:07 +00004423 default:
4424 VG_(printf)("Error:\n unknown error code %d\n",
4425 VG_(get_error_kind)(e1));
4426 VG_(tool_panic)("unknown error code in mc_eq_Error");
4427 }
4428}
4429
4430/* Function used when searching MC_Chunk lists */
4431static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
4432{
4433 // Nb: this is not quite right! It assumes that the heap block has
4434 // a redzone of size MC_MALLOC_REDZONE_SZB. That's true for malloc'd
4435 // blocks, but not necessarily true for custom-alloc'd blocks. So
4436 // in some cases this could result in an incorrect description (eg.
4437 // saying "12 bytes after block A" when really it's within block B).
4438 // Fixing would require adding redzone size to MC_Chunks, though.
njn718d3b12006-12-16 00:54:12 +00004439 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
njn1d0825f2006-03-27 11:37:07 +00004440 MC_MALLOC_REDZONE_SZB );
4441}
4442
4443// Forward declaration
4444static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
4445
njn718d3b12006-12-16 00:54:12 +00004446
njn1d0825f2006-03-27 11:37:07 +00004447/* Describe an address as best you can, for error messages,
4448 putting the result in ai. */
sewardjb8b79ad2008-03-03 01:35:41 +00004449static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
njn1d0825f2006-03-27 11:37:07 +00004450{
sewardjb8b79ad2008-03-03 01:35:41 +00004451 MC_Chunk* mc;
4452 ThreadId tid;
4453 Addr stack_min, stack_max;
4454 VgSectKind sect;
njn718d3b12006-12-16 00:54:12 +00004455
4456 tl_assert(Addr_Undescribed == ai->tag);
njn1d0825f2006-03-27 11:37:07 +00004457
4458 /* Perhaps it's a user-def'd block? */
sewardjb8b79ad2008-03-03 01:35:41 +00004459 if (client_perm_maybe_describe( a, ai )) {
njn1d0825f2006-03-27 11:37:07 +00004460 return;
njn1d0825f2006-03-27 11:37:07 +00004461 }
4462 /* Search for a recently freed block which might bracket it. */
4463 mc = MC_(get_freed_list_head)();
4464 while (mc) {
4465 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00004466 ai->tag = Addr_Block;
4467 ai->Addr.Block.block_kind = Block_Freed;
4468 ai->Addr.Block.block_desc = "block";
4469 ai->Addr.Block.block_szB = mc->szB;
4470 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
4471 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00004472 return;
4473 }
4474 mc = mc->next;
4475 }
4476 /* Search for a currently malloc'd block which might bracket it. */
4477 VG_(HT_ResetIter)(MC_(malloc_list));
4478 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
4479 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00004480 ai->tag = Addr_Block;
4481 ai->Addr.Block.block_kind = Block_Mallocd;
4482 ai->Addr.Block.block_desc = "block";
4483 ai->Addr.Block.block_szB = mc->szB;
4484 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
4485 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00004486 return;
4487 }
4488 }
sewardjb8b79ad2008-03-03 01:35:41 +00004489 /* Perhaps the variable type/location data describes it? */
4490 tl_assert(sizeof(ai->Addr.Variable.descr1)
4491 == sizeof(ai->Addr.Variable.descr2));
4492 VG_(memset)( &ai->Addr.Variable.descr1,
4493 0, sizeof(ai->Addr.Variable.descr1));
4494 VG_(memset)( &ai->Addr.Variable.descr2,
4495 0, sizeof(ai->Addr.Variable.descr2));
4496 if (VG_(get_data_description)(
4497 &ai->Addr.Variable.descr1[0],
4498 &ai->Addr.Variable.descr2[0],
4499 sizeof(ai->Addr.Variable.descr1)-1,
4500 a )) {
4501 ai->tag = Addr_Variable;
4502 tl_assert( ai->Addr.Variable.descr1
4503 [ sizeof(ai->Addr.Variable.descr1)-1 ] == 0);
4504 tl_assert( ai->Addr.Variable.descr2
4505 [ sizeof(ai->Addr.Variable.descr2)-1 ] == 0);
4506 return;
4507 }
4508 /* Have a look at the low level data symbols - perhaps it's in
4509 there. */
4510 VG_(memset)( &ai->Addr.DataSym.name,
4511 0, sizeof(ai->Addr.DataSym.name));
4512 if (VG_(get_datasym_and_offset)(
4513 a, &ai->Addr.DataSym.name[0],
4514 sizeof(ai->Addr.DataSym.name)-1,
4515 &ai->Addr.DataSym.offset )) {
4516 ai->tag = Addr_DataSym;
4517 tl_assert( ai->Addr.DataSym.name
4518 [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
4519 return;
4520 }
4521 /* Perhaps it's on a thread's stack? */
4522 VG_(thread_stack_reset_iter)(&tid);
4523 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
4524 if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
4525 ai->tag = Addr_Stack;
4526 ai->Addr.Stack.tid = tid;
4527 return;
4528 }
4529 }
4530 /* last ditch attempt at classification */
4531 tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
4532 VG_(memset)( &ai->Addr.SectKind.objname,
4533 0, sizeof(ai->Addr.SectKind.objname));
4534 VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
4535 sect = VG_(seginfo_sect_kind)( &ai->Addr.SectKind.objname[0],
4536 sizeof(ai->Addr.SectKind.objname)-1, a);
4537 if (sect != Vg_SectUnknown) {
4538 ai->tag = Addr_SectKind;
4539 ai->Addr.SectKind.kind = sect;
4540 tl_assert( ai->Addr.SectKind.objname
4541 [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
4542 return;
4543 }
njn1d0825f2006-03-27 11:37:07 +00004544 /* Clueless ... */
njn718d3b12006-12-16 00:54:12 +00004545 ai->tag = Addr_Unknown;
njn1d0825f2006-03-27 11:37:07 +00004546 return;
4547}
4548
sewardj7cf4e6b2008-05-01 20:24:26 +00004549/* Fill in *origin_ec as specified by otag, or NULL it out if otag
4550 does not refer to a known origin. */
4551static void update_origin ( /*OUT*/ExeContext** origin_ec,
4552 UInt otag )
4553{
4554 UInt ecu = otag & ~3;
4555 *origin_ec = NULL;
4556 if (VG_(is_plausible_ECU)(ecu)) {
4557 *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
4558 }
4559}
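
/* The convention relied on here and in mc_pp_origin above: the low 2
   bits of an origin tag hold the origin kind (one of the MC_OKIND_*
   values) and the remaining bits hold the ECU of the origin's
   ExeContext, ECUs being 4-aligned.  So, purely as a sketch, for a tag
   built as (ecu | MC_OKIND_HEAP):

      okind = otag & 3;     // MC_OKIND_HEAP
      ecu   = otag & ~3;    // back to the ExeContext unique id
*/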
4560
njn1d0825f2006-03-27 11:37:07 +00004561/* Updates the copy with address info if necessary (but not for all errors). */
4562static UInt mc_update_extra( Error* err )
4563{
njn718d3b12006-12-16 00:54:12 +00004564 MC_Error* extra = VG_(get_error_extra)(err);
4565
njn1d0825f2006-03-27 11:37:07 +00004566 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00004567 // These ones don't have addresses associated with them, and so don't
njn1d0825f2006-03-27 11:37:07 +00004568 // need any updating.
njn718d3b12006-12-16 00:54:12 +00004569 case Err_CoreMem:
sewardj7cf4e6b2008-05-01 20:24:26 +00004570 //case Err_Value:
4571 //case Err_Cond:
njn718d3b12006-12-16 00:54:12 +00004572 case Err_Overlap:
njn718d3b12006-12-16 00:54:12 +00004573 // For Err_Leaks the returned size does not matter -- they are always
sewardjb8b79ad2008-03-03 01:35:41 +00003574 // shown with VG_(unique_error)() so their 'extra' is not copied. But
4575 // we make it consistent with the others.
njn718d3b12006-12-16 00:54:12 +00004576 case Err_Leak:
njn1d0825f2006-03-27 11:37:07 +00004577 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00004578
sewardj7cf4e6b2008-05-01 20:24:26 +00004579 // For value errors, get the ExeContext corresponding to the
4580 // origin tag. Note that it is a kludge to assume that
4581 // a length-1 trace indicates a stack origin. FIXME.
4582 case Err_Value:
4583 update_origin( &extra->Err.Value.origin_ec,
4584 extra->Err.Value.otag );
4585 return sizeof(MC_Error);
4586 case Err_Cond:
4587 update_origin( &extra->Err.Cond.origin_ec,
4588 extra->Err.Cond.otag );
4589 return sizeof(MC_Error);
4590 case Err_RegParam:
4591 update_origin( &extra->Err.RegParam.origin_ec,
4592 extra->Err.RegParam.otag );
4593 return sizeof(MC_Error);
4594
njn718d3b12006-12-16 00:54:12 +00004595 // These ones always involve a memory address.
4596 case Err_Addr:
sewardjb8b79ad2008-03-03 01:35:41 +00004597 describe_addr ( VG_(get_error_address)(err),
4598 &extra->Err.Addr.ai );
njn1d0825f2006-03-27 11:37:07 +00004599 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00004600 case Err_MemParam:
sewardjb8b79ad2008-03-03 01:35:41 +00004601 describe_addr ( VG_(get_error_address)(err),
4602 &extra->Err.MemParam.ai );
sewardj7cf4e6b2008-05-01 20:24:26 +00004603 update_origin( &extra->Err.MemParam.origin_ec,
4604 extra->Err.MemParam.otag );
njn1d0825f2006-03-27 11:37:07 +00004605 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00004606 case Err_Jump:
sewardjb8b79ad2008-03-03 01:35:41 +00004607 describe_addr ( VG_(get_error_address)(err),
4608 &extra->Err.Jump.ai );
njn718d3b12006-12-16 00:54:12 +00004609 return sizeof(MC_Error);
4610 case Err_User:
sewardjb8b79ad2008-03-03 01:35:41 +00004611 describe_addr ( VG_(get_error_address)(err),
4612 &extra->Err.User.ai );
sewardj7cf4e6b2008-05-01 20:24:26 +00004613 update_origin( &extra->Err.User.origin_ec,
4614 extra->Err.User.otag );
njn718d3b12006-12-16 00:54:12 +00004615 return sizeof(MC_Error);
4616 case Err_Free:
sewardjb8b79ad2008-03-03 01:35:41 +00004617 describe_addr ( VG_(get_error_address)(err),
4618 &extra->Err.Free.ai );
njn718d3b12006-12-16 00:54:12 +00004619 return sizeof(MC_Error);
4620 case Err_IllegalMempool:
4621 describe_addr ( VG_(get_error_address)(err),
4622 &extra->Err.IllegalMempool.ai );
4623 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00004624
njn718d3b12006-12-16 00:54:12 +00004625 // Err_FreeMismatches have already had their address described; this is
njn1d0825f2006-03-27 11:37:07 +00004626 // possible because we have the MC_Chunk on hand when the error is
4627 // detected. However, the address may be part of a user block, and if so
4628 // we override the pre-determined description with a user block one.
njn718d3b12006-12-16 00:54:12 +00004629 case Err_FreeMismatch: {
4630 tl_assert(extra && Block_Mallocd ==
4631 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
njn1d0825f2006-03-27 11:37:07 +00004632 (void)client_perm_maybe_describe( VG_(get_error_address)(err),
njn718d3b12006-12-16 00:54:12 +00004633 &extra->Err.FreeMismatch.ai );
njn1d0825f2006-03-27 11:37:07 +00004634 return sizeof(MC_Error);
4635 }
4636
njn1d0825f2006-03-27 11:37:07 +00004637 default: VG_(tool_panic)("mc_update_extra: bad errkind");
4638 }
4639}
4640
njn9e63cb62005-05-08 18:34:59 +00004641/*------------------------------------------------------------*/
4642/*--- Suppressions ---*/
4643/*------------------------------------------------------------*/
4644
njn718d3b12006-12-16 00:54:12 +00004645typedef
4646 enum {
4647 ParamSupp, // Bad syscall params
4648 UserSupp, // Errors arising from client-request checks
4649 CoreMemSupp, // Memory errors in core (pthread ops, signal handling)
4650
4651 // Undefined value errors of given size
4652 Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,
4653
4654 // Undefined value error in conditional.
4655 CondSupp,
4656
4657 // Unaddressable read/write attempt at given size
4658 Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,
4659
4660 JumpSupp, // Jump to unaddressable target
4661 FreeSupp, // Invalid or mismatching free
4662 OverlapSupp, // Overlapping blocks in memcpy(), strcpy(), etc
4663 LeakSupp, // Something to be suppressed in a leak check.
4664 MempoolSupp, // Memory pool suppression.
4665 }
4666 MC_SuppKind;
4667
njn51d827b2005-05-09 01:02:08 +00004668static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00004669{
4670 SuppKind skind;
4671
njn1d0825f2006-03-27 11:37:07 +00004672 if (VG_STREQ(name, "Param")) skind = ParamSupp;
sewardj6362bb52006-11-28 00:15:35 +00004673 else if (VG_STREQ(name, "User")) skind = UserSupp;
njn1d0825f2006-03-27 11:37:07 +00004674 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
4675 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
4676 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
4677 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
4678 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
4679 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
njn718d3b12006-12-16 00:54:12 +00004680 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
njn1d0825f2006-03-27 11:37:07 +00004681 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
4682 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
4683 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
4684 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
njn718d3b12006-12-16 00:54:12 +00004685 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
4686 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
njn9e63cb62005-05-08 18:34:59 +00004687 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
4688 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
4689 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
4690 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
4691 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
4692 else
4693 return False;
4694
4695 VG_(set_supp_kind)(su, skind);
4696 return True;
4697}
4698
njn1d0825f2006-03-27 11:37:07 +00004699static
4700Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
4701{
4702 Bool eof;
4703
4704 if (VG_(get_supp_kind)(su) == ParamSupp) {
4705 eof = VG_(get_line) ( fd, buf, nBuf );
4706 if (eof) return False;
4707 VG_(set_supp_string)(su, VG_(strdup)(buf));
4708 }
4709 return True;
4710}
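
/* For reference, the kind names recognised above are what appears
   after the "Memcheck:" prefix in a suppressions file.  A made-up
   entry that these two functions would accept looks like:

      {
         ignore-sendto-noise
         Memcheck:Param
         socketcall.sendto(msg)
         fun:sendto
         obj:*libc*.so*
      }

   For Param suppressions the extra line after the kind (the syscall
   parameter name) is the one fetched by mc_read_extra_suppression_info
   and later matched against the error string; the suppression name,
   frame and object patterns above are invented for illustration. */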
4711
4712static Bool mc_error_matches_suppression(Error* err, Supp* su)
4713{
njn718d3b12006-12-16 00:54:12 +00004714 Int su_szB;
4715 MC_Error* extra = VG_(get_error_extra)(err);
4716 ErrorKind ekind = VG_(get_error_kind )(err);
njn1d0825f2006-03-27 11:37:07 +00004717
4718 switch (VG_(get_supp_kind)(su)) {
4719 case ParamSupp:
njn718d3b12006-12-16 00:54:12 +00004720 return ((ekind == Err_RegParam || ekind == Err_MemParam)
njn1d0825f2006-03-27 11:37:07 +00004721 && VG_STREQ(VG_(get_error_string)(err),
4722 VG_(get_supp_string)(su)));
4723
sewardj6362bb52006-11-28 00:15:35 +00004724 case UserSupp:
njn718d3b12006-12-16 00:54:12 +00004725 return (ekind == Err_User);
sewardj6362bb52006-11-28 00:15:35 +00004726
njn1d0825f2006-03-27 11:37:07 +00004727 case CoreMemSupp:
njn718d3b12006-12-16 00:54:12 +00004728 return (ekind == Err_CoreMem
njn1d0825f2006-03-27 11:37:07 +00004729 && VG_STREQ(VG_(get_error_string)(err),
4730 VG_(get_supp_string)(su)));
4731
njn718d3b12006-12-16 00:54:12 +00004732 case Value1Supp: su_szB = 1; goto value_case;
4733 case Value2Supp: su_szB = 2; goto value_case;
4734 case Value4Supp: su_szB = 4; goto value_case;
4735 case Value8Supp: su_szB = 8; goto value_case;
4736 case Value16Supp:su_szB =16; goto value_case;
njn1d0825f2006-03-27 11:37:07 +00004737 value_case:
njn718d3b12006-12-16 00:54:12 +00004738 return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
njn1d0825f2006-03-27 11:37:07 +00004739
njn718d3b12006-12-16 00:54:12 +00004740 case CondSupp:
4741 return (ekind == Err_Cond);
4742
4743 case Addr1Supp: su_szB = 1; goto addr_case;
4744 case Addr2Supp: su_szB = 2; goto addr_case;
4745 case Addr4Supp: su_szB = 4; goto addr_case;
4746 case Addr8Supp: su_szB = 8; goto addr_case;
4747 case Addr16Supp:su_szB =16; goto addr_case;
njn1d0825f2006-03-27 11:37:07 +00004748 addr_case:
njn718d3b12006-12-16 00:54:12 +00004749 return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
4750
4751 case JumpSupp:
4752 return (ekind == Err_Jump);
njn1d0825f2006-03-27 11:37:07 +00004753
4754 case FreeSupp:
njn718d3b12006-12-16 00:54:12 +00004755 return (ekind == Err_Free || ekind == Err_FreeMismatch);
njn1d0825f2006-03-27 11:37:07 +00004756
4757 case OverlapSupp:
njn718d3b12006-12-16 00:54:12 +00004758 return (ekind == Err_Overlap);
njn1d0825f2006-03-27 11:37:07 +00004759
4760 case LeakSupp:
njn718d3b12006-12-16 00:54:12 +00004761 return (ekind == Err_Leak);
njn1d0825f2006-03-27 11:37:07 +00004762
4763 case MempoolSupp:
njn718d3b12006-12-16 00:54:12 +00004764 return (ekind == Err_IllegalMempool);
njn1d0825f2006-03-27 11:37:07 +00004765
4766 default:
4767 VG_(printf)("Error:\n"
4768 " unknown suppression type %d\n",
4769 VG_(get_supp_kind)(su));
4770 VG_(tool_panic)("unknown suppression type in "
4771 "MC_(error_matches_suppression)");
4772 }
4773}
4774
4775static Char* mc_get_error_name ( Error* err )
4776{
njn1d0825f2006-03-27 11:37:07 +00004777 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00004778 case Err_RegParam: return "Param";
4779 case Err_MemParam: return "Param";
4780 case Err_User: return "User";
4781 case Err_FreeMismatch: return "Free";
4782 case Err_IllegalMempool: return "Mempool";
4783 case Err_Free: return "Free";
4784 case Err_Jump: return "Jump";
4785 case Err_CoreMem: return "CoreMem";
4786 case Err_Overlap: return "Overlap";
4787 case Err_Leak: return "Leak";
4788 case Err_Cond: return "Cond";
4789 case Err_Addr: {
4790 MC_Error* extra = VG_(get_error_extra)(err);
4791 switch ( extra->Err.Addr.szB ) {
njn1d0825f2006-03-27 11:37:07 +00004792 case 1: return "Addr1";
4793 case 2: return "Addr2";
4794 case 4: return "Addr4";
4795 case 8: return "Addr8";
4796 case 16: return "Addr16";
4797 default: VG_(tool_panic)("unexpected size for Addr");
4798 }
njn718d3b12006-12-16 00:54:12 +00004799 }
4800 case Err_Value: {
4801 MC_Error* extra = VG_(get_error_extra)(err);
4802 switch ( extra->Err.Value.szB ) {
njn1d0825f2006-03-27 11:37:07 +00004803 case 1: return "Value1";
4804 case 2: return "Value2";
4805 case 4: return "Value4";
4806 case 8: return "Value8";
4807 case 16: return "Value16";
4808 default: VG_(tool_panic)("unexpected size for Value");
4809 }
njn718d3b12006-12-16 00:54:12 +00004810 }
njn1d0825f2006-03-27 11:37:07 +00004811 default: VG_(tool_panic)("get_error_name: unexpected type");
4812 }
njn1d0825f2006-03-27 11:37:07 +00004813}
4814
4815static void mc_print_extra_suppression_info ( Error* err )
4816{
njn718d3b12006-12-16 00:54:12 +00004817 ErrorKind ekind = VG_(get_error_kind )(err);
4818 if (Err_RegParam == ekind || Err_MemParam == ekind) {
njn1d0825f2006-03-27 11:37:07 +00004819 VG_(printf)(" %s\n", VG_(get_error_string)(err));
4820 }
4821}
4822
njn9e63cb62005-05-08 18:34:59 +00004823/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00004824/*--- Functions called directly from generated code: ---*/
4825/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00004826/*------------------------------------------------------------*/
4827
njn1d0825f2006-03-27 11:37:07 +00004828/* Types: LOADV32, LOADV16, LOADV8 are:
sewardj6cf40ff2005-04-20 22:31:26 +00004829 UWord fn ( Addr a )
4830 so they return 32-bits on 32-bit machines and 64-bits on
4831 64-bit machines. Addr has the same size as a host word.
4832
njn1d0825f2006-03-27 11:37:07 +00004833 LOADV64 is always ULong fn ( Addr a )
sewardj6cf40ff2005-04-20 22:31:26 +00004834
njn1d0825f2006-03-27 11:37:07 +00004835 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
4836 are a UWord, and for STOREV64 they are a ULong.
sewardj6cf40ff2005-04-20 22:31:26 +00004837*/
4838
njn1d0825f2006-03-27 11:37:07 +00004839/* If any part of '_a' indicated by the mask is 1, either
njn45e81252006-03-28 12:35:08 +00004840 '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
njn1d0825f2006-03-27 11:37:07 +00004841 covered by the primary map. */
njn45e81252006-03-28 12:35:08 +00004842#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
njn1d0825f2006-03-27 11:37:07 +00004843#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
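
/* A worked example of what the mask computes, assuming a 32-bit build
   where N_PRIMARY_MAP is 0x10000 (the real value is defined earlier in
   this file; the figure here is only illustrative).  For an 8-byte
   access:

      MASK(8) = ~( (0x10000 - 8) | ((0x10000 - 1) << 16) )
              = ~( 0x0000FFF8    |  0xFFFF0000 )
              =    0x00000007

   so UNALIGNED_OR_HIGH(a,64) reduces to 'a & 7', a plain 8-byte
   alignment check.  On 64-bit builds N_PRIMARY_MAP covers only the
   bottom part of the address space, so the complement also has all the
   bits above the primary map's reach set; an address too high to have
   a secondary map therefore fails the test as well and is handled by
   the slow path. */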
4844
4845
sewardj95448072004-11-22 20:19:51 +00004846/* ------------------------ Size = 8 ------------------------ */
4847
njn1d0825f2006-03-27 11:37:07 +00004848static INLINE
4849ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
4850{
4851 UWord sm_off16, vabits16;
4852 SecMap* sm;
4853
4854 PROF_EVENT(200, "mc_LOADV64");
4855
4856#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00004857 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004858#else
bart5dd8e6a2008-03-22 08:04:29 +00004859 if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00004860 PROF_EVENT(201, "mc_LOADV64-slow1");
njn45e81252006-03-28 12:35:08 +00004861 return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
sewardjf9d81612005-04-23 23:25:49 +00004862 }
4863
njna7c7ebd2006-03-28 12:51:02 +00004864 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004865 sm_off16 = SM_OFF_16(a);
4866 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
4867
4868 // Handle common case quickly: a is suitably aligned, is mapped, and
4869 // addressable.
4870 // Convert V bits from compact memory form to expanded register form.
bart5dd8e6a2008-03-22 08:04:29 +00004871 if (LIKELY(vabits16 == VA_BITS16_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00004872 return V_BITS64_DEFINED;
bart5dd8e6a2008-03-22 08:04:29 +00004873 } else if (LIKELY(vabits16 == VA_BITS16_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00004874 return V_BITS64_UNDEFINED;
4875 } else {
njndbf7ca72006-03-31 11:57:59 +00004876 /* Slow case: the 8 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00004877 PROF_EVENT(202, "mc_LOADV64-slow2");
njn45e81252006-03-28 12:35:08 +00004878 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004879 }
4880#endif
4881}
4882
4883VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
4884{
4885 return mc_LOADV64(a, True);
4886}
4887VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
4888{
4889 return mc_LOADV64(a, False);
4890}
sewardjf9d81612005-04-23 23:25:49 +00004891
sewardjf9d81612005-04-23 23:25:49 +00004892
njn1d0825f2006-03-27 11:37:07 +00004893static INLINE
njn4cf530b2006-04-06 13:33:48 +00004894void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00004895{
4896 UWord sm_off16, vabits16;
4897 SecMap* sm;
4898
4899 PROF_EVENT(210, "mc_STOREV64");
4900
4901#ifndef PERF_FAST_STOREV
4902 // XXX: this slow case seems to be marginally faster than the fast case!
4903 // Investigate further.
njn4cf530b2006-04-06 13:33:48 +00004904 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004905#else
bart5dd8e6a2008-03-22 08:04:29 +00004906 if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00004907 PROF_EVENT(211, "mc_STOREV64-slow1");
njn4cf530b2006-04-06 13:33:48 +00004908 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004909 return;
sewardjf9d81612005-04-23 23:25:49 +00004910 }
4911
njna7c7ebd2006-03-28 12:51:02 +00004912 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004913 sm_off16 = SM_OFF_16(a);
4914 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
4915
bart5dd8e6a2008-03-22 08:04:29 +00004916 if (LIKELY( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00004917 (VA_BITS16_DEFINED == vabits16 ||
4918 VA_BITS16_UNDEFINED == vabits16) ))
njn1d0825f2006-03-27 11:37:07 +00004919 {
4920 /* Handle common case quickly: a is suitably aligned, */
4921 /* is mapped, and is addressable. */
4922 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00004923 if (V_BITS64_DEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00004924 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00004925 } else if (V_BITS64_UNDEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00004926 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00004927 } else {
4928 /* Slow but general case -- writing partially defined bytes. */
4929 PROF_EVENT(212, "mc_STOREV64-slow2");
njn4cf530b2006-04-06 13:33:48 +00004930 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004931 }
4932 } else {
4933 /* Slow but general case. */
4934 PROF_EVENT(213, "mc_STOREV64-slow3");
njn4cf530b2006-04-06 13:33:48 +00004935 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004936 }
4937#endif
4938}
4939
njn4cf530b2006-04-06 13:33:48 +00004940VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00004941{
njn4cf530b2006-04-06 13:33:48 +00004942 mc_STOREV64(a, vbits64, True);
njn1d0825f2006-03-27 11:37:07 +00004943}
njn4cf530b2006-04-06 13:33:48 +00004944VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00004945{
njn4cf530b2006-04-06 13:33:48 +00004946 mc_STOREV64(a, vbits64, False);
njn1d0825f2006-03-27 11:37:07 +00004947}
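
/* Concretely, and relying on the V-bit convention set up earlier in
   this file (a V bit of 1 means "undefined"): a store whose vbits64 is
   all zeroes (every bit defined) or all ones (every bit undefined) can
   be folded into a single 16-bit write of VA_BITS16_DEFINED or
   VA_BITS16_UNDEFINED respectively.  Anything mixed -- say a value
   whose low word is undefined and whose high word is defined -- is not
   one of those two homogeneous patterns, so it drops through to
   mc_STOREVn_slow, which records the V bits byte by byte. */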
sewardj95448072004-11-22 20:19:51 +00004948
sewardj95448072004-11-22 20:19:51 +00004949
4950/* ------------------------ Size = 4 ------------------------ */
4951
njn1d0825f2006-03-27 11:37:07 +00004952static INLINE
4953UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
4954{
4955 UWord sm_off, vabits8;
4956 SecMap* sm;
4957
4958 PROF_EVENT(220, "mc_LOADV32");
4959
4960#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00004961 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004962#else
bart5dd8e6a2008-03-22 08:04:29 +00004963 if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00004964 PROF_EVENT(221, "mc_LOADV32-slow1");
njn45e81252006-03-28 12:35:08 +00004965 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00004966 }
4967
njna7c7ebd2006-03-28 12:51:02 +00004968 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004969 sm_off = SM_OFF(a);
4970 vabits8 = sm->vabits8[sm_off];
4971
4972 // Handle common case quickly: a is suitably aligned, is mapped, and the
4973 // entire word32 it lives in is addressable.
4974 // Convert V bits from compact memory form to expanded register form.
4975 // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
4976 // Almost certainly not necessary, but be paranoid.
bart5dd8e6a2008-03-22 08:04:29 +00004977 if (LIKELY(vabits8 == VA_BITS8_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00004978 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
bart5dd8e6a2008-03-22 08:04:29 +00004979 } else if (LIKELY(vabits8 == VA_BITS8_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00004980 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
4981 } else {
njndbf7ca72006-03-31 11:57:59 +00004982 /* Slow case: the 4 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00004983 PROF_EVENT(222, "mc_LOADV32-slow2");
njn45e81252006-03-28 12:35:08 +00004984 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004985 }
4986#endif
4987}
4988
4989VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
4990{
4991 return mc_LOADV32(a, True);
4992}
4993VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
4994{
4995 return mc_LOADV32(a, False);
4996}
sewardjc1a2cda2005-04-21 17:34:00 +00004997
sewardjc1a2cda2005-04-21 17:34:00 +00004998
njn1d0825f2006-03-27 11:37:07 +00004999static INLINE
njn4cf530b2006-04-06 13:33:48 +00005000void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00005001{
5002 UWord sm_off, vabits8;
5003 SecMap* sm;
5004
5005 PROF_EVENT(230, "mc_STOREV32");
5006
5007#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00005008 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005009#else
bart5dd8e6a2008-03-22 08:04:29 +00005010 if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00005011 PROF_EVENT(231, "mc_STOREV32-slow1");
njn4cf530b2006-04-06 13:33:48 +00005012 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005013 return;
sewardjc1a2cda2005-04-21 17:34:00 +00005014 }
5015
njna7c7ebd2006-03-28 12:51:02 +00005016 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00005017 sm_off = SM_OFF(a);
5018 vabits8 = sm->vabits8[sm_off];
5019
5020//---------------------------------------------------------------------------
5021#if 1
5022 // Cleverness: sometimes we don't have to write the shadow memory at
5023 // all, if we can tell that what we want to write is the same as what is
5024 // already there.
njn4cf530b2006-04-06 13:33:48 +00005025 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00005026 if (vabits8 == (UInt)VA_BITS8_DEFINED) {
njn1d0825f2006-03-27 11:37:07 +00005027 return;
njndbf7ca72006-03-31 11:57:59 +00005028 } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
5029 sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
njn1d0825f2006-03-27 11:37:07 +00005030 } else {
njndbf7ca72006-03-31 11:57:59 +00005031 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00005032 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00005033 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005034 }
njn4cf530b2006-04-06 13:33:48 +00005035 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00005036 if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
njn1d0825f2006-03-27 11:37:07 +00005037 return;
njndbf7ca72006-03-31 11:57:59 +00005038 } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
5039 sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00005040 } else {
njndbf7ca72006-03-31 11:57:59 +00005041 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00005042 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00005043 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005044 }
5045 } else {
5046 // Partially defined word
5047 PROF_EVENT(234, "mc_STOREV32-slow4");
njn4cf530b2006-04-06 13:33:48 +00005048 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005049 }
5050//---------------------------------------------------------------------------
5051#else
bart5dd8e6a2008-03-22 08:04:29 +00005052 if (LIKELY( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00005053 (VA_BITS8_DEFINED == vabits8 ||
5054 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00005055 {
5056 /* Handle common case quickly: a is suitably aligned, */
5057      /* is mapped, and is addressable. */
5058 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00005059 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00005060 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00005061 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00005062 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00005063 } else {
5064 /* Slow but general case -- writing partially defined bytes. */
5065 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00005066 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005067 }
5068 } else {
5069 /* Slow but general case. */
5070 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00005071 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005072 }
5073#endif
5074//---------------------------------------------------------------------------
5075#endif
5076}
5077
njn4cf530b2006-04-06 13:33:48 +00005078VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00005079{
njn4cf530b2006-04-06 13:33:48 +00005080 mc_STOREV32(a, vbits32, True);
njn1d0825f2006-03-27 11:37:07 +00005081}
njn4cf530b2006-04-06 13:33:48 +00005082VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00005083{
njn4cf530b2006-04-06 13:33:48 +00005084 mc_STOREV32(a, vbits32, False);
njn1d0825f2006-03-27 11:37:07 +00005085}
njn25e49d8e72002-09-23 09:36:25 +00005086
njn25e49d8e72002-09-23 09:36:25 +00005087
sewardj95448072004-11-22 20:19:51 +00005088/* ------------------------ Size = 2 ------------------------ */
5089
njn1d0825f2006-03-27 11:37:07 +00005090static INLINE
5091UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
5092{
5093 UWord sm_off, vabits8;
5094 SecMap* sm;
5095
5096 PROF_EVENT(240, "mc_LOADV16");
5097
5098#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00005099 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005100#else
bart5dd8e6a2008-03-22 08:04:29 +00005101 if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00005102 PROF_EVENT(241, "mc_LOADV16-slow1");
njn45e81252006-03-28 12:35:08 +00005103 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00005104 }
5105
njna7c7ebd2006-03-28 12:51:02 +00005106 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00005107 sm_off = SM_OFF(a);
5108 vabits8 = sm->vabits8[sm_off];
5109 // Handle common case quickly: a is suitably aligned, is mapped, and is
5110   // addressable.
5111 // Convert V bits from compact memory form to expanded register form
njndbf7ca72006-03-31 11:57:59 +00005112 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS16_DEFINED; }
5113 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00005114 else {
njndbf7ca72006-03-31 11:57:59 +00005115      // The 4 (yes, 4) bytes are not all-defined or all-undefined; check
njn1d0825f2006-03-27 11:37:07 +00005116 // the two sub-bytes.
5117 UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00005118 if (vabits4 == VA_BITS4_DEFINED ) { return V_BITS16_DEFINED; }
5119 else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00005120 else {
njndbf7ca72006-03-31 11:57:59 +00005121 /* Slow case: the two bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00005122 PROF_EVENT(242, "mc_LOADV16-slow2");
njn45e81252006-03-28 12:35:08 +00005123 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005124 }
5125 }
5126#endif
5127}
5128
5129VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
5130{
5131 return mc_LOADV16(a, True);
5132}
5133VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
5134{
5135 return mc_LOADV16(a, False);
5136}
sewardjc1a2cda2005-04-21 17:34:00 +00005137
sewardjc1a2cda2005-04-21 17:34:00 +00005138
njn1d0825f2006-03-27 11:37:07 +00005139static INLINE
njn4cf530b2006-04-06 13:33:48 +00005140void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00005141{
5142 UWord sm_off, vabits8;
5143 SecMap* sm;
5144
5145 PROF_EVENT(250, "mc_STOREV16");
5146
5147#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00005148 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005149#else
bart5dd8e6a2008-03-22 08:04:29 +00005150 if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00005151 PROF_EVENT(251, "mc_STOREV16-slow1");
njn4cf530b2006-04-06 13:33:48 +00005152 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005153 return;
sewardjc1a2cda2005-04-21 17:34:00 +00005154 }
5155
njna7c7ebd2006-03-28 12:51:02 +00005156 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00005157 sm_off = SM_OFF(a);
5158 vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00005159 if (LIKELY( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00005160 (VA_BITS8_DEFINED == vabits8 ||
5161 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00005162 {
5163 /* Handle common case quickly: a is suitably aligned, */
5164      /* is mapped, and is addressable. */
5165 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00005166 if (V_BITS16_DEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00005167 insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
njn1d0825f2006-03-27 11:37:07 +00005168 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00005169 } else if (V_BITS16_UNDEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00005170 insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00005171 &(sm->vabits8[sm_off]) );
5172 } else {
5173 /* Slow but general case -- writing partially defined bytes. */
5174 PROF_EVENT(252, "mc_STOREV16-slow2");
njn4cf530b2006-04-06 13:33:48 +00005175 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005176 }
5177 } else {
5178 /* Slow but general case. */
5179 PROF_EVENT(253, "mc_STOREV16-slow3");
njn4cf530b2006-04-06 13:33:48 +00005180 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00005181 }
5182#endif
5183}
njn25e49d8e72002-09-23 09:36:25 +00005184
njn4cf530b2006-04-06 13:33:48 +00005185VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00005186{
njn4cf530b2006-04-06 13:33:48 +00005187 mc_STOREV16(a, vbits16, True);
njn1d0825f2006-03-27 11:37:07 +00005188}
njn4cf530b2006-04-06 13:33:48 +00005189VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00005190{
njn4cf530b2006-04-06 13:33:48 +00005191 mc_STOREV16(a, vbits16, False);
njn1d0825f2006-03-27 11:37:07 +00005192}
sewardj5d28efc2005-04-21 22:16:29 +00005193
njn25e49d8e72002-09-23 09:36:25 +00005194
sewardj95448072004-11-22 20:19:51 +00005195/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00005196/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00005197
njnaf839f52005-06-23 03:27:57 +00005198VG_REGPARM(1)
njn1d0825f2006-03-27 11:37:07 +00005199UWord MC_(helperc_LOADV8) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00005200{
njn1d0825f2006-03-27 11:37:07 +00005201 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00005202 SecMap* sm;
5203
njn1d0825f2006-03-27 11:37:07 +00005204 PROF_EVENT(260, "mc_LOADV8");
sewardjc1a2cda2005-04-21 17:34:00 +00005205
njn1d0825f2006-03-27 11:37:07 +00005206#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00005207 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00005208#else
bart5dd8e6a2008-03-22 08:04:29 +00005209 if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00005210 PROF_EVENT(261, "mc_LOADV8-slow1");
njn45e81252006-03-28 12:35:08 +00005211 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00005212 }
5213
njna7c7ebd2006-03-28 12:51:02 +00005214 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00005215 sm_off = SM_OFF(a);
5216 vabits8 = sm->vabits8[sm_off];
5217 // Convert V bits from compact memory form to expanded register form
5218 // Handle common case quickly: a is mapped, and the entire
5219   // word32 it lives in is addressable.
njndbf7ca72006-03-31 11:57:59 +00005220 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS8_DEFINED; }
5221 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00005222 else {
njndbf7ca72006-03-31 11:57:59 +00005223      // The 4 (yes, 4) bytes are not all-defined or all-undefined; check
njn1d0825f2006-03-27 11:37:07 +00005224 // the single byte.
5225 UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00005226 if (vabits2 == VA_BITS2_DEFINED ) { return V_BITS8_DEFINED; }
5227 else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00005228 else {
njndbf7ca72006-03-31 11:57:59 +00005229 /* Slow case: the byte is not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00005230 PROF_EVENT(262, "mc_LOADV8-slow2");
njn45e81252006-03-28 12:35:08 +00005231 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00005232 }
sewardjc1a2cda2005-04-21 17:34:00 +00005233 }
njn1d0825f2006-03-27 11:37:07 +00005234#endif
njn25e49d8e72002-09-23 09:36:25 +00005235}
5236
sewardjc1a2cda2005-04-21 17:34:00 +00005237
njnaf839f52005-06-23 03:27:57 +00005238VG_REGPARM(2)
njn4cf530b2006-04-06 13:33:48 +00005239void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
njn25e49d8e72002-09-23 09:36:25 +00005240{
njn1d0825f2006-03-27 11:37:07 +00005241 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00005242 SecMap* sm;
5243
njn1d0825f2006-03-27 11:37:07 +00005244 PROF_EVENT(270, "mc_STOREV8");
sewardjc1a2cda2005-04-21 17:34:00 +00005245
njn1d0825f2006-03-27 11:37:07 +00005246#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00005247 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00005248#else
bart5dd8e6a2008-03-22 08:04:29 +00005249 if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00005250 PROF_EVENT(271, "mc_STOREV8-slow1");
njn4cf530b2006-04-06 13:33:48 +00005251 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00005252 return;
5253 }
5254
njna7c7ebd2006-03-28 12:51:02 +00005255 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00005256 sm_off = SM_OFF(a);
5257 vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00005258 if (LIKELY
njn1d0825f2006-03-27 11:37:07 +00005259 ( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00005260 ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
njn1d0825f2006-03-27 11:37:07 +00005261 || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
5262 )
5263 )
5264 )
5265 {
sewardjc1a2cda2005-04-21 17:34:00 +00005266 /* Handle common case quickly: a is mapped, the entire word32 it
5267         lives in is addressable. */
njn1d0825f2006-03-27 11:37:07 +00005268 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00005269 if (V_BITS8_DEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00005270 insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
njn1d0825f2006-03-27 11:37:07 +00005271 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00005272 } else if (V_BITS8_UNDEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00005273 insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00005274 &(sm->vabits8[sm_off]) );
5275 } else {
5276 /* Slow but general case -- writing partially defined bytes. */
5277 PROF_EVENT(272, "mc_STOREV8-slow2");
njn4cf530b2006-04-06 13:33:48 +00005278 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00005279 }
sewardjc1a2cda2005-04-21 17:34:00 +00005280 } else {
njn1d0825f2006-03-27 11:37:07 +00005281 /* Slow but general case. */
5282 PROF_EVENT(273, "mc_STOREV8-slow3");
njn4cf530b2006-04-06 13:33:48 +00005283 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00005284 }
njn1d0825f2006-03-27 11:37:07 +00005285#endif
njn25e49d8e72002-09-23 09:36:25 +00005286}
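/* Taken together, the size-8/4/2/1 fast paths above all read the same
   compact encoding: each vabits8 octet in a SecMap describes one 4-aligned
   group of four data bytes, two V+A bits per byte.  VA_BITS8_DEFINED,
   VA_BITS8_UNDEFINED and VA_BITS8_NOACCESS are the "all four bytes agree"
   fast cases; any other value means the bytes have mixed states, and bytes
   that are genuinely partially defined are handled via the secondary V bit
   table (secVBitTable).  The sketch below is purely illustrative and not
   part of the build; classify_vabits8 is a made-up name:

      static const HChar* classify_vabits8 ( UChar vabits8 )
      {
         if (vabits8 == VA_BITS8_DEFINED)   return "4 bytes addressable, all defined";
         if (vabits8 == VA_BITS8_UNDEFINED) return "4 bytes addressable, all undefined";
         if (vabits8 == VA_BITS8_NOACCESS)  return "4 bytes not addressable";
         return "mixed per-byte states; inspect the 2-bit fields";
      }
*/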
5287
5288
sewardjc859fbf2005-04-22 21:10:28 +00005289/*------------------------------------------------------------*/
5290/*--- Functions called directly from generated code: ---*/
5291/*--- Value-check failure handlers. ---*/
5292/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00005293
sewardj7cf4e6b2008-05-01 20:24:26 +00005294/* Call these ones when an origin is available ... */
5295VG_REGPARM(1)
5296void MC_(helperc_value_check0_fail_w_o) ( UWord origin ) {
5297 mc_record_cond_error ( VG_(get_running_tid)(), (UInt)origin );
njn25e49d8e72002-09-23 09:36:25 +00005298}
5299
sewardj7cf4e6b2008-05-01 20:24:26 +00005300VG_REGPARM(1)
5301void MC_(helperc_value_check1_fail_w_o) ( UWord origin ) {
5302 mc_record_value_error ( VG_(get_running_tid)(), 1, (UInt)origin );
njn25e49d8e72002-09-23 09:36:25 +00005303}
5304
sewardj7cf4e6b2008-05-01 20:24:26 +00005305VG_REGPARM(1)
5306void MC_(helperc_value_check4_fail_w_o) ( UWord origin ) {
5307 mc_record_value_error ( VG_(get_running_tid)(), 4, (UInt)origin );
njn25e49d8e72002-09-23 09:36:25 +00005308}
5309
sewardj7cf4e6b2008-05-01 20:24:26 +00005310VG_REGPARM(1)
5311void MC_(helperc_value_check8_fail_w_o) ( UWord origin ) {
5312 mc_record_value_error ( VG_(get_running_tid)(), 8, (UInt)origin );
sewardj11bcc4e2005-04-23 22:38:38 +00005313}
5314
sewardj7cf4e6b2008-05-01 20:24:26 +00005315VG_REGPARM(2)
5316void MC_(helperc_value_checkN_fail_w_o) ( HWord sz, UWord origin ) {
5317 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz, (UInt)origin );
5318}
5319
5320/* ... and these when an origin isn't available. */
5321
5322VG_REGPARM(0)
5323void MC_(helperc_value_check0_fail_no_o) ( void ) {
5324 mc_record_cond_error ( VG_(get_running_tid)(), 0/*origin*/ );
5325}
5326
5327VG_REGPARM(0)
5328void MC_(helperc_value_check1_fail_no_o) ( void ) {
5329 mc_record_value_error ( VG_(get_running_tid)(), 1, 0/*origin*/ );
5330}
5331
5332VG_REGPARM(0)
5333void MC_(helperc_value_check4_fail_no_o) ( void ) {
5334 mc_record_value_error ( VG_(get_running_tid)(), 4, 0/*origin*/ );
5335}
5336
5337VG_REGPARM(0)
5338void MC_(helperc_value_check8_fail_no_o) ( void ) {
5339 mc_record_value_error ( VG_(get_running_tid)(), 8, 0/*origin*/ );
5340}
5341
5342VG_REGPARM(1)
5343void MC_(helperc_value_checkN_fail_no_o) ( HWord sz ) {
5344 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz, 0/*origin*/ );
sewardj95448072004-11-22 20:19:51 +00005345}
5346
njn25e49d8e72002-09-23 09:36:25 +00005347
sewardjc2c12c22006-03-08 13:20:09 +00005348/*------------------------------------------------------------*/
5349/*--- Metadata get/set functions, for client requests. ---*/
5350/*------------------------------------------------------------*/
5351
njn1d0825f2006-03-27 11:37:07 +00005352// Nb: this expands the V+A bits out into register-form V bits, even though
5353// they're in memory. This is for backward compatibility, and because it's
5354// probably what the user wants.
5355
5356/* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
sewardjc2c12c22006-03-08 13:20:09 +00005357 error [no longer used], 3 == addressing error. */
njn718d3b12006-12-16 00:54:12 +00005358/* Nb: We used to issue various definedness/addressability errors from here,
5359 but we took them out because they ranged from not-very-helpful to
5360 downright annoying, and they complicated the error data structures. */
sewardjc2c12c22006-03-08 13:20:09 +00005361static Int mc_get_or_set_vbits_for_client (
5362 ThreadId tid,
njn1d0825f2006-03-27 11:37:07 +00005363 Addr a,
5364 Addr vbits,
5365 SizeT szB,
sewardjc2c12c22006-03-08 13:20:09 +00005366 Bool setting /* True <=> set vbits, False <=> get vbits */
5367)
5368{
sewardjc2c12c22006-03-08 13:20:09 +00005369 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00005370 Bool ok;
5371 UChar vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00005372
njn1d0825f2006-03-27 11:37:07 +00005373   /* Check that arrays are addressable before doing any getting/setting. */
5374 for (i = 0; i < szB; i++) {
njn718d3b12006-12-16 00:54:12 +00005375 if (VA_BITS2_NOACCESS == get_vabits2(a + i) ||
5376 VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
njn1d0825f2006-03-27 11:37:07 +00005377 return 3;
sewardjc2c12c22006-03-08 13:20:09 +00005378 }
5379 }
njn1d0825f2006-03-27 11:37:07 +00005380
sewardjc2c12c22006-03-08 13:20:09 +00005381 /* Do the copy */
5382 if (setting) {
njn1d0825f2006-03-27 11:37:07 +00005383 /* setting */
5384 for (i = 0; i < szB; i++) {
5385 ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
5386 tl_assert(ok);
sewardjc2c12c22006-03-08 13:20:09 +00005387 }
5388 } else {
5389 /* getting */
njn1d0825f2006-03-27 11:37:07 +00005390 for (i = 0; i < szB; i++) {
5391 ok = get_vbits8(a + i, &vbits8);
5392 tl_assert(ok);
njn1d0825f2006-03-27 11:37:07 +00005393 ((UChar*)vbits)[i] = vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00005394 }
5395 // The bytes in vbits[] have now been set, so mark them as such.
njndbf7ca72006-03-31 11:57:59 +00005396 MC_(make_mem_defined)(vbits, szB);
njn1d0825f2006-03-27 11:37:07 +00005397 }
sewardjc2c12c22006-03-08 13:20:09 +00005398
5399 return 1;
5400}
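/* This function backs the VG_USERREQ__GET_VBITS / VG_USERREQ__SET_VBITS
   client requests handled further down.  A minimal client-side sketch,
   assuming the usual (address, vbits-buffer, nbytes) argument order of the
   VALGRIND_GET_VBITS / VALGRIND_SET_VBITS macros from memcheck.h:

      #include "memcheck.h"

      void vbits_demo ( void )
      {
         int x;                                     // deliberately uninitialised
         unsigned char vbits[sizeof(x)];
         VALGRIND_GET_VBITS(&x, vbits, sizeof(x));  // 0xFF byte == fully undefined
         VALGRIND_SET_VBITS(&x, vbits, sizeof(x));  // write the same V bits back
      }

   As noted above, the buffer uses register-form V bits: 0 means defined,
   1 means undefined (V_BITS8_DEFINED / V_BITS8_UNDEFINED).
*/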
sewardj05fe85e2005-04-27 22:46:36 +00005401
5402
5403/*------------------------------------------------------------*/
5404/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
5405/*------------------------------------------------------------*/
5406
5407/* For the memory leak detector, say whether an entire 64k chunk of
5408 address space is possibly in use, or not. If in doubt return
5409 True.
5410*/
5411static
5412Bool mc_is_within_valid_secondary ( Addr a )
5413{
5414 SecMap* sm = maybe_get_secmap_for ( a );
sewardj05a46732006-10-17 01:28:10 +00005415 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
5416 || in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00005417 /* Definitely not in use. */
5418 return False;
5419 } else {
5420 return True;
5421 }
5422}
5423
5424
5425/* For the memory leak detector, say whether or not a given word
5426 address is to be regarded as valid. */
5427static
5428Bool mc_is_valid_aligned_word ( Addr a )
5429{
5430 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
5431 if (sizeof(UWord) == 4) {
5432 tl_assert(VG_IS_4_ALIGNED(a));
5433 } else {
5434 tl_assert(VG_IS_8_ALIGNED(a));
5435 }
sewardj7cf4e6b2008-05-01 20:24:26 +00005436 if (is_mem_defined( a, sizeof(UWord), NULL, NULL) == MC_Ok
sewardj05a46732006-10-17 01:28:10 +00005437 && !in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00005438 return True;
5439 } else {
5440 return False;
5441 }
5442}
sewardja4495682002-10-21 07:29:59 +00005443
5444
nethercote996901a2004-08-03 13:29:09 +00005445/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00005446 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00005447 tool. */
njnb8dca862005-03-14 02:42:44 +00005448static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00005449{
njn1d0825f2006-03-27 11:37:07 +00005450 MC_(do_detect_memory_leaks) (
sewardj05fe85e2005-04-27 22:46:36 +00005451 tid,
5452 mode,
5453 mc_is_within_valid_secondary,
5454 mc_is_valid_aligned_word
5455 );
njn25e49d8e72002-09-23 09:36:25 +00005456}
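/* Clients can also trigger a leak check mid-run via the client requests
   handled later in this file.  An illustrative sketch, using the macro
   names from memcheck.h (assumed spellings; see that header):

      #include "memcheck.h"

      void leak_demo ( void )
      {
         unsigned long leaked, dubious, reachable, suppressed;
         VALGRIND_DO_LEAK_CHECK;   // full check, as with --leak-check=full
         VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
      }
*/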
5457
5458
sewardjc859fbf2005-04-22 21:10:28 +00005459/*------------------------------------------------------------*/
5460/*--- Initialisation ---*/
5461/*------------------------------------------------------------*/
5462
5463static void init_shadow_memory ( void )
5464{
5465 Int i;
5466 SecMap* sm;
5467
njn1d0825f2006-03-27 11:37:07 +00005468 tl_assert(V_BIT_UNDEFINED == 1);
5469 tl_assert(V_BIT_DEFINED == 0);
5470 tl_assert(V_BITS8_UNDEFINED == 0xFF);
5471 tl_assert(V_BITS8_DEFINED == 0);
5472
sewardjc859fbf2005-04-22 21:10:28 +00005473 /* Build the 3 distinguished secondaries */
sewardjc859fbf2005-04-22 21:10:28 +00005474 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00005475 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
sewardjc859fbf2005-04-22 21:10:28 +00005476
njndbf7ca72006-03-31 11:57:59 +00005477 sm = &sm_distinguished[SM_DIST_UNDEFINED];
5478 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00005479
njndbf7ca72006-03-31 11:57:59 +00005480 sm = &sm_distinguished[SM_DIST_DEFINED];
5481 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00005482
5483 /* Set up the primary map. */
5484 /* These entries gradually get overwritten as the used address
5485 space expands. */
5486 for (i = 0; i < N_PRIMARY_MAP; i++)
5487 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
5488
sewardj05a46732006-10-17 01:28:10 +00005489 /* Auxiliary primary maps */
5490 init_auxmap_L1_L2();
5491
sewardjc859fbf2005-04-22 21:10:28 +00005492 /* auxmap_size = auxmap_used = 0;
5493 no ... these are statically initialised */
njn1d0825f2006-03-27 11:37:07 +00005494
5495 /* Secondary V bit table */
5496 secVBitTable = createSecVBitTable();
sewardjc859fbf2005-04-22 21:10:28 +00005497}
5498
5499
5500/*------------------------------------------------------------*/
5501/*--- Sanity check machinery (permanently engaged) ---*/
5502/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00005503
njn51d827b2005-05-09 01:02:08 +00005504static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00005505{
sewardj23eb2fd2005-04-22 16:29:19 +00005506 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00005507 PROF_EVENT(490, "cheap_sanity_check");
sewardj7cf4e6b2008-05-01 20:24:26 +00005508 /* Check for sane operating level */
5509 if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
5510 return False;
5511 /* nothing else useful we can rapidly check */
jseward9800fd32004-01-04 23:08:04 +00005512 return True;
njn25e49d8e72002-09-23 09:36:25 +00005513}
5514
njn51d827b2005-05-09 01:02:08 +00005515static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00005516{
sewardj05a46732006-10-17 01:28:10 +00005517 Int i;
5518 Word n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00005519 SecMap* sm;
sewardj05a46732006-10-17 01:28:10 +00005520 HChar* errmsg;
sewardj23eb2fd2005-04-22 16:29:19 +00005521 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00005522
sewardj05a46732006-10-17 01:28:10 +00005523 if (0) VG_(printf)("expensive sanity check\n");
5524 if (0) return True;
5525
sewardj23eb2fd2005-04-22 16:29:19 +00005526 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00005527 PROF_EVENT(491, "expensive_sanity_check");
5528
sewardj7cf4e6b2008-05-01 20:24:26 +00005529 /* Check for sane operating level */
5530 if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
5531 return False;
5532
njn1d0825f2006-03-27 11:37:07 +00005533 /* Check that the 3 distinguished SMs are still as they should be. */
njn25e49d8e72002-09-23 09:36:25 +00005534
njndbf7ca72006-03-31 11:57:59 +00005535 /* Check noaccess DSM. */
sewardj45d94cc2005-04-20 14:44:11 +00005536 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00005537 for (i = 0; i < SM_CHUNKS; i++)
5538 if (sm->vabits8[i] != VA_BITS8_NOACCESS)
sewardj23eb2fd2005-04-22 16:29:19 +00005539 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00005540
njndbf7ca72006-03-31 11:57:59 +00005541 /* Check undefined DSM. */
5542 sm = &sm_distinguished[SM_DIST_UNDEFINED];
njn1d0825f2006-03-27 11:37:07 +00005543 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00005544 if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00005545 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00005546
njndbf7ca72006-03-31 11:57:59 +00005547 /* Check defined DSM. */
5548 sm = &sm_distinguished[SM_DIST_DEFINED];
njn1d0825f2006-03-27 11:37:07 +00005549 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00005550 if (sm->vabits8[i] != VA_BITS8_DEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00005551 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00005552
sewardj23eb2fd2005-04-22 16:29:19 +00005553 if (bad) {
5554 VG_(printf)("memcheck expensive sanity: "
5555 "distinguished_secondaries have changed\n");
5556 return False;
5557 }
5558
njn1d0825f2006-03-27 11:37:07 +00005559 /* If we're not checking for undefined value errors, the secondary V bit
5560 * table should be empty. */
sewardj7cf4e6b2008-05-01 20:24:26 +00005561 if (MC_(clo_mc_level) == 1) {
njne2a9ad32007-09-17 05:30:48 +00005562 if (0 != VG_(OSetGen_Size)(secVBitTable))
njn1d0825f2006-03-27 11:37:07 +00005563 return False;
5564 }
5565
sewardj05a46732006-10-17 01:28:10 +00005566 /* check the auxiliary maps, very thoroughly */
5567 n_secmaps_found = 0;
5568 errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
5569 if (errmsg) {
5570 VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
sewardj23eb2fd2005-04-22 16:29:19 +00005571 return False;
5572 }
5573
sewardj05a46732006-10-17 01:28:10 +00005574 /* n_secmaps_found is now the number referred to by the auxiliary
5575 primary map. Now add on the ones referred to by the main
5576 primary map. */
sewardj23eb2fd2005-04-22 16:29:19 +00005577 for (i = 0; i < N_PRIMARY_MAP; i++) {
sewardj05a46732006-10-17 01:28:10 +00005578 if (primary_map[i] == NULL) {
sewardj23eb2fd2005-04-22 16:29:19 +00005579 bad = True;
5580 } else {
sewardj05a46732006-10-17 01:28:10 +00005581 if (!is_distinguished_sm(primary_map[i]))
sewardj23eb2fd2005-04-22 16:29:19 +00005582 n_secmaps_found++;
5583 }
5584 }
5585
sewardj05a46732006-10-17 01:28:10 +00005586 /* check that the number of secmaps issued matches the number that
5587 are reachable (iow, no secmap leaks) */
njn1d0825f2006-03-27 11:37:07 +00005588 if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
sewardj23eb2fd2005-04-22 16:29:19 +00005589 bad = True;
5590
5591 if (bad) {
5592 VG_(printf)("memcheck expensive sanity: "
5593 "apparent secmap leakage\n");
5594 return False;
5595 }
5596
sewardj23eb2fd2005-04-22 16:29:19 +00005597 if (bad) {
5598 VG_(printf)("memcheck expensive sanity: "
5599 "auxmap covers wrong address space\n");
5600 return False;
5601 }
5602
5603 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00005604
5605 return True;
5606}
sewardj45d94cc2005-04-20 14:44:11 +00005607
njn25e49d8e72002-09-23 09:36:25 +00005608/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00005609/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00005610/*------------------------------------------------------------*/
5611
njn1d0825f2006-03-27 11:37:07 +00005612Bool MC_(clo_partial_loads_ok) = False;
sewardjfa4ca3b2007-11-30 17:19:36 +00005613Long MC_(clo_freelist_vol) = 10*1000*1000LL;
njn1d0825f2006-03-27 11:37:07 +00005614LeakCheckMode MC_(clo_leak_check) = LC_Summary;
5615VgRes MC_(clo_leak_resolution) = Vg_LowRes;
5616Bool MC_(clo_show_reachable) = False;
5617Bool MC_(clo_workaround_gcc296_bugs) = False;
sewardjeb0fa932007-11-30 21:41:40 +00005618Int MC_(clo_malloc_fill) = -1;
5619Int MC_(clo_free_fill) = -1;
sewardj7cf4e6b2008-05-01 20:24:26 +00005620Int MC_(clo_mc_level) = 2;
njn1d0825f2006-03-27 11:37:07 +00005621
5622static Bool mc_process_cmd_line_options(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00005623{
sewardj7cf4e6b2008-05-01 20:24:26 +00005624 tl_assert( MC_(clo_mc_level) >= 1 && MC_(clo_mc_level) <= 3 );
5625
5626 /* Set MC_(clo_mc_level):
5627 1 = A bit tracking only
5628 2 = A and V bit tracking, but no V bit origins
5629 3 = A and V bit tracking, and V bit origins
5630
5631 Do this by inspecting --undef-value-errors= and
5632 --track-origins=. Reject the case --undef-value-errors=no
5633 --track-origins=yes as meaningless.
5634 */
5635 if (0 == VG_(strcmp)(arg, "--undef-value-errors=no")) {
5636 if (MC_(clo_mc_level) == 3)
5637 goto mc_level_error;
5638 MC_(clo_mc_level) = 1;
5639 return True;
5640 }
5641 if (0 == VG_(strcmp)(arg, "--undef-value-errors=yes")) {
5642 if (MC_(clo_mc_level) == 1)
5643 MC_(clo_mc_level) = 2;
5644 return True;
5645 }
5646 if (0 == VG_(strcmp)(arg, "--track-origins=no")) {
5647 if (MC_(clo_mc_level) == 3)
5648 MC_(clo_mc_level) = 2;
5649 return True;
5650 }
5651 if (0 == VG_(strcmp)(arg, "--track-origins=yes")) {
5652 if (MC_(clo_mc_level) == 1)
5653 goto mc_level_error;
5654 MC_(clo_mc_level) = 3;
5655 return True;
5656 }
5657
njn1d0825f2006-03-27 11:37:07 +00005658 VG_BOOL_CLO(arg, "--partial-loads-ok", MC_(clo_partial_loads_ok))
5659 else VG_BOOL_CLO(arg, "--show-reachable", MC_(clo_show_reachable))
5660 else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))
5661
sewardjfa4ca3b2007-11-30 17:19:36 +00005662 else VG_BNUM_CLO(arg, "--freelist-vol", MC_(clo_freelist_vol),
5663 0, 10*1000*1000*1000LL)
njn1d0825f2006-03-27 11:37:07 +00005664
5665 else if (VG_CLO_STREQ(arg, "--leak-check=no"))
5666 MC_(clo_leak_check) = LC_Off;
5667 else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
5668 MC_(clo_leak_check) = LC_Summary;
5669 else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
5670 VG_CLO_STREQ(arg, "--leak-check=full"))
5671 MC_(clo_leak_check) = LC_Full;
5672
5673 else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
5674 MC_(clo_leak_resolution) = Vg_LowRes;
5675 else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
5676 MC_(clo_leak_resolution) = Vg_MedRes;
5677 else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
5678 MC_(clo_leak_resolution) = Vg_HighRes;
5679
sewardj05a46732006-10-17 01:28:10 +00005680 else if (VG_CLO_STREQN(16,arg,"--ignore-ranges=")) {
5681 Int i;
5682 UChar* txt = (UChar*)(arg+16);
5683 Bool ok = parse_ignore_ranges(txt);
5684 if (!ok)
5685 return False;
5686 tl_assert(ignoreRanges.used >= 0);
5687 tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
5688 for (i = 0; i < ignoreRanges.used; i++) {
5689 Addr s = ignoreRanges.start[i];
5690 Addr e = ignoreRanges.end[i];
5691 Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
5692 if (e <= s) {
5693 VG_(message)(Vg_DebugMsg,
5694 "ERROR: --ignore-ranges: end <= start in range:");
5695 VG_(message)(Vg_DebugMsg,
5696 " 0x%lx-0x%lx", s, e);
5697 return False;
5698 }
5699 if (e - s > limit) {
5700 VG_(message)(Vg_DebugMsg,
5701 "ERROR: --ignore-ranges: suspiciously large range:");
5702 VG_(message)(Vg_DebugMsg,
5703 " 0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
5704 return False;
5705 }
5706 }
5707 }
5708
sewardjeb0fa932007-11-30 21:41:40 +00005709 else VG_BHEX_CLO(arg, "--malloc-fill", MC_(clo_malloc_fill), 0x00, 0xFF)
5710 else VG_BHEX_CLO(arg, "--free-fill", MC_(clo_free_fill), 0x00, 0xFF)
5711
njn1d0825f2006-03-27 11:37:07 +00005712 else
5713 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5714
5715 return True;
sewardj7cf4e6b2008-05-01 20:24:26 +00005716 /*NOTREACHED*/
5717
5718 mc_level_error:
5719 VG_(message)(Vg_DebugMsg, "ERROR: --track-origins=yes has no effect "
5720 "when --undef-value-errors=no");
5721 return False;
njn25e49d8e72002-09-23 09:36:25 +00005722}
5723
njn51d827b2005-05-09 01:02:08 +00005724static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00005725{
njn1d0825f2006-03-27 11:37:07 +00005726 VG_(printf)(
5727" --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
5728" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
5729" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
5730" --undef-value-errors=no|yes check for undefined value errors [yes]\n"
sewardj7cf4e6b2008-05-01 20:24:26 +00005731" --track-origins=no|yes show origins of undefined values? [no]\n"
njn1d0825f2006-03-27 11:37:07 +00005732" --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
sewardjfa4ca3b2007-11-30 17:19:36 +00005733" --freelist-vol=<number> volume of freed blocks queue [10000000]\n"
njn1d0825f2006-03-27 11:37:07 +00005734" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
sewardj05a46732006-10-17 01:28:10 +00005735" --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS] assume given addresses are OK\n"
sewardjeb0fa932007-11-30 21:41:40 +00005736" --malloc-fill=<hexnumber> fill malloc'd areas with given value\n"
5737" --free-fill=<hexnumber> fill free'd areas with given value\n"
njn1d0825f2006-03-27 11:37:07 +00005738 );
5739 VG_(replacement_malloc_print_usage)();
njn3e884182003-04-15 13:03:23 +00005740}
5741
njn51d827b2005-05-09 01:02:08 +00005742static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00005743{
njn1d0825f2006-03-27 11:37:07 +00005744 VG_(replacement_malloc_print_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00005745}
5746
sewardjf3418c02005-11-08 14:10:24 +00005747
nethercote8b76fe52004-11-08 19:20:09 +00005748/*------------------------------------------------------------*/
5749/*--- Client requests ---*/
5750/*------------------------------------------------------------*/
5751
5752/* Client block management:
5753
5754 This is managed as an expanding array of client block descriptors.
5755 Indices of live descriptors are issued to the client, so it can ask
5756 to free them later. Therefore we cannot slide live entries down
5757 over dead ones. Instead we must use free/inuse flags and scan for
5758 an empty slot at allocation time. This in turn means allocation is
5759 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00005760
sewardjedc75ab2005-03-15 23:30:32 +00005761 An unused block has start == size == 0
5762*/
nethercote8b76fe52004-11-08 19:20:09 +00005763
5764typedef
5765 struct {
5766 Addr start;
5767 SizeT size;
5768 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00005769 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00005770 }
5771 CGenBlock;
5772
5773/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00005774static UInt cgb_size = 0;
5775static UInt cgb_used = 0;
5776static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00005777
5778/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00005779static UInt cgb_used_MAX = 0; /* Max in use. */
5780static UInt cgb_allocs = 0; /* Number of allocs. */
5781static UInt cgb_discards = 0; /* Number of discards. */
5782static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00005783
5784
5785static
njn695c16e2005-03-27 03:40:28 +00005786Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00005787{
5788 UInt i, sz_new;
5789 CGenBlock* cgbs_new;
5790
njn695c16e2005-03-27 03:40:28 +00005791 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00005792
njn695c16e2005-03-27 03:40:28 +00005793 for (i = 0; i < cgb_used; i++) {
5794 cgb_search++;
5795 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00005796 return i;
5797 }
5798
5799 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00005800 if (cgb_used < cgb_size) {
5801 cgb_used++;
5802 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00005803 }
5804
5805 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00005806 tl_assert(cgb_used == cgb_size);
5807 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00005808
5809 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00005810 for (i = 0; i < cgb_used; i++)
5811 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00005812
njn695c16e2005-03-27 03:40:28 +00005813 if (cgbs != NULL)
5814 VG_(free)( cgbs );
5815 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00005816
njn695c16e2005-03-27 03:40:28 +00005817 cgb_size = sz_new;
5818 cgb_used++;
5819 if (cgb_used > cgb_used_MAX)
5820 cgb_used_MAX = cgb_used;
5821 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00005822}
5823
5824
5825static void show_client_block_stats ( void )
5826{
5827 VG_(message)(Vg_DebugMsg,
5828 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00005829 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00005830 );
5831}
5832
nethercote8b76fe52004-11-08 19:20:09 +00005833static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
5834{
5835 UInt i;
nethercote8b76fe52004-11-08 19:20:09 +00005836
5837 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00005838 for (i = 0; i < cgb_used; i++) {
5839 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00005840 continue;
njn717cde52005-05-10 02:47:21 +00005841 // Use zero as the redzone for client blocks.
5842 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00005843 /* OK - maybe it's a mempool, too? */
njn1d0825f2006-03-27 11:37:07 +00005844 MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
njn12627272005-08-14 18:32:16 +00005845 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00005846 if (mp != NULL) {
5847 if (mp->chunks != NULL) {
njn1d0825f2006-03-27 11:37:07 +00005848 MC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00005849 VG_(HT_ResetIter)(mp->chunks);
5850 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0825f2006-03-27 11:37:07 +00005851 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00005852 ai->tag = Addr_Block;
5853 ai->Addr.Block.block_kind = Block_MempoolChunk;
5854 ai->Addr.Block.block_desc = "block";
5855 ai->Addr.Block.block_szB = mc->szB;
5856 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
5857 ai->Addr.Block.lastchange = mc->where;
njn1d0cb0d2005-08-15 01:52:02 +00005858 return True;
5859 }
nethercote8b76fe52004-11-08 19:20:09 +00005860 }
5861 }
njn718d3b12006-12-16 00:54:12 +00005862 ai->tag = Addr_Block;
5863 ai->Addr.Block.block_kind = Block_Mempool;
5864 ai->Addr.Block.block_desc = "mempool";
5865 ai->Addr.Block.block_szB = cgbs[i].size;
5866 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
5867 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00005868 return True;
5869 }
njn718d3b12006-12-16 00:54:12 +00005870 ai->tag = Addr_Block;
5871 ai->Addr.Block.block_kind = Block_UserG;
5872 ai->Addr.Block.block_desc = cgbs[i].desc;
5873 ai->Addr.Block.block_szB = cgbs[i].size;
5874 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
5875 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00005876 return True;
5877 }
5878 }
5879 return False;
5880}
5881
njn51d827b2005-05-09 01:02:08 +00005882static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00005883{
5884 Int i;
5885 Bool ok;
5886 Addr bad_addr;
5887
njnfc26ff92004-11-22 19:12:49 +00005888 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00005889 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
5890 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
5891 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
5892 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
5893 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
sewardj2c1c9df2006-07-28 00:06:37 +00005894 && VG_USERREQ__MEMPOOL_FREE != arg[0]
sewardjc740d762006-10-05 17:59:23 +00005895 && VG_USERREQ__MEMPOOL_TRIM != arg[0]
5896 && VG_USERREQ__MOVE_MEMPOOL != arg[0]
5897 && VG_USERREQ__MEMPOOL_CHANGE != arg[0]
5898 && VG_USERREQ__MEMPOOL_EXISTS != arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00005899 return False;
5900
5901 switch (arg[0]) {
njndbf7ca72006-03-31 11:57:59 +00005902 case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
5903 ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00005904 if (!ok)
sewardj7cf4e6b2008-05-01 20:24:26 +00005905 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True, 0 );
nethercote8b76fe52004-11-08 19:20:09 +00005906 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00005907 break;
nethercote8b76fe52004-11-08 19:20:09 +00005908
njndbf7ca72006-03-31 11:57:59 +00005909 case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
nethercote8b76fe52004-11-08 19:20:09 +00005910 MC_ReadResult res;
sewardj7cf4e6b2008-05-01 20:24:26 +00005911 UInt otag = 0;
5912 res = is_mem_defined ( arg[1], arg[2], &bad_addr, &otag );
nethercote8b76fe52004-11-08 19:20:09 +00005913 if (MC_AddrErr == res)
sewardj7cf4e6b2008-05-01 20:24:26 +00005914 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True, 0 );
nethercote8b76fe52004-11-08 19:20:09 +00005915 else if (MC_ValueErr == res)
sewardj7cf4e6b2008-05-01 20:24:26 +00005916 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/False, otag );
nethercote8b76fe52004-11-08 19:20:09 +00005917 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00005918 break;
nethercote8b76fe52004-11-08 19:20:09 +00005919 }
5920
5921 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00005922 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00005923 *ret = 0; /* return value is meaningless */
5924 break;
nethercote8b76fe52004-11-08 19:20:09 +00005925
njndbf7ca72006-03-31 11:57:59 +00005926 case VG_USERREQ__MAKE_MEM_NOACCESS:
5927 MC_(make_mem_noaccess) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00005928 *ret = -1;
5929 break;
nethercote8b76fe52004-11-08 19:20:09 +00005930
njndbf7ca72006-03-31 11:57:59 +00005931 case VG_USERREQ__MAKE_MEM_UNDEFINED:
sewardj7cf4e6b2008-05-01 20:24:26 +00005932 make_mem_undefined_w_tid_and_okind ( arg[1], arg[2], tid, MC_OKIND_USER );
sewardjedc75ab2005-03-15 23:30:32 +00005933 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00005934 break;
nethercote8b76fe52004-11-08 19:20:09 +00005935
njndbf7ca72006-03-31 11:57:59 +00005936 case VG_USERREQ__MAKE_MEM_DEFINED:
5937 MC_(make_mem_defined) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00005938 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00005939 break;
5940
njndbf7ca72006-03-31 11:57:59 +00005941 case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
5942 make_mem_defined_if_addressable ( arg[1], arg[2] );
sewardjfb1e9ad2006-03-10 13:41:58 +00005943 *ret = -1;
5944 break;
5945
sewardjedc75ab2005-03-15 23:30:32 +00005946 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00005947 if (arg[1] != 0 && arg[2] != 0) {
5948 i = alloc_client_block();
5949 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
5950 cgbs[i].start = arg[1];
5951 cgbs[i].size = arg[2];
5952 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
sewardj39f34232007-11-09 23:02:28 +00005953 cgbs[i].where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
sewardjedc75ab2005-03-15 23:30:32 +00005954
sewardj8cf88b72005-07-08 01:29:33 +00005955 *ret = i;
5956 } else
5957 *ret = -1;
5958 break;
sewardjedc75ab2005-03-15 23:30:32 +00005959
nethercote8b76fe52004-11-08 19:20:09 +00005960 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00005961 if (cgbs == NULL
5962 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00005963 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00005964 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00005965 } else {
5966 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
5967 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
5968 VG_(free)(cgbs[arg[2]].desc);
5969 cgb_discards++;
5970 *ret = 0;
5971 }
5972 break;
nethercote8b76fe52004-11-08 19:20:09 +00005973
sewardjc2c12c22006-03-08 13:20:09 +00005974 case VG_USERREQ__GET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00005975 *ret = mc_get_or_set_vbits_for_client
5976 ( tid, arg[1], arg[2], arg[3], False /* get them */ );
5977 break;
5978
5979 case VG_USERREQ__SET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00005980 *ret = mc_get_or_set_vbits_for_client
5981 ( tid, arg[1], arg[2], arg[3], True /* set them */ );
5982 break;
nethercote8b76fe52004-11-08 19:20:09 +00005983
njn1d0825f2006-03-27 11:37:07 +00005984 case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
5985 UWord** argp = (UWord**)arg;
5986 // MC_(bytes_leaked) et al were set by the last leak check (or zero
5987 // if no prior leak checks performed).
5988 *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
5989 *argp[2] = MC_(bytes_dubious);
5990 *argp[3] = MC_(bytes_reachable);
5991 *argp[4] = MC_(bytes_suppressed);
5992 // there is no argp[5]
5993 //*argp[5] = MC_(bytes_indirect);
njndbf7ca72006-03-31 11:57:59 +00005994 // XXX need to make *argp[1-4] defined
njn1d0825f2006-03-27 11:37:07 +00005995 *ret = 0;
5996 return True;
5997 }
5998 case VG_USERREQ__MALLOCLIKE_BLOCK: {
5999 Addr p = (Addr)arg[1];
6000 SizeT sizeB = arg[2];
6001 UInt rzB = arg[3];
6002 Bool is_zeroed = (Bool)arg[4];
6003
6004 MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed,
6005 MC_AllocCustom, MC_(malloc_list) );
6006 return True;
6007 }
6008 case VG_USERREQ__FREELIKE_BLOCK: {
6009 Addr p = (Addr)arg[1];
6010 UInt rzB = arg[2];
6011
6012 MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
6013 return True;
6014 }
6015
6016 case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
njn718d3b12006-12-16 00:54:12 +00006017 Char* s = (Char*)arg[1];
6018 Addr dst = (Addr) arg[2];
6019 Addr src = (Addr) arg[3];
6020 SizeT len = (SizeT)arg[4];
6021 mc_record_overlap_error(tid, s, src, dst, len);
njn1d0825f2006-03-27 11:37:07 +00006022 return True;
6023 }
6024
6025 case VG_USERREQ__CREATE_MEMPOOL: {
6026 Addr pool = (Addr)arg[1];
6027 UInt rzB = arg[2];
6028 Bool is_zeroed = (Bool)arg[3];
6029
6030 MC_(create_mempool) ( pool, rzB, is_zeroed );
6031 return True;
6032 }
6033
6034 case VG_USERREQ__DESTROY_MEMPOOL: {
6035 Addr pool = (Addr)arg[1];
6036
6037 MC_(destroy_mempool) ( pool );
6038 return True;
6039 }
6040
6041 case VG_USERREQ__MEMPOOL_ALLOC: {
6042 Addr pool = (Addr)arg[1];
6043 Addr addr = (Addr)arg[2];
6044 UInt size = arg[3];
6045
6046 MC_(mempool_alloc) ( tid, pool, addr, size );
6047 return True;
6048 }
6049
6050 case VG_USERREQ__MEMPOOL_FREE: {
6051 Addr pool = (Addr)arg[1];
6052 Addr addr = (Addr)arg[2];
6053
6054 MC_(mempool_free) ( pool, addr );
6055 return True;
6056 }
6057
sewardj2c1c9df2006-07-28 00:06:37 +00006058 case VG_USERREQ__MEMPOOL_TRIM: {
6059 Addr pool = (Addr)arg[1];
6060 Addr addr = (Addr)arg[2];
6061 UInt size = arg[3];
6062
6063 MC_(mempool_trim) ( pool, addr, size );
6064 return True;
6065 }
6066
sewardjc740d762006-10-05 17:59:23 +00006067 case VG_USERREQ__MOVE_MEMPOOL: {
6068 Addr poolA = (Addr)arg[1];
6069 Addr poolB = (Addr)arg[2];
6070
6071 MC_(move_mempool) ( poolA, poolB );
6072 return True;
6073 }
6074
6075 case VG_USERREQ__MEMPOOL_CHANGE: {
6076 Addr pool = (Addr)arg[1];
6077 Addr addrA = (Addr)arg[2];
6078 Addr addrB = (Addr)arg[3];
6079 UInt size = arg[4];
6080
6081 MC_(mempool_change) ( pool, addrA, addrB, size );
6082 return True;
6083 }
6084
6085 case VG_USERREQ__MEMPOOL_EXISTS: {
6086 Addr pool = (Addr)arg[1];
6087
6088 *ret = (UWord) MC_(mempool_exists) ( pool );
6089 return True;
6090 }
6091
6092
nethercote8b76fe52004-11-08 19:20:09 +00006093 default:
njn1d0825f2006-03-27 11:37:07 +00006094 VG_(message)(Vg_UserMsg,
6095 "Warning: unknown memcheck client request code %llx",
6096 (ULong)arg[0]);
6097 return False;
nethercote8b76fe52004-11-08 19:20:09 +00006098 }
6099 return True;
6100}
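/* The MALLOCLIKE/FREELIKE and mempool cases above are driven by the client
   request macros in valgrind.h and memcheck.h.  A minimal sketch of how a
   custom pool allocator might describe itself to Memcheck (illustrative
   only; 'superblock', 'pool_demo' and the sizes are made-up names and
   values):

      #include "valgrind.h"
      #include "memcheck.h"

      static char superblock[10000];

      void pool_demo ( void )
      {
         char* pool = superblock;               // opaque handle for the pool
         VALGRIND_CREATE_MEMPOOL(pool, 0, 0);   // rzB = 0, is_zeroed = 0
         VALGRIND_MAKE_MEM_NOACCESS(superblock, sizeof superblock);

         char* p = superblock + 64;             // chunk handed out by the pool
         VALGRIND_MEMPOOL_ALLOC(pool, p, 100);
         // ... client code uses p[0 .. 99] ...
         VALGRIND_MEMPOOL_FREE(pool, p);
         VALGRIND_DESTROY_MEMPOOL(pool);
      }
*/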
njn25e49d8e72002-09-23 09:36:25 +00006101
6102/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00006103/*--- Crude profiling machinery. ---*/
6104/*------------------------------------------------------------*/
6105
6106// We track a number of interesting events (using PROF_EVENT)
6107// if MC_PROFILE_MEMORY is defined.
6108
6109#ifdef MC_PROFILE_MEMORY
6110
6111UInt MC_(event_ctr)[N_PROF_EVENTS];
6112HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
6113
6114static void init_prof_mem ( void )
6115{
6116 Int i;
6117 for (i = 0; i < N_PROF_EVENTS; i++) {
6118 MC_(event_ctr)[i] = 0;
6119 MC_(event_ctr_name)[i] = NULL;
6120 }
6121}
6122
6123static void done_prof_mem ( void )
6124{
6125 Int i;
6126 Bool spaced = False;
6127 for (i = 0; i < N_PROF_EVENTS; i++) {
6128 if (!spaced && (i % 10) == 0) {
6129 VG_(printf)("\n");
6130 spaced = True;
6131 }
6132 if (MC_(event_ctr)[i] > 0) {
6133 spaced = False;
6134 VG_(printf)( "prof mem event %3d: %9d %s\n",
6135 i, MC_(event_ctr)[i],
6136 MC_(event_ctr_name)[i]
6137 ? MC_(event_ctr_name)[i] : "unnamed");
6138 }
6139 }
6140}
6141
6142#else
6143
6144static void init_prof_mem ( void ) { }
6145static void done_prof_mem ( void ) { }
6146
6147#endif
6148
sewardj7cf4e6b2008-05-01 20:24:26 +00006149
6150/*------------------------------------------------------------*/
6151/*--- Origin tracking stuff ---*/
6152/*------------------------------------------------------------*/
6153
6154/*--------------------------------------------*/
6155/*--- Origin tracking: load handlers ---*/
6156/*--------------------------------------------*/
6157
6158static INLINE UInt merge_origins ( UInt or1, UInt or2 ) {
6159 return or1 > or2 ? or1 : or2;
6160}
6161
6162UWord VG_REGPARM(1) MC_(helperc_b_load1)( Addr a ) {
6163 OCacheLine* line;
6164 UChar descr;
6165 UWord lineoff = oc_line_offset(a);
6166 UWord byteoff = a & 3; /* 0, 1, 2 or 3 */
6167
6168 if (OC_ENABLE_ASSERTIONS) {
6169 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
6170 }
6171
6172 line = find_OCacheLine( a );
6173
6174 descr = line->descr[lineoff];
6175 if (OC_ENABLE_ASSERTIONS) {
6176 tl_assert(descr < 0x10);
6177 }
6178
6179 if (LIKELY(0 == (descr & (1 << byteoff)))) {
6180 return 0;
6181 } else {
6182 return line->w32[lineoff];
6183 }
6184}
6185
6186UWord VG_REGPARM(1) MC_(helperc_b_load2)( Addr a ) {
6187 OCacheLine* line;
6188 UChar descr;
6189 UWord lineoff, byteoff;
6190
6191 if (UNLIKELY(a & 1)) {
6192 /* Handle misaligned case, slowly. */
6193 UInt oLo = (UInt)MC_(helperc_b_load1)( a + 0 );
6194 UInt oHi = (UInt)MC_(helperc_b_load1)( a + 1 );
6195 return merge_origins(oLo, oHi);
6196 }
6197
6198 lineoff = oc_line_offset(a);
6199 byteoff = a & 3; /* 0 or 2 */
6200
6201 if (OC_ENABLE_ASSERTIONS) {
6202 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
6203 }
6204 line = find_OCacheLine( a );
6205
6206 descr = line->descr[lineoff];
6207 if (OC_ENABLE_ASSERTIONS) {
6208 tl_assert(descr < 0x10);
6209 }
6210
6211 if (LIKELY(0 == (descr & (3 << byteoff)))) {
6212 return 0;
6213 } else {
6214 return line->w32[lineoff];
6215 }
6216}
6217
6218UWord VG_REGPARM(1) MC_(helperc_b_load4)( Addr a ) {
6219 OCacheLine* line;
6220 UChar descr;
6221 UWord lineoff;
6222
6223 if (UNLIKELY(a & 3)) {
6224 /* Handle misaligned case, slowly. */
6225 UInt oLo = (UInt)MC_(helperc_b_load2)( a + 0 );
6226 UInt oHi = (UInt)MC_(helperc_b_load2)( a + 2 );
6227 return merge_origins(oLo, oHi);
6228 }
6229
6230 lineoff = oc_line_offset(a);
6231 if (OC_ENABLE_ASSERTIONS) {
6232 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
6233 }
6234
6235 line = find_OCacheLine( a );
6236
6237 descr = line->descr[lineoff];
6238 if (OC_ENABLE_ASSERTIONS) {
6239 tl_assert(descr < 0x10);
6240 }
6241
6242 if (LIKELY(0 == descr)) {
6243 return 0;
6244 } else {
6245 return line->w32[lineoff];
6246 }
6247}
6248
6249UWord VG_REGPARM(1) MC_(helperc_b_load8)( Addr a ) {
6250 OCacheLine* line;
6251 UChar descrLo, descrHi, descr;
6252 UWord lineoff;
6253
6254 if (UNLIKELY(a & 7)) {
6255 /* Handle misaligned case, slowly. */
6256 UInt oLo = (UInt)MC_(helperc_b_load4)( a + 0 );
6257 UInt oHi = (UInt)MC_(helperc_b_load4)( a + 4 );
6258 return merge_origins(oLo, oHi);
6259 }
6260
6261 lineoff = oc_line_offset(a);
6262 if (OC_ENABLE_ASSERTIONS) {
6263 tl_assert(lineoff == (lineoff & 6)); /*0,2,4,6*//*since 8-aligned*/
6264 }
6265
6266 line = find_OCacheLine( a );
6267
6268 descrLo = line->descr[lineoff + 0];
6269 descrHi = line->descr[lineoff + 1];
6270 descr = descrLo | descrHi;
6271 if (OC_ENABLE_ASSERTIONS) {
6272 tl_assert(descr < 0x10);
6273 }
6274
6275 if (LIKELY(0 == descr)) {
6276 return 0; /* both 32-bit chunks are defined */
6277 } else {
6278 UInt oLo = descrLo == 0 ? 0 : line->w32[lineoff + 0];
6279 UInt oHi = descrHi == 0 ? 0 : line->w32[lineoff + 1];
6280 return merge_origins(oLo, oHi);
6281 }
6282}
6283
6284UWord VG_REGPARM(1) MC_(helperc_b_load16)( Addr a ) {
6285 UInt oLo = (UInt)MC_(helperc_b_load8)( a + 0 );
6286 UInt oHi = (UInt)MC_(helperc_b_load8)( a + 8 );
6287 UInt oBoth = merge_origins(oLo, oHi);
6288 return (UWord)oBoth;
6289}
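/* In the load handlers above, line->descr[lineoff] carries one bit per byte
   of the corresponding 32-bit chunk: a set bit means "this byte has an
   interesting origin, namely line->w32[lineoff]"; a clear bit means "no
   origin recorded".  A small worked example (illustrative; assumes the line
   starts out clear and otag is nonzero): after
   MC_(helperc_b_store1)( a, otag ) on a 4-aligned address a, that chunk's
   descr is 0x1, so MC_(helperc_b_load1)( a ) returns otag, while
   MC_(helperc_b_load1)( a+1 ) still returns 0. */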
6290
6291
6292/*--------------------------------------------*/
6293/*--- Origin tracking: store handlers ---*/
6294/*--------------------------------------------*/
6295
6296void VG_REGPARM(2) MC_(helperc_b_store1)( Addr a, UWord d32 ) {
6297 OCacheLine* line;
6298 UWord lineoff = oc_line_offset(a);
6299 UWord byteoff = a & 3; /* 0, 1, 2 or 3 */
6300
6301 if (OC_ENABLE_ASSERTIONS) {
6302 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
6303 }
6304
6305 line = find_OCacheLine( a );
6306
6307 if (d32 == 0) {
6308 line->descr[lineoff] &= ~(1 << byteoff);
6309 } else {
6310 line->descr[lineoff] |= (1 << byteoff);
6311 line->w32[lineoff] = d32;
6312 }
6313}
6314
6315void VG_REGPARM(2) MC_(helperc_b_store2)( Addr a, UWord d32 ) {
6316 OCacheLine* line;
6317 UWord lineoff, byteoff;
6318
6319 if (UNLIKELY(a & 1)) {
6320 /* Handle misaligned case, slowly. */
6321 MC_(helperc_b_store1)( a + 0, d32 );
6322 MC_(helperc_b_store1)( a + 1, d32 );
6323 return;
6324 }
6325
6326 lineoff = oc_line_offset(a);
6327 byteoff = a & 3; /* 0 or 2 */
6328
6329 if (OC_ENABLE_ASSERTIONS) {
6330 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
6331 }
6332
6333 line = find_OCacheLine( a );
6334
6335 if (d32 == 0) {
6336 line->descr[lineoff] &= ~(3 << byteoff);
6337 } else {
6338 line->descr[lineoff] |= (3 << byteoff);
6339 line->w32[lineoff] = d32;
6340 }
6341}
6342
6343void VG_REGPARM(2) MC_(helperc_b_store4)( Addr a, UWord d32 ) {
6344 OCacheLine* line;
6345 UWord lineoff;
6346
6347 if (UNLIKELY(a & 3)) {
6348 /* Handle misaligned case, slowly. */
6349 MC_(helperc_b_store2)( a + 0, d32 );
6350 MC_(helperc_b_store2)( a + 2, d32 );
6351 return;
6352 }
6353
6354 lineoff = oc_line_offset(a);
6355 if (OC_ENABLE_ASSERTIONS) {
6356 tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
6357 }
6358
6359 line = find_OCacheLine( a );
6360
6361 if (d32 == 0) {
6362 line->descr[lineoff] = 0;
6363 } else {
6364 line->descr[lineoff] = 0xF;
6365 line->w32[lineoff] = d32;
6366 }
6367}
6368
6369void VG_REGPARM(2) MC_(helperc_b_store8)( Addr a, UWord d32 ) {
6370 OCacheLine* line;
6371 UWord lineoff;
6372
6373 if (UNLIKELY(a & 7)) {
6374 /* Handle misaligned case, slowly. */
6375 MC_(helperc_b_store4)( a + 0, d32 );
6376 MC_(helperc_b_store4)( a + 4, d32 );
6377 return;
6378 }
6379
6380 lineoff = oc_line_offset(a);
6381 if (OC_ENABLE_ASSERTIONS) {
6382 tl_assert(lineoff == (lineoff & 6)); /*0,2,4,6*//*since 8-aligned*/
6383 }
6384
6385 line = find_OCacheLine( a );
6386
6387 if (d32 == 0) {
6388 line->descr[lineoff + 0] = 0;
6389 line->descr[lineoff + 1] = 0;
6390 } else {
6391 line->descr[lineoff + 0] = 0xF;
6392 line->descr[lineoff + 1] = 0xF;
6393 line->w32[lineoff + 0] = d32;
6394 line->w32[lineoff + 1] = d32;
6395 }
6396}
6397
6398void VG_REGPARM(2) MC_(helperc_b_store16)( Addr a, UWord d32 ) {
6399 MC_(helperc_b_store8)( a + 0, d32 );
6400 MC_(helperc_b_store8)( a + 8, d32 );
6401}
6402
6403
6404/*--------------------------------------------*/
6405/*--- Origin tracking: sarp handlers ---*/
6406/*--------------------------------------------*/
6407
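/* "sarp" presumably abbreviates set-address-range-perms; these two
   routines set or clear origin tags over an arbitrary [a, a+len)
   range.  Each peels off leading 1- and 2-byte pieces until 4-byte
   alignment is reached, handles the bulk with 4-byte stores, and
   finishes with a 2- and/or 1-byte tail. */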
6408__attribute__((noinline))
6409static void ocache_sarp_Set_Origins ( Addr a, UWord len, UInt otag ) {
6410 if ((a & 1) && len >= 1) {
6411 MC_(helperc_b_store1)( a, otag );
6412 a++;
6413 len--;
6414 }
6415 if ((a & 2) && len >= 2) {
6416 MC_(helperc_b_store2)( a, otag );
6417 a += 2;
6418 len -= 2;
6419 }
6420 if (len >= 4)
6421 tl_assert(0 == (a & 3));
6422 while (len >= 4) {
6423 MC_(helperc_b_store4)( a, otag );
6424 a += 4;
6425 len -= 4;
6426 }
6427 if (len >= 2) {
6428 MC_(helperc_b_store2)( a, otag );
6429 a += 2;
6430 len -= 2;
6431 }
6432 if (len >= 1) {
6433 MC_(helperc_b_store1)( a, otag );
6434 a++;
6435 len--;
6436 }
6437 tl_assert(len == 0);
6438}
6439
6440__attribute__((noinline))
6441static void ocache_sarp_Clear_Origins ( Addr a, UWord len ) {
6442 if ((a & 1) && len >= 1) {
6443 MC_(helperc_b_store1)( a, 0 );
6444 a++;
6445 len--;
6446 }
6447 if ((a & 2) && len >= 2) {
6448 MC_(helperc_b_store2)( a, 0 );
6449 a += 2;
6450 len -= 2;
6451 }
6452 if (len >= 4)
6453 tl_assert(0 == (a & 3));
6454 while (len >= 4) {
6455 MC_(helperc_b_store4)( a, 0 );
6456 a += 4;
6457 len -= 4;
6458 }
6459 if (len >= 2) {
6460 MC_(helperc_b_store2)( a, 0 );
6461 a += 2;
6462 len -= 2;
6463 }
6464 if (len >= 1) {
6465 MC_(helperc_b_store1)( a, 0 );
6466 a++;
6467 len--;
6468 }
6469 tl_assert(len == 0);
6470}
6471
6472
njn1d0825f2006-03-27 11:37:07 +00006473/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00006474/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00006475/*------------------------------------------------------------*/
6476
sewardj7cf4e6b2008-05-01 20:24:26 +00006477
njn51d827b2005-05-09 01:02:08 +00006478static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00006479{
sewardj71bc3cb2005-05-19 00:25:45 +00006480 /* If we've been asked to emit XML, mash around various other
6481 options so as to constrain the output somewhat. */
6482 if (VG_(clo_xml)) {
6483 /* Extract as much info as possible from the leak checker. */
njn1d0825f2006-03-27 11:37:07 +00006484 /* MC_(clo_show_reachable) = True; */
6485 MC_(clo_leak_check) = LC_Full;
sewardj71bc3cb2005-05-19 00:25:45 +00006486 }
sewardj7cf4e6b2008-05-01 20:24:26 +00006487
6488 tl_assert( MC_(clo_mc_level) >= 1 && MC_(clo_mc_level) <= 3 );
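   /* The level is used below and elsewhere in this file: level 3
      enables origin tracking, level 2 is the usual definedness
      (V bit) checking without origins, and level 1 appears to limit
      checking to addressability (A bits) only; note that pre_reg_read
      is registered only when the level is at least 2. */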
6489
6490 if (MC_(clo_mc_level) == 3) {
6491 /* We're doing origin tracking. */
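      /* The _w_ECU stack handlers apparently take an extra origin-tag
         (ECU, seemingly "ExeContext unique") argument describing the
         allocation point, which the plain variants registered in the
         else branch below do not. */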
6492# ifdef PERF_FAST_STACK
6493 VG_(track_new_mem_stack_4_w_ECU) ( mc_new_mem_stack_4_w_ECU );
6494 VG_(track_new_mem_stack_8_w_ECU) ( mc_new_mem_stack_8_w_ECU );
6495 VG_(track_new_mem_stack_12_w_ECU) ( mc_new_mem_stack_12_w_ECU );
6496 VG_(track_new_mem_stack_16_w_ECU) ( mc_new_mem_stack_16_w_ECU );
6497 VG_(track_new_mem_stack_32_w_ECU) ( mc_new_mem_stack_32_w_ECU );
6498 VG_(track_new_mem_stack_112_w_ECU) ( mc_new_mem_stack_112_w_ECU );
6499 VG_(track_new_mem_stack_128_w_ECU) ( mc_new_mem_stack_128_w_ECU );
6500 VG_(track_new_mem_stack_144_w_ECU) ( mc_new_mem_stack_144_w_ECU );
6501 VG_(track_new_mem_stack_160_w_ECU) ( mc_new_mem_stack_160_w_ECU );
6502# endif
6503 VG_(track_new_mem_stack_w_ECU) ( mc_new_mem_stack_w_ECU );
6504 } else {
6505 /* Not doing origin tracking */
6506# ifdef PERF_FAST_STACK
6507 VG_(track_new_mem_stack_4) ( mc_new_mem_stack_4 );
6508 VG_(track_new_mem_stack_8) ( mc_new_mem_stack_8 );
6509 VG_(track_new_mem_stack_12) ( mc_new_mem_stack_12 );
6510 VG_(track_new_mem_stack_16) ( mc_new_mem_stack_16 );
6511 VG_(track_new_mem_stack_32) ( mc_new_mem_stack_32 );
6512 VG_(track_new_mem_stack_112) ( mc_new_mem_stack_112 );
6513 VG_(track_new_mem_stack_128) ( mc_new_mem_stack_128 );
6514 VG_(track_new_mem_stack_144) ( mc_new_mem_stack_144 );
6515 VG_(track_new_mem_stack_160) ( mc_new_mem_stack_160 );
6516# endif
6517 VG_(track_new_mem_stack) ( mc_new_mem_stack );
6518 }
njn5c004e42002-11-18 11:04:50 +00006519}
6520
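/* "SM" in the statistics below stands for secondary map (a SecMap);
   sizes are reported in KB and MB. */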
njn1d0825f2006-03-27 11:37:07 +00006521static void print_SM_info(char* type, int n_SMs)
6522{
6523 VG_(message)(Vg_DebugMsg,
6524 " memcheck: SMs: %s = %d (%dk, %dM)",
6525 type,
6526 n_SMs,
6527 n_SMs * sizeof(SecMap) / 1024,
6528 n_SMs * sizeof(SecMap) / (1024 * 1024) );
6529}
6530
njn51d827b2005-05-09 01:02:08 +00006531static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00006532{
njn1d0825f2006-03-27 11:37:07 +00006533 MC_(print_malloc_stats)();
sewardj23eb2fd2005-04-22 16:29:19 +00006534
njn1d0825f2006-03-27 11:37:07 +00006535 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
6536 if (MC_(clo_leak_check) == LC_Off)
6537 VG_(message)(Vg_UserMsg,
6538 "For a detailed leak analysis, rerun with: --leak-check=yes");
6539
6540 VG_(message)(Vg_UserMsg,
6541 "For counts of detected errors, rerun with: -v");
6542 }
sewardj7cf4e6b2008-05-01 20:24:26 +00006543
6544
6545 if (any_value_errors && !VG_(clo_xml) && VG_(clo_verbosity) >= 1
6546 && MC_(clo_mc_level) == 2) {
6547 VG_(message)(Vg_UserMsg,
6548 "Use --track-origins=yes to see where "
6549 "uninitialised values come from");
6550 }
6551
njn1d0825f2006-03-27 11:37:07 +00006552 if (MC_(clo_leak_check) != LC_Off)
6553 mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));
6554
6555 done_prof_mem();
sewardjae986ca2005-10-12 12:53:20 +00006556
sewardj45d94cc2005-04-20 14:44:11 +00006557 if (VG_(clo_verbosity) > 1) {
njn1d0825f2006-03-27 11:37:07 +00006558 SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
6559
sewardj45d94cc2005-04-20 14:44:11 +00006560 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00006561 " memcheck: sanity checks: %d cheap, %d expensive",
6562 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00006563 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00006564 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
sewardj05a46732006-10-17 01:28:10 +00006565 n_auxmap_L2_nodes,
6566 n_auxmap_L2_nodes * 64,
6567 n_auxmap_L2_nodes / 16 );
sewardj23eb2fd2005-04-22 16:29:19 +00006568 VG_(message)(Vg_DebugMsg,
sewardj05a46732006-10-17 01:28:10 +00006569 " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
6570 n_auxmap_L1_searches, n_auxmap_L1_cmps,
6571 (10ULL * n_auxmap_L1_cmps)
6572 / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
6573 );
6574 VG_(message)(Vg_DebugMsg,
6575 " memcheck: auxmaps_L2: %lld searches, %lld nodes",
6576 n_auxmap_L2_searches, n_auxmap_L2_nodes
6577 );
sewardj23eb2fd2005-04-22 16:29:19 +00006578
njndbf7ca72006-03-31 11:57:59 +00006579 print_SM_info("n_issued ", n_issued_SMs);
6580 print_SM_info("n_deissued ", n_deissued_SMs);
6581 print_SM_info("max_noaccess ", max_noaccess_SMs);
6582 print_SM_info("max_undefined", max_undefined_SMs);
6583 print_SM_info("max_defined ", max_defined_SMs);
6584 print_SM_info("max_non_DSM ", max_non_DSM_SMs);
njn1d0825f2006-03-27 11:37:07 +00006585
 6586 // Three DSMs (distinguished secondary maps), plus the non-DSM ones
6587 max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
6588 // The 3*sizeof(Word) bytes is the AVL node metadata size.
6589 // The 4*sizeof(Word) bytes is the malloc metadata size.
6590 // Hardwiring these sizes in sucks, but I don't see how else to do it.
6591 max_secVBit_szB = max_secVBit_nodes *
6592 (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
6593 max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
sewardj23eb2fd2005-04-22 16:29:19 +00006594
6595 VG_(message)(Vg_DebugMsg,
njn1d0825f2006-03-27 11:37:07 +00006596 " memcheck: max sec V bit nodes: %d (%dk, %dM)",
6597 max_secVBit_nodes, max_secVBit_szB / 1024,
6598 max_secVBit_szB / (1024 * 1024));
6599 VG_(message)(Vg_DebugMsg,
6600 " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
6601 sec_vbits_new_nodes + sec_vbits_updates,
6602 sec_vbits_new_nodes, sec_vbits_updates );
6603 VG_(message)(Vg_DebugMsg,
6604 " memcheck: max shadow mem size: %dk, %dM",
6605 max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
sewardj7cf4e6b2008-05-01 20:24:26 +00006606
6607 if (MC_(clo_mc_level) >= 3) {
6608 VG_(message)(Vg_DebugMsg,
6609 " ocacheL1: %,12lu refs %,12lu misses (%,lu lossage)",
6610 stats_ocacheL1_find,
6611 stats_ocacheL1_misses,
6612 stats_ocacheL1_lossage );
6613 VG_(message)(Vg_DebugMsg,
6614 " ocacheL1: %,12lu at 0 %,12lu at 1",
6615 stats_ocacheL1_find - stats_ocacheL1_misses
6616 - stats_ocacheL1_found_at_1
6617 - stats_ocacheL1_found_at_N,
6618 stats_ocacheL1_found_at_1 );
6619 VG_(message)(Vg_DebugMsg,
6620 " ocacheL1: %,12lu at 2+ %,12lu move-fwds",
6621 stats_ocacheL1_found_at_N,
6622 stats_ocacheL1_movefwds );
6623 VG_(message)(Vg_DebugMsg,
6624 " ocacheL1: %,12lu sizeB %,12lu useful",
6625 (UWord)sizeof(OCache),
6626 4 * OC_W32S_PER_LINE * OC_LINES_PER_SET * OC_N_SETS );
6627 VG_(message)(Vg_DebugMsg,
6628 " ocacheL2: %,12lu refs %,12lu misses",
6629 stats__ocacheL2_refs,
6630 stats__ocacheL2_misses );
6631 VG_(message)(Vg_DebugMsg,
6632 " ocacheL2: %,9lu max nodes %,9lu curr nodes",
6633 stats__ocacheL2_n_nodes_max,
6634 stats__ocacheL2_n_nodes );
6635 VG_(message)(Vg_DebugMsg,
6636 " niacache: %,12lu refs %,12lu misses",
6637 stats__nia_cache_queries, stats__nia_cache_misses);
6638 }
sewardj45d94cc2005-04-20 14:44:11 +00006639 }
6640
njn5c004e42002-11-18 11:04:50 +00006641 if (0) {
6642 VG_(message)(Vg_DebugMsg,
6643 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00006644 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00006645 }
njn25e49d8e72002-09-23 09:36:25 +00006646}
6647
njn51d827b2005-05-09 01:02:08 +00006648static void mc_pre_clo_init(void)
6649{
6650 VG_(details_name) ("Memcheck");
6651 VG_(details_version) (NULL);
6652 VG_(details_description) ("a memory error detector");
6653 VG_(details_copyright_author)(
sewardj4d474d02008-02-11 11:34:59 +00006654 "Copyright (C) 2002-2008, and GNU GPL'd, by Julian Seward et al.");
njn51d827b2005-05-09 01:02:08 +00006655 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj05a46732006-10-17 01:28:10 +00006656 VG_(details_avg_translation_sizeB) ( 556 );
njn51d827b2005-05-09 01:02:08 +00006657
6658 VG_(basic_tool_funcs) (mc_post_clo_init,
6659 MC_(instrument),
6660 mc_fini);
6661
sewardj81651dc2007-08-28 06:05:20 +00006662 VG_(needs_final_IR_tidy_pass) ( MC_(final_tidy) );
6663
6664
njn51d827b2005-05-09 01:02:08 +00006665 VG_(needs_core_errors) ();
njn1d0825f2006-03-27 11:37:07 +00006666 VG_(needs_tool_errors) (mc_eq_Error,
njn51d827b2005-05-09 01:02:08 +00006667 mc_pp_Error,
sewardj39f34232007-11-09 23:02:28 +00006668 True,/*show TIDs for errors*/
njn1d0825f2006-03-27 11:37:07 +00006669 mc_update_extra,
njn51d827b2005-05-09 01:02:08 +00006670 mc_recognised_suppression,
njn1d0825f2006-03-27 11:37:07 +00006671 mc_read_extra_suppression_info,
6672 mc_error_matches_suppression,
6673 mc_get_error_name,
6674 mc_print_extra_suppression_info);
njn51d827b2005-05-09 01:02:08 +00006675 VG_(needs_libc_freeres) ();
njn1d0825f2006-03-27 11:37:07 +00006676 VG_(needs_command_line_options)(mc_process_cmd_line_options,
njn51d827b2005-05-09 01:02:08 +00006677 mc_print_usage,
6678 mc_print_debug_usage);
6679 VG_(needs_client_requests) (mc_handle_client_request);
6680 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
6681 mc_expensive_sanity_check);
njn1d0825f2006-03-27 11:37:07 +00006682 VG_(needs_malloc_replacement) (MC_(malloc),
6683 MC_(__builtin_new),
6684 MC_(__builtin_vec_new),
6685 MC_(memalign),
6686 MC_(calloc),
6687 MC_(free),
6688 MC_(__builtin_delete),
6689 MC_(__builtin_vec_delete),
6690 MC_(realloc),
6691 MC_MALLOC_REDZONE_SZB );
njnca54af32006-04-16 10:25:43 +00006692 VG_(needs_xml_output) ();
njn51d827b2005-05-09 01:02:08 +00006693
njn1d0825f2006-03-27 11:37:07 +00006694 VG_(track_new_mem_startup) ( mc_new_mem_startup );
sewardj7cf4e6b2008-05-01 20:24:26 +00006695 VG_(track_new_mem_stack_signal)( make_mem_undefined_w_tid );
6696 VG_(track_new_mem_brk) ( make_mem_undefined_w_tid );
njn1d0825f2006-03-27 11:37:07 +00006697 VG_(track_new_mem_mmap) ( mc_new_mem_mmap );
njn51d827b2005-05-09 01:02:08 +00006698
njn1d0825f2006-03-27 11:37:07 +00006699 VG_(track_copy_mem_remap) ( MC_(copy_address_range_state) );
njn81623712005-10-07 04:48:37 +00006700
6701 // Nb: we don't do anything with mprotect. This means that V bits are
6702 // preserved if a program, for example, marks some memory as inaccessible
6703 // and then later marks it as accessible again.
6704 //
6705 // If an access violation occurs (eg. writing to read-only memory) we let
6706 // it fault and print an informative termination message. This doesn't
6707 // happen if the program catches the signal, though, which is bad. If we
6708 // had two A bits (for readability and writability) that were completely
6709 // distinct from V bits, then we could handle all this properly.
6710 VG_(track_change_mem_mprotect) ( NULL );
njn51d827b2005-05-09 01:02:08 +00006711
njndbf7ca72006-03-31 11:57:59 +00006712 VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
6713 VG_(track_die_mem_brk) ( MC_(make_mem_noaccess) );
6714 VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00006715
sewardj7cf4e6b2008-05-01 20:24:26 +00006716 /* Defer the specification of the new_mem_stack functions to the
6717 post_clo_init function, since we need to first parse the command
6718 line before deciding which set to use. */
njn51d827b2005-05-09 01:02:08 +00006719
sewardj7cf4e6b2008-05-01 20:24:26 +00006720# ifdef PERF_FAST_STACK
njn1d0825f2006-03-27 11:37:07 +00006721 VG_(track_die_mem_stack_4) ( mc_die_mem_stack_4 );
6722 VG_(track_die_mem_stack_8) ( mc_die_mem_stack_8 );
6723 VG_(track_die_mem_stack_12) ( mc_die_mem_stack_12 );
6724 VG_(track_die_mem_stack_16) ( mc_die_mem_stack_16 );
6725 VG_(track_die_mem_stack_32) ( mc_die_mem_stack_32 );
6726 VG_(track_die_mem_stack_112) ( mc_die_mem_stack_112 );
6727 VG_(track_die_mem_stack_128) ( mc_die_mem_stack_128 );
6728 VG_(track_die_mem_stack_144) ( mc_die_mem_stack_144 );
6729 VG_(track_die_mem_stack_160) ( mc_die_mem_stack_160 );
sewardj7cf4e6b2008-05-01 20:24:26 +00006730# endif
njn1d0825f2006-03-27 11:37:07 +00006731 VG_(track_die_mem_stack) ( mc_die_mem_stack );
njn51d827b2005-05-09 01:02:08 +00006732
njndbf7ca72006-03-31 11:57:59 +00006733 VG_(track_ban_mem_stack) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00006734
njndbf7ca72006-03-31 11:57:59 +00006735 VG_(track_pre_mem_read) ( check_mem_is_defined );
6736 VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
6737 VG_(track_pre_mem_write) ( check_mem_is_addressable );
njn1d0825f2006-03-27 11:37:07 +00006738 VG_(track_post_mem_write) ( mc_post_mem_write );
njn51d827b2005-05-09 01:02:08 +00006739
sewardj7cf4e6b2008-05-01 20:24:26 +00006740 if (MC_(clo_mc_level) >= 2)
njn1d0825f2006-03-27 11:37:07 +00006741 VG_(track_pre_reg_read) ( mc_pre_reg_read );
njn51d827b2005-05-09 01:02:08 +00006742
njn1d0825f2006-03-27 11:37:07 +00006743 VG_(track_post_reg_write) ( mc_post_reg_write );
6744 VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );
njn51d827b2005-05-09 01:02:08 +00006745
6746 init_shadow_memory();
sewardj3f94a7d2007-08-25 07:19:08 +00006747 MC_(malloc_list) = VG_(HT_construct)( "MC_(malloc_list)" );
6748 MC_(mempool_list) = VG_(HT_construct)( "MC_(mempool_list)" );
njn1d0825f2006-03-27 11:37:07 +00006749 init_prof_mem();
njn51d827b2005-05-09 01:02:08 +00006750
6751 tl_assert( mc_expensive_sanity_check() );
njn1d0825f2006-03-27 11:37:07 +00006752
6753 // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
6754 tl_assert(sizeof(UWord) == sizeof(Addr));
sewardj05a46732006-10-17 01:28:10 +00006755 // Call me paranoid. I don't care.
6756 tl_assert(sizeof(void*) == sizeof(Addr));
njn1d0825f2006-03-27 11:37:07 +00006757
6758 // BYTES_PER_SEC_VBIT_NODE must be a power of two.
6759 tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
sewardj7cf4e6b2008-05-01 20:24:26 +00006760
6761 init_OCache();
6762 init_nia_to_ecu_cache();
njn51d827b2005-05-09 01:02:08 +00006763}
6764
sewardj45f4e7c2005-09-27 19:20:21 +00006765VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00006766
njn25e49d8e72002-09-23 09:36:25 +00006767/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00006768/*--- end ---*/
njn25e49d8e72002-09-23 09:36:25 +00006769/*--------------------------------------------------------------------*/