
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the           ---*/
/*--- accessibility (A) and validity (V) status of each byte.      ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2008 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_oset.h"
#include "pub_tool_debuginfo.h"     // VG_(get_dataname_and_offset)

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


/* Set to 1 to do a little more sanity checking */
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Fast-case knobs                                      ---*/
/*------------------------------------------------------------*/

// Comment these out to disable the fast cases (don't just set them to zero).

#define PERF_FAST_LOADV    1
#define PERF_FAST_STOREV   1

#define PERF_FAST_SARP     1

#define PERF_FAST_STACK    1
#define PERF_FAST_STACK2   1

/*------------------------------------------------------------*/
/*--- V bits and A bits                                    ---*/
/*------------------------------------------------------------*/

/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
   thinks the corresponding value bit is defined.  And every memory byte
   has an A bit, which tracks whether Memcheck thinks the program can access
   it safely.  So every N-bit register is shadowed with N V bits, and every
   memory byte is shadowed with 8 V bits and one A bit.

   In the implementation, we use two forms of compression (compressed V bits
   and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
   for memory.

   Memcheck also tracks extra information about each heap block that is
   allocated, for detecting memory leaks and other purposes.
*/

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map (a.k.a. shadow
   memory), which records the state of all memory in the process.

   On 32-bit machines the memory map is organised as follows.
   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by two bits (details are below).  So
   each second-level map contains 16384 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each size 64k
   bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since many of the 64kB chunks will
   have the same status for every bit -- ie. noaccess (for unused
   address space) or entirely addressable and defined (for code segments) --
   there are three distinguished secondary maps, which indicate 'noaccess',
   'undefined' and 'defined'.  For these uniform 64kB chunks, the primary
   map entry points to the relevant distinguished map.  In practice,
   typically more than half of the addressable memory is represented with
   the 'undefined' or 'defined' distinguished secondary map, so it gives a
   good saving.  It also lets us set the V+A bits of large address regions
   quickly in set_address_range_perms().

   On 64-bit machines it's more complicated.  If we followed the same basic
   scheme we'd have a four-level table which would require too many memory
   accesses.  So instead the top-level map table has 2^19 entries (indexed
   using bits 16..34 of the address); this covers the bottom 32GB.  Any
   accesses above 32GB are handled with a slow, sparse auxiliary table.
   Valgrind's address space manager tries very hard to keep things below
   this 32GB barrier so that performance doesn't suffer too much.

   Note that this file has a lot of different functions for reading and
   writing shadow memory.  Only a couple are strictly necessary (eg.
   get_vabits2 and set_vabits2), most are just specialised for specific
   common cases to improve performance.

   Aside: the V+A bits are less precise than they could be -- we have no way
   of marking memory as read-only.  It would be great if we could add an
   extra state VA_BITSn_READONLY.  But then we'd have 5 different states,
   which requires 2.3 bits to hold, and there's no way to do that elegantly
   -- we'd have to double up to 4 bits of metadata per byte, which doesn't
   seem worth it.
*/

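/* Illustrative example of the 32-bit lookup (the address used here is
   hypothetical, not taken from the original sources): for an access to
   0x0804A3F7, the top 16 bits (0x0804) index the top-level map, giving a
   pointer to the secondary map covering 0x08040000 .. 0x0804FFFF; the low
   16 bits (0xA3F7) then locate this byte's 2-bit state within that
   secondary map (see SM_OFF below). */
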
/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries. */
#  define N_PRIMARY_BITS  19

#endif


/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Secondary maps --------------- */

// Each byte of memory conceptually has an A bit, which indicates its
// addressability, and 8 V bits, which indicates its definedness.
//
// But because very few bytes are partially defined, we can use a nice
// compression scheme to reduce the size of shadow memory.  Each byte of
// memory has 2 bits which indicates its state (ie. V+A bits):
//
//   00:  noaccess    (unaddressable but treated as fully defined)
//   01:  undefined   (addressable and fully undefined)
//   10:  defined     (addressable and fully defined)
//   11:  partdefined (addressable and partially defined)
//
// In the "partdefined" case, we use a secondary table to store the V bits.
// Each entry in the secondary-V-bits table maps a byte address to its 8 V
// bits.
//
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
// four bytes (32 bits) of memory are in each chunk.  Hence the name
// "vabits8".  This lets us get the V+A bits for four bytes at a time
// easily (without having to do any shifting and/or masking), and that is a
// very common operation.  (Note that although each vabits8 chunk
// is 8 bits in size, it represents 32 bits of memory.)
//
// The representation is "inverse" little-endian... each 4 bytes of
// memory is represented by a 1 byte value, where:
//
// - the status of byte (a+0) is held in bits [1..0]
// - the status of byte (a+1) is held in bits [3..2]
// - the status of byte (a+2) is held in bits [5..4]
// - the status of byte (a+3) is held in bits [7..6]
//
// It's "inverse" because endianness normally describes a mapping from
// value bits to memory addresses;  in this case the mapping is inverted.
// Ie. instead of particular value bits being held in certain addresses, in
// this case certain addresses are represented by particular value bits.
// See insert_vabits2_into_vabits8() for an example.
//
// But note that we don't compress the V bits stored in registers;  they
// need to be explicit to make the shadow operations possible.  Therefore
// when moving values between registers and memory we need to convert
// between the expanded in-register format and the compressed in-memory
// format.  This isn't so difficult, it just requires careful attention in a
// few places.

// These represent eight bits of memory.
#define VA_BITS2_NOACCESS     0x0      // 00b
#define VA_BITS2_UNDEFINED    0x1      // 01b
#define VA_BITS2_DEFINED      0x2      // 10b
#define VA_BITS2_PARTDEFINED  0x3      // 11b

// These represent 16 bits of memory.
#define VA_BITS4_NOACCESS     0x0      // 00_00b
#define VA_BITS4_UNDEFINED    0x5      // 01_01b
#define VA_BITS4_DEFINED      0xa      // 10_10b

// These represent 32 bits of memory.
#define VA_BITS8_NOACCESS     0x00     // 00_00_00_00b
#define VA_BITS8_UNDEFINED    0x55     // 01_01_01_01b
#define VA_BITS8_DEFINED      0xaa     // 10_10_10_10b

// These represent 64 bits of memory.
#define VA_BITS16_NOACCESS    0x0000   // 00_00_00_00b x 2
#define VA_BITS16_UNDEFINED   0x5555   // 01_01_01_01b x 2
#define VA_BITS16_DEFINED     0xaaaa   // 10_10_10_10b x 2

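// Illustrative example (the byte states are hypothetical): if byte a+0 is
// defined, a+1 undefined, a+2 defined and a+3 noaccess, the vabits8 chunk
// for the aligned 4-byte group at a is
//
//      (VA_BITS2_NOACCESS  << 6)   // a+3: 00b
//    | (VA_BITS2_DEFINED   << 4)   // a+2: 10b
//    | (VA_BITS2_UNDEFINED << 2)   // a+1: 01b
//    | (VA_BITS2_DEFINED   << 0)   // a+0: 10b
//    == 0x26  (binary 00_10_01_10).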

#define SM_CHUNKS             16384
#define SM_OFF(aaa)           (((aaa) & 0xffff) >> 2)
#define SM_OFF_16(aaa)        (((aaa) & 0xffff) >> 3)

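// Illustrative example (the address is hypothetical): for a == 0x0804A3F7
// the low 16 bits are 0xA3F7, so SM_OFF(a) == 0xA3F7 >> 2 == 0x28FD, the
// index of the vabits8 chunk covering bytes 0xA3F4..0xA3F7 of that 64kB
// region.  SM_OFF_16(a) == 0xA3F7 >> 3 == 0x147E indexes the same secondary
// map viewed as an array of 16-bit chunks, each covering 8 bytes of memory.
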
// Paranoia:  it's critical for performance that the requested inlining
// occurs.  So try extra hard.
#define INLINE    inline __attribute__((always_inline))

static INLINE Addr start_of_this_sm ( Addr a ) {
   return (a & (~SM_MASK));
}
static INLINE Bool is_start_of_sm ( Addr a ) {
   return (start_of_this_sm(a) == a);
}

typedef
   struct {
      UChar vabits8[SM_CHUNKS];
   }
   SecMap;

// 3 distinguished secondary maps, one for no-access, one for
// accessible but undefined, and one for accessible and defined.
// Distinguished secondaries may never be modified.
#define SM_DIST_NOACCESS   0
#define SM_DIST_UNDEFINED  1
#define SM_DIST_DEFINED    2

static SecMap sm_distinguished[3];

static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

// Forward declaration
static void update_SM_counts(SecMap* oldSM, SecMap* newSM);

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
   if (new_sm == NULL)
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
                                   sizeof(SecMap) );
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   update_SM_counts(dist_sm, new_sm);
   return new_sm;
}

/* --------------- Stats --------------- */

static Int   n_issued_SMs      = 0;
static Int   n_deissued_SMs    = 0;
static Int   n_noaccess_SMs    = N_PRIMARY_MAP; // start with many noaccess DSMs
static Int   n_undefined_SMs   = 0;
static Int   n_defined_SMs     = 0;
static Int   n_non_DSM_SMs     = 0;
static Int   max_noaccess_SMs  = 0;
static Int   max_undefined_SMs = 0;
static Int   max_defined_SMs   = 0;
static Int   max_non_DSM_SMs   = 0;

/* # searches initiated in auxmap_L1, and # base cmps required */
static ULong n_auxmap_L1_searches  = 0;
static ULong n_auxmap_L1_cmps      = 0;
/* # of searches that missed in auxmap_L1 and therefore had to
   be handed to auxmap_L2.  And the number of nodes inserted. */
static ULong n_auxmap_L2_searches  = 0;
static ULong n_auxmap_L2_nodes     = 0;

static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;

static Int   n_secVBit_nodes   = 0;
static Int   max_secVBit_nodes = 0;

static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
{
   if      (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
   else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
   else if (oldSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs --;
   else                                                  { n_non_DSM_SMs --;
                                                           n_deissued_SMs ++; }

   if      (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
   else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
   else if (newSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs ++;
   else                                                  { n_non_DSM_SMs ++;
                                                           n_issued_SMs ++; }

   if (n_noaccess_SMs  > max_noaccess_SMs )  max_noaccess_SMs  = n_noaccess_SMs;
   if (n_undefined_SMs > max_undefined_SMs)  max_undefined_SMs = n_undefined_SMs;
   if (n_defined_SMs   > max_defined_SMs  )  max_defined_SMs   = n_defined_SMs;
   if (n_non_DSM_SMs   > max_non_DSM_SMs  )  max_non_DSM_SMs   = n_non_DSM_SMs;
}

/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.  DO NOT CHANGE THIS
   LAYOUT: the first word has to be the key for OSet fast lookups.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* Tunable parameter: How big is the L1 queue? */
#define N_AUXMAP_L1 24

/* Tunable parameter: How far along the L1 queue to insert
   entries resulting from L2 lookups? */
#define AUXMAP_L1_INSERT_IX 12

static struct {
          Addr       base;
          AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
       }
       auxmap_L1[N_AUXMAP_L1];

static OSet* auxmap_L2 = NULL;

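/* In effect, auxmap_L1 is a small self-organising front cache for
   auxmap_L2: a hit in slot 0 or 1 is handled specially, any other L1 hit
   moves the entry one slot towards the front, and an entry found only in
   the L2 OSet is (re)inserted into L1 at AUXMAP_L1_INSERT_IX.  See
   maybe_find_in_auxmap() below. */
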
static void init_auxmap_L1_L2 ( void )
{
   Int i;
   for (i = 0; i < N_AUXMAP_L1; i++) {
      auxmap_L1[i].base = 0;
      auxmap_L1[i].ent  = NULL;
   }

   tl_assert(0 == offsetof(AuxMapEnt,base));
   tl_assert(sizeof(Addr) == sizeof(void*));
   auxmap_L2 = VG_(OSetGen_Create)( /*keyOff*/  offsetof(AuxMapEnt,base),
                                    /*fastCmp*/ NULL,
                                    VG_(malloc), VG_(free) );
}

/* Check representation invariants; if OK return NULL; else a
   descriptive bit of text.  Also return the number of
   non-distinguished secondary maps referred to from the auxiliary
   primary maps. */

static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
{
   Word i, j;
   /* On a 32-bit platform, the L2 and L1 tables should
      both remain empty forever.

      On a 64-bit platform:
      In the L2 table:
       all .base & 0xFFFF == 0
       all .base > MAX_PRIMARY_ADDRESS
      In the L1 table:
       all .base & 0xFFFF == 0
       all (.base > MAX_PRIMARY_ADDRESS
            .base & 0xFFFF == 0
            and .ent points to an AuxMapEnt with the same .base)
           or
           (.base == 0 and .ent == NULL)
   */
   *n_secmaps_found = 0;
   if (sizeof(void*) == 4) {
      /* 32-bit platform */
      if (VG_(OSetGen_Size)(auxmap_L2) != 0)
         return "32-bit: auxmap_L2 is non-empty";
      for (i = 0; i < N_AUXMAP_L1; i++)
         if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
            return "32-bit: auxmap_L1 is non-empty";
   } else {
      /* 64-bit platform */
      UWord elems_seen = 0;
      AuxMapEnt *elem, *res;
      AuxMapEnt key;
      /* L2 table */
      VG_(OSetGen_ResetIter)(auxmap_L2);
      while ( (elem = VG_(OSetGen_Next)(auxmap_L2)) ) {
         elems_seen++;
         if (0 != (elem->base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
         if (elem->base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
         if (elem->sm == NULL)
            return "64-bit: .sm in _L2 is NULL";
         if (!is_distinguished_sm(elem->sm))
            (*n_secmaps_found)++;
      }
      if (elems_seen != n_auxmap_L2_nodes)
         return "64-bit: disagreement on number of elems in _L2";
      /* Check L1-L2 correspondence */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
            continue;
         if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
         if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
         if (auxmap_L1[i].ent == NULL)
            return "64-bit: .ent is NULL in auxmap_L1";
         if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
            return "64-bit: _L1 and _L2 bases are inconsistent";
         /* Look it up in auxmap_L2. */
         key.base = auxmap_L1[i].base;
         key.sm   = 0;
         res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
         if (res == NULL)
            return "64-bit: _L1 .base not found in _L2";
         if (res != auxmap_L1[i].ent)
            return "64-bit: _L1 .ent disagrees with _L2 entry";
      }
      /* Check L1 contains no duplicates */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0)
            continue;
         for (j = i+1; j < N_AUXMAP_L1; j++) {
            if (auxmap_L1[j].base == 0)
               continue;
            if (auxmap_L1[j].base == auxmap_L1[i].base)
               return "64-bit: duplicate _L1 .base entries";
         }
      }
   }
   return NULL; /* ok */
}

static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
{
   Word i;
   tl_assert(ent);
   tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
   for (i = N_AUXMAP_L1-1; i > rank; i--)
      auxmap_L1[i] = auxmap_L1[i-1];
   auxmap_L1[rank].base = ent->base;
   auxmap_L1[rank].ent  = ent;
}

static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   AuxMapEnt  key;
   AuxMapEnt* res;
   Word       i;

   tl_assert(a > MAX_PRIMARY_ADDRESS);
   a &= ~(Addr)0xFFFF;

   /* First search the front-cache, which is a self-organising
      list containing the most popular entries. */

   if (LIKELY(auxmap_L1[0].base == a))
      return auxmap_L1[0].ent;
   if (LIKELY(auxmap_L1[1].base == a)) {
      Addr       t_base = auxmap_L1[0].base;
      AuxMapEnt* t_ent  = auxmap_L1[0].ent;
      auxmap_L1[0].base = auxmap_L1[1].base;
      auxmap_L1[0].ent  = auxmap_L1[1].ent;
      auxmap_L1[1].base = t_base;
      auxmap_L1[1].ent  = t_ent;
      return auxmap_L1[0].ent;
   }

   n_auxmap_L1_searches++;

   for (i = 0; i < N_AUXMAP_L1; i++) {
      if (auxmap_L1[i].base == a) {
         break;
      }
   }
   tl_assert(i >= 0 && i <= N_AUXMAP_L1);

   n_auxmap_L1_cmps += (ULong)(i+1);

   if (i < N_AUXMAP_L1) {
      if (i > 0) {
         Addr       t_base = auxmap_L1[i-1].base;
         AuxMapEnt* t_ent  = auxmap_L1[i-1].ent;
         auxmap_L1[i-1].base = auxmap_L1[i-0].base;
         auxmap_L1[i-1].ent  = auxmap_L1[i-0].ent;
         auxmap_L1[i-0].base = t_base;
         auxmap_L1[i-0].ent  = t_ent;
         i--;
      }
      return auxmap_L1[i].ent;
   }

   n_auxmap_L2_searches++;

   /* First see if we already have it. */
   key.base = a;
   key.sm   = 0;

   res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
   if (res)
      insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
   return res;
}

static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt *nyu, *res;

   /* First see if we already have it. */
   res = maybe_find_in_auxmap( a );
   if (LIKELY(res))
      return res;

   /* Ok, there's no entry in the secondary map, so we'll have
      to allocate one. */
   a &= ~(Addr)0xFFFF;

   nyu = (AuxMapEnt*) VG_(OSetGen_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
   tl_assert(nyu);
   nyu->base = a;
   nyu->sm   = &sm_distinguished[SM_DIST_NOACCESS];
   VG_(OSetGen_Insert)( auxmap_L2, nyu );
   insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
   n_auxmap_L2_nodes++;
   return nyu;
}

/* --------------- SecMap fundamentals --------------- */

// In all these, 'low' means it's definitely in the main primary map,
// 'high' means it's definitely in the auxiliary table.

static INLINE SecMap** get_secmap_low_ptr ( Addr a )
{
   UWord pm_off = a >> 16;
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(pm_off < N_PRIMARY_MAP);
#  endif
   return &primary_map[ pm_off ];
}

static INLINE SecMap** get_secmap_high_ptr ( Addr a )
{
   AuxMapEnt* am = find_or_alloc_in_auxmap(a);
   return &am->sm;
}

static SecMap** get_secmap_ptr ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_low_ptr(a)
          : get_secmap_high_ptr(a));
}

static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
{
   return *get_secmap_low_ptr(a);
}

static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
{
   return *get_secmap_high_ptr(a);
}

static INLINE SecMap* get_secmap_for_writing_low(Addr a)
{
   SecMap** p = get_secmap_low_ptr(a);
   if (UNLIKELY(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
{
   SecMap** p = get_secmap_high_ptr(a);
   if (UNLIKELY(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static INLINE SecMap* get_secmap_for_reading ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_reading_low (a)
          : get_secmap_for_reading_high(a) );
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_for_writing ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_writing_low (a)
          : get_secmap_for_writing_high(a) );
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      return get_secmap_for_reading_low(a);
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}

/* --------------- Fundamental functions --------------- */

static INLINE
void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
{
   UInt shift =  (a & 3)  << 1;        // shift by 0, 2, 4, or 6
   *vabits8  &= ~(0x3     << shift);   // mask out the two old bits
   *vabits8  |=  (vabits2 << shift);   // mask in the two new bits
}

static INLINE
void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
   shift     =  (a & 2)   << 1;        // shift by 0 or 4
   *vabits8 &= ~(0xf      << shift);   // mask out the four old bits
   *vabits8 |=  (vabits4  << shift);   // mask in the four new bits
}

static INLINE
UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift = (a & 3) << 1;          // shift by 0, 2, 4, or 6
   vabits8 >>= shift;                  // shift the two bits to the bottom
   return 0x3 & vabits8;               // mask out the rest
}

static INLINE
UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
   shift = (a & 2) << 1;               // shift by 0 or 4
   vabits8 >>= shift;                  // shift the four bits to the bottom
   return 0xf & vabits8;               // mask out the rest
}

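// Illustrative example (the address and values are hypothetical): with
// a == 0x5006 we have (a & 3) == 2, so the shift is 4.  Inserting
// VA_BITS2_DEFINED (0x2) into a vabits8 of 0x00 gives 0x20, and
// extract_vabits2_from_vabits8(0x5006, 0x20) recovers 0x2.
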
// Note that these four are only used in slow cases.  The fast cases do
// clever things like combine the auxmap check (in
// get_secmap_{read,writ}able) with alignment checks.

// *** WARNING! ***
// Any time this function is called, if it is possible that vabits2
// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
// sec-V-bits table must also be set!
static INLINE
void set_vabits2 ( Addr a, UChar vabits2 )
{
   SecMap* sm       = get_secmap_for_writing(a);
   UWord   sm_off   = SM_OFF(a);
   insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
}

static INLINE
UChar get_vabits2 ( Addr a )
{
   SecMap* sm       = get_secmap_for_reading(a);
   UWord   sm_off   = SM_OFF(a);
   UChar   vabits8  = sm->vabits8[sm_off];
   return extract_vabits2_from_vabits8(a, vabits8);
}

// *** WARNING! ***
// Any time this function is called, if it is possible that any of the
// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
// corresponding entry(s) in the sec-V-bits table must also be set!
static INLINE
UChar get_vabits8_for_aligned_word32 ( Addr a )
{
   SecMap* sm       = get_secmap_for_reading(a);
   UWord   sm_off   = SM_OFF(a);
   UChar   vabits8  = sm->vabits8[sm_off];
   return vabits8;
}

static INLINE
void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
{
   SecMap* sm       = get_secmap_for_writing(a);
   UWord   sm_off   = SM_OFF(a);
   sm->vabits8[sm_off] = vabits8;
}


// Forward declarations
static UWord get_sec_vbits8(Addr a);
static void  set_sec_vbits8(Addr a, UWord vbits8);

// Returns False if there was an addressability error.
static INLINE
Bool set_vbits8 ( Addr a, UChar vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);
   if ( VA_BITS2_NOACCESS != vabits2 ) {
      // Addressable.  Convert in-register format to in-memory format.
      // Also remove any existing sec V bit entry for the byte if no
      // longer necessary.
      if      ( V_BITS8_DEFINED   == vbits8 ) { vabits2 = VA_BITS2_DEFINED;   }
      else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
      else                                    { vabits2 = VA_BITS2_PARTDEFINED;
                                                set_sec_vbits8(a, vbits8);  }
      set_vabits2(a, vabits2);

   } else {
      // Unaddressable!  Do nothing -- when writing to unaddressable
      // memory it acts as a black hole, and the V bits can never be seen
      // again.  So we don't have to write them at all.
      ok = False;
   }
   return ok;
}

// Returns False if there was an addressability error.  In that case, we put
// all defined bits into vbits8.
static INLINE
Bool get_vbits8 ( Addr a, UChar* vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);

   // Convert the in-memory format to in-register format.
   if      ( VA_BITS2_DEFINED   == vabits2 ) { *vbits8 = V_BITS8_DEFINED;   }
   else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
   else if ( VA_BITS2_NOACCESS  == vabits2 ) {
      *vbits8 = V_BITS8_DEFINED;    // Make V bits defined!
      ok = False;
   } else {
      tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
      *vbits8 = get_sec_vbits8(a);
   }
   return ok;
}


/* --------------- Secondary V bit table ------------ */

// This table holds the full V bit pattern for partially-defined bytes
// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
// memory.
//
// Note: the nodes in this table can become stale.  Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
// node will not necessarily be removed.  This is because checking for
// whether removal is necessary would slow down the fast paths.
//
// To avoid the stale nodes building up too much, we periodically (once the
// table reaches a certain size) garbage collect (GC) the table by
// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
// are stale and haven't been touched for a certain number of collections.
// If more than a certain proportion of nodes survived, we increase the
// table size so that GCs occur less often.
//
// (So this is a bit different to a traditional GC, where you definitely want
// to remove any dead nodes.  It's more like we have a resizable cache and
// we're trying to find the right balance of how many elements to evict and
// how big to make the cache.)
//
// This policy is designed to avoid bad table bloat in the worst case where
// a program creates huge numbers of stale PDBs -- we would get this bloat
// if we had no GC -- while handling well the case where a node becomes
// stale but shortly afterwards is rewritten with a PDB and so becomes
// non-stale again (which happens quite often, eg. in perf/bz2).  If we just
// remove all stale nodes as soon as possible, we just end up re-adding a
// lot of them in later again.  The "sufficiently stale" approach avoids
// this.  (If a program has many live PDBs, performance will just suck,
// there's no way around that.)

static OSet* secVBitTable;

// Stats
static ULong sec_vbits_new_nodes = 0;
static ULong sec_vbits_updates   = 0;

// This must be a power of two;  this is checked in mc_pre_clo_init().
// The size chosen here is a trade-off:  if the nodes are bigger (ie. cover
// a larger address range) they take more space but we can get multiple
// partially-defined bytes in one if they are close to each other, reducing
// the number of total nodes.  In practice sometimes they are clustered (eg.
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
// row), but often not.  So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE     16

// We make the table bigger if more than this many nodes survive a GC.
#define MAX_SURVIVOR_PROPORTION  0.5

// Each time we make the table bigger, we increase it by this much.
#define TABLE_GROWTH_FACTOR      2

// This defines "sufficiently stale" -- any node that hasn't been touched in
// this many GCs will be removed.
#define MAX_STALE_AGE            2

// We GC the table when it gets this many nodes in it, ie. it's effectively
// the table size.  It can change.
static Int  secVBitLimit = 1024;

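// Illustrative arithmetic for the defaults above: with secVBitLimit at its
// initial value of 1024, a GC runs once the table holds 1024 nodes; if more
// than 1024 * MAX_SURVIVOR_PROPORTION = 512 nodes survive that GC, the
// limit is multiplied by TABLE_GROWTH_FACTOR to 2048, so the next GC
// happens correspondingly later.
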
// The number of GCs done, used to age sec-V-bit nodes for eviction.
// Because it's unsigned, wrapping doesn't matter -- the right answer will
// come out anyway.
static UInt GCs_done = 0;

typedef
   struct {
      Addr  a;
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];
      UInt  last_touched;
   }
   SecVBitNode;

static OSet* createSecVBitTable(void)
{
   return VG_(OSetGen_Create)( offsetof(SecVBitNode, a),
                               NULL, // use fast comparisons
                               VG_(malloc), VG_(free) );
}

static void gcSecVBitTable(void)
{
   OSet*        secVBitTable2;
   SecVBitNode* n;
   Int          i, n_nodes = 0, n_survivors = 0;

   GCs_done++;

   // Create the new table.
   secVBitTable2 = createSecVBitTable();

   // Traverse the table, moving fresh nodes into the new table.
   VG_(OSetGen_ResetIter)(secVBitTable);
   while ( (n = VG_(OSetGen_Next)(secVBitTable)) ) {
      Bool keep = False;
      if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
         // Keep node if it's been touched recently enough (regardless of
         // freshness/staleness).
         keep = True;
      } else {
         // Keep node if any of its bytes are non-stale.  Using
         // get_vabits2() for the lookup is not very efficient, but I don't
         // think it matters.
         for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
            if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
               keep = True;      // Found a non-stale byte, so keep
               break;
            }
         }
      }

      if ( keep ) {
         // Insert a copy of the node into the new table.
         SecVBitNode* n2 =
            VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
         *n2 = *n;
         VG_(OSetGen_Insert)(secVBitTable2, n2);
      }
   }

   // Get the before and after sizes.
   n_nodes     = VG_(OSetGen_Size)(secVBitTable);
   n_survivors = VG_(OSetGen_Size)(secVBitTable2);

   // Destroy the old table, and put the new one in its place.
   VG_(OSetGen_Destroy)(secVBitTable);
   secVBitTable = secVBitTable2;

   if (VG_(clo_verbosity) > 1) {
      Char percbuf[6];
      VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
      VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
                   n_nodes, n_survivors, percbuf);
   }

   // Increase table size if necessary.
   if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
      secVBitLimit *= TABLE_GROWTH_FACTOR;
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
                      secVBitLimit);
   }
}

static UWord get_sec_vbits8(Addr a)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          amod     = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   UChar        vbits8;
   tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   vbits8 = n->vbits8[amod];
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   return vbits8;
}

static void set_sec_vbits8(Addr a, UWord vbits8)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          i, amod  = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   if (n) {
      n->vbits8[amod] = vbits8;     // update
      n->last_touched = GCs_done;
      sec_vbits_updates++;
   } else {
      // New node:  assign the specific byte, make the rest invalid (they
      // should never be read as-is, but be cautious).
      n = VG_(OSetGen_AllocNode)(secVBitTable, sizeof(SecVBitNode));
      n->a            = aAligned;
      for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
         n->vbits8[i] = V_BITS8_UNDEFINED;
      }
      n->vbits8[amod] = vbits8;
      n->last_touched = GCs_done;

      // Do a table GC if necessary.  Nb: do this before inserting the new
      // node, to avoid erroneously GC'ing the new node.
      if (secVBitLimit == VG_(OSetGen_Size)(secVBitTable)) {
         gcSecVBitTable();
      }

      // Insert the new node.
      VG_(OSetGen_Insert)(secVBitTable, n);
      sec_vbits_new_nodes++;

      n_secVBit_nodes = VG_(OSetGen_Size)(secVBitTable);
      if (n_secVBit_nodes > max_secVBit_nodes)
         max_secVBit_nodes = n_secVBit_nodes;
   }
}

/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th most significant byte
   in a wordszB-sized word, given the specified endianness. */
static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}

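/* Illustrative example: for wordszB == 4, byteno 0..3 maps to memory
   offsets 0,1,2,3 on a little-endian target and 3,2,1,0 on a big-endian
   one; the LOADV/STOREV slow cases below use this mapping when iterating
   over the bytes of a word. */
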

/* --------------- Ignored address ranges --------------- */

#define M_IGNORE_RANGES 4

typedef
   struct {
      Int  used;
      Addr start[M_IGNORE_RANGES];
      Addr end[M_IGNORE_RANGES];
   }
   IgnoreRanges;

static IgnoreRanges ignoreRanges;

static INLINE Bool in_ignored_range ( Addr a )
{
   Int i;
   if (LIKELY(ignoreRanges.used == 0))
      return False;
   for (i = 0; i < ignoreRanges.used; i++) {
      if (a >= ignoreRanges.start[i] && a < ignoreRanges.end[i])
         return True;
   }
   return False;
}


/* Parse a 32- or 64-bit hex number, including leading 0x, from string
   starting at *ppc, putting result in *result, and return True.  Or
   fail, in which case *ppc and *result are undefined, and return
   False. */

static Bool isHex ( UChar c )
{
   return ((c >= '0' && c <= '9')
           || (c >= 'a' && c <= 'f')
           || (c >= 'A' && c <= 'F'));
}

static UInt fromHex ( UChar c )
{
   if (c >= '0' && c <= '9')
      return (UInt)c - (UInt)'0';
   if (c >= 'a' && c <= 'f')
      return 10 + (UInt)c - (UInt)'a';
   if (c >= 'A' && c <= 'F')
      return 10 + (UInt)c - (UInt)'A';
   /*NOTREACHED*/
   tl_assert(0);
   return 0;
}

static Bool parse_Addr ( UChar** ppc, Addr* result )
{
   Int used, limit = 2 * sizeof(Addr);
   if (**ppc != '0')
      return False;
   (*ppc)++;
   if (**ppc != 'x')
      return False;
   (*ppc)++;
   *result = 0;
   used = 0;
   while (isHex(**ppc)) {
      UInt d = fromHex(**ppc);
      tl_assert(d < 16);
      *result = ((*result) << 4) | fromHex(**ppc);
      (*ppc)++;
      used++;
      if (used > limit) return False;
   }
   if (used == 0)
      return False;
   return True;
}

/* Parse two such numbers separated by a dash, or fail. */

static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
{
   Bool ok = parse_Addr(ppc, result1);
   if (!ok)
      return False;
   if (**ppc != '-')
      return False;
   (*ppc)++;
   ok = parse_Addr(ppc, result2);
   if (!ok)
      return False;
   return True;
}

/* Parse a set of ranges separated by commas into 'ignoreRanges', or
   fail. */

static Bool parse_ignore_ranges ( UChar* str0 )
{
   Addr start, end;
   Bool ok;
   UChar*  str = str0;
   UChar** ppc = &str;
   ignoreRanges.used = 0;
   while (1) {
      ok = parse_range(ppc, &start, &end);
      if (!ok)
         return False;
      if (ignoreRanges.used >= M_IGNORE_RANGES)
         return False;
      ignoreRanges.start[ignoreRanges.used] = start;
      ignoreRanges.end[ignoreRanges.used] = end;
      ignoreRanges.used++;
      if (**ppc == 0)
         return True;
      if (**ppc != ',')
         return False;
      (*ppc)++;
   }
   /*NOTREACHED*/
   return False;
}

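/* Illustrative input (the addresses are hypothetical): the parser above
   accepts strings of the form "0x58000000-0x58800000,0x90000000-0x90100000"
   -- up to M_IGNORE_RANGES comma-separated ranges, each bound written in
   hex with a leading 0x.  Note that in_ignored_range() treats each range as
   half-open: 'start' is included, 'end' is excluded. */
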
1129
sewardj45d94cc2005-04-20 14:44:11 +00001130/* --------------- Load/store slow cases. --------------- */
1131
njn1d0825f2006-03-27 11:37:07 +00001132// Forward declarations
1133static void mc_record_address_error ( ThreadId tid, Addr a,
1134 Int size, Bool isWrite );
njn718d3b12006-12-16 00:54:12 +00001135static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* s );
1136static void mc_record_regparam_error ( ThreadId tid, Char* msg );
1137static void mc_record_memparam_error ( ThreadId tid, Addr a,
1138 Bool isAddrErr, Char* msg );
njn1d0825f2006-03-27 11:37:07 +00001139static void mc_record_jump_error ( ThreadId tid, Addr a );
1140
sewardj45d94cc2005-04-20 14:44:11 +00001141static
njn1d0825f2006-03-27 11:37:07 +00001142#ifndef PERF_FAST_LOADV
1143INLINE
1144#endif
njn45e81252006-03-28 12:35:08 +00001145ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +00001146{
njn1d0825f2006-03-27 11:37:07 +00001147 /* Make up a 64-bit result V word, which contains the loaded data for
sewardjf3d57dd2005-04-22 20:23:27 +00001148 valid addresses and Defined for invalid addresses. Iterate over
1149 the bytes in the word, from the most significant down to the
1150 least. */
njn1d0825f2006-03-27 11:37:07 +00001151 ULong vbits64 = V_BITS64_UNDEFINED;
njn45e81252006-03-28 12:35:08 +00001152 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +00001153 SSizeT i = szB-1; // Must be signed
sewardj45d94cc2005-04-20 14:44:11 +00001154 SizeT n_addrs_bad = 0;
1155 Addr ai;
njn1d0825f2006-03-27 11:37:07 +00001156 Bool partial_load_exemption_applies;
1157 UChar vbits8;
1158 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +00001159
sewardjc1a2cda2005-04-21 17:34:00 +00001160 PROF_EVENT(30, "mc_LOADVn_slow");
sewardj05a46732006-10-17 01:28:10 +00001161
1162 /* ------------ BEGIN semi-fast cases ------------ */
1163 /* These deal quickly-ish with the common auxiliary primary map
1164 cases on 64-bit platforms. Are merely a speedup hack; can be
1165 omitted without loss of correctness/functionality. Note that in
1166 both cases the "sizeof(void*) == 8" causes these cases to be
1167 folded out by compilers on 32-bit platforms. These are derived
1168 from LOADV64 and LOADV32.
1169 */
bart5dd8e6a2008-03-22 08:04:29 +00001170 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001171 && nBits == 64 && VG_IS_8_ALIGNED(a))) {
1172 SecMap* sm = get_secmap_for_reading(a);
1173 UWord sm_off16 = SM_OFF_16(a);
1174 UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
bart5dd8e6a2008-03-22 08:04:29 +00001175 if (LIKELY(vabits16 == VA_BITS16_DEFINED))
sewardj05a46732006-10-17 01:28:10 +00001176 return V_BITS64_DEFINED;
bart5dd8e6a2008-03-22 08:04:29 +00001177 if (LIKELY(vabits16 == VA_BITS16_UNDEFINED))
sewardj05a46732006-10-17 01:28:10 +00001178 return V_BITS64_UNDEFINED;
1179 /* else fall into the slow case */
1180 }
bart5dd8e6a2008-03-22 08:04:29 +00001181 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001182 && nBits == 32 && VG_IS_4_ALIGNED(a))) {
1183 SecMap* sm = get_secmap_for_reading(a);
1184 UWord sm_off = SM_OFF(a);
1185 UWord vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00001186 if (LIKELY(vabits8 == VA_BITS8_DEFINED))
sewardj05a46732006-10-17 01:28:10 +00001187 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
bart5dd8e6a2008-03-22 08:04:29 +00001188 if (LIKELY(vabits8 == VA_BITS8_UNDEFINED))
sewardj05a46732006-10-17 01:28:10 +00001189 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
1190 /* else fall into slow case */
1191 }
1192 /* ------------ END semi-fast cases ------------ */
1193
njn45e81252006-03-28 12:35:08 +00001194 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +00001195
njn1d0825f2006-03-27 11:37:07 +00001196 for (i = szB-1; i >= 0; i--) {
sewardjc1a2cda2005-04-21 17:34:00 +00001197 PROF_EVENT(31, "mc_LOADVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +00001198 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +00001199 ok = get_vbits8(ai, &vbits8);
1200 if (!ok) n_addrs_bad++;
1201 vbits64 <<= 8;
1202 vbits64 |= vbits8;
sewardj45d94cc2005-04-20 14:44:11 +00001203 }
1204
sewardj0ded7a42005-11-08 02:25:37 +00001205 /* This is a hack which avoids producing errors for code which
1206 insists in stepping along byte strings in aligned word-sized
1207 chunks, and there is a partially defined word at the end. (eg,
1208 optimised strlen). Such code is basically broken at least WRT
1209 semantics of ANSI C, but sometimes users don't have the option
1210 to fix it, and so this option is provided. Note it is now
1211 defaulted to not-engaged.
1212
1213 A load from a partially-addressible place is allowed if:
1214 - the command-line flag is set
1215 - it's a word-sized, word-aligned load
1216 - at least one of the addresses in the word *is* valid
1217 */
1218 partial_load_exemption_applies
njn1d0825f2006-03-27 11:37:07 +00001219 = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
sewardj0ded7a42005-11-08 02:25:37 +00001220 && VG_IS_WORD_ALIGNED(a)
1221 && n_addrs_bad < VG_WORDSIZE;
1222
1223 if (n_addrs_bad > 0 && !partial_load_exemption_applies)
njn1d0825f2006-03-27 11:37:07 +00001224 mc_record_address_error( VG_(get_running_tid)(), a, szB, False );
sewardj45d94cc2005-04-20 14:44:11 +00001225
njn1d0825f2006-03-27 11:37:07 +00001226 return vbits64;
sewardj45d94cc2005-04-20 14:44:11 +00001227}
1228
1229
njn1d0825f2006-03-27 11:37:07 +00001230static
1231#ifndef PERF_FAST_STOREV
1232INLINE
1233#endif
njn45e81252006-03-28 12:35:08 +00001234void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +00001235{
njn45e81252006-03-28 12:35:08 +00001236 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +00001237 SizeT i, n_addrs_bad = 0;
1238 UChar vbits8;
sewardj45d94cc2005-04-20 14:44:11 +00001239 Addr ai;
njn1d0825f2006-03-27 11:37:07 +00001240 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +00001241
sewardjc1a2cda2005-04-21 17:34:00 +00001242 PROF_EVENT(35, "mc_STOREVn_slow");
sewardj05a46732006-10-17 01:28:10 +00001243
1244 /* ------------ BEGIN semi-fast cases ------------ */
1245 /* These deal quickly-ish with the common auxiliary primary map
1246 cases on 64-bit platforms. Are merely a speedup hack; can be
1247 omitted without loss of correctness/functionality. Note that in
1248 both cases the "sizeof(void*) == 8" causes these cases to be
1249 folded out by compilers on 32-bit platforms. These are derived
1250 from STOREV64 and STOREV32.
1251 */
bart5dd8e6a2008-03-22 08:04:29 +00001252 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001253 && nBits == 64 && VG_IS_8_ALIGNED(a))) {
1254 SecMap* sm = get_secmap_for_reading(a);
1255 UWord sm_off16 = SM_OFF_16(a);
1256 UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
bart5dd8e6a2008-03-22 08:04:29 +00001257 if (LIKELY( !is_distinguished_sm(sm) &&
sewardj05a46732006-10-17 01:28:10 +00001258 (VA_BITS16_DEFINED == vabits16 ||
1259 VA_BITS16_UNDEFINED == vabits16) )) {
1260 /* Handle common case quickly: a is suitably aligned, */
1261 /* is mapped, and is addressible. */
1262 // Convert full V-bits in register to compact 2-bit form.
bart5dd8e6a2008-03-22 08:04:29 +00001263 if (LIKELY(V_BITS64_DEFINED == vbytes)) {
sewardj05a46732006-10-17 01:28:10 +00001264 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
1265 return;
1266 } else if (V_BITS64_UNDEFINED == vbytes) {
1267 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
1268 return;
1269 }
1270 /* else fall into the slow case */
1271 }
1272 /* else fall into the slow case */
1273 }
bart5dd8e6a2008-03-22 08:04:29 +00001274 if (LIKELY(sizeof(void*) == 8
sewardj05a46732006-10-17 01:28:10 +00001275 && nBits == 32 && VG_IS_4_ALIGNED(a))) {
1276 SecMap* sm = get_secmap_for_reading(a);
1277 UWord sm_off = SM_OFF(a);
1278 UWord vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00001279 if (LIKELY( !is_distinguished_sm(sm) &&
sewardj05a46732006-10-17 01:28:10 +00001280 (VA_BITS8_DEFINED == vabits8 ||
1281 VA_BITS8_UNDEFINED == vabits8) )) {
1282 /* Handle common case quickly: a is suitably aligned, */
1283 /* is mapped, and is addressible. */
1284 // Convert full V-bits in register to compact 2-bit form.
bart5dd8e6a2008-03-22 08:04:29 +00001285 if (LIKELY(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
sewardj05a46732006-10-17 01:28:10 +00001286 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
1287 return;
1288 } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
1289 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
1290 return;
1291 }
1292 /* else fall into the slow case */
1293 }
1294 /* else fall into the slow case */
1295 }
1296 /* ------------ END semi-fast cases ------------ */
1297
njn45e81252006-03-28 12:35:08 +00001298 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +00001299
1300 /* Dump vbytes in memory, iterating from least to most significant
njn718d3b12006-12-16 00:54:12 +00001301 byte. At the same time establish addressibility of the location. */
sewardj45d94cc2005-04-20 14:44:11 +00001302 for (i = 0; i < szB; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +00001303 PROF_EVENT(36, "mc_STOREVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +00001304 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +00001305 vbits8 = vbytes & 0xff;
1306 ok = set_vbits8(ai, vbits8);
1307 if (!ok) n_addrs_bad++;
sewardj45d94cc2005-04-20 14:44:11 +00001308 vbytes >>= 8;
1309 }
1310
1311 /* If an address error has happened, report it. */
1312 if (n_addrs_bad > 0)
njn1d0825f2006-03-27 11:37:07 +00001313 mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
sewardj45d94cc2005-04-20 14:44:11 +00001314}
1315
1316
njn25e49d8e72002-09-23 09:36:25 +00001317/*------------------------------------------------------------*/
1318/*--- Setting permissions over address ranges. ---*/
1319/*------------------------------------------------------------*/
1320
njn1d0825f2006-03-27 11:37:07 +00001321static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
1322 UWord dsm_num )
sewardj23eb2fd2005-04-22 16:29:19 +00001323{
njn1d0825f2006-03-27 11:37:07 +00001324 UWord sm_off, sm_off16;
1325 UWord vabits2 = vabits16 & 0x3;
1326 SizeT lenA, lenB, len_to_next_secmap;
1327 Addr aNext;
sewardjae986ca2005-10-12 12:53:20 +00001328 SecMap* sm;
njn1d0825f2006-03-27 11:37:07 +00001329 SecMap** sm_ptr;
sewardjae986ca2005-10-12 12:53:20 +00001330 SecMap* example_dsm;
1331
sewardj23eb2fd2005-04-22 16:29:19 +00001332 PROF_EVENT(150, "set_address_range_perms");
1333
njn1d0825f2006-03-27 11:37:07 +00001334 /* Check the V+A bits make sense. */
njndbf7ca72006-03-31 11:57:59 +00001335 tl_assert(VA_BITS16_NOACCESS == vabits16 ||
1336 VA_BITS16_UNDEFINED == vabits16 ||
1337 VA_BITS16_DEFINED == vabits16);
sewardj23eb2fd2005-04-22 16:29:19 +00001338
njn1d0825f2006-03-27 11:37:07 +00001339   // This code should never write PDBs (partially defined bytes); ensure
1340   // this.  (See the comment above set_vabits2().)
njndbf7ca72006-03-31 11:57:59 +00001341 tl_assert(VA_BITS2_PARTDEFINED != vabits2);
njn1d0825f2006-03-27 11:37:07 +00001342
1343 if (lenT == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001344 return;
1345
njn1d0825f2006-03-27 11:37:07 +00001346 if (lenT > 100 * 1000 * 1000) {
1347 if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
1348 Char* s = "unknown???";
njndbf7ca72006-03-31 11:57:59 +00001349 if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
1350 if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
1351 if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
njn1d0825f2006-03-27 11:37:07 +00001352 VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
1353 "large range %lu (%s)", lenT, s);
sewardj23eb2fd2005-04-22 16:29:19 +00001354 }
1355 }
1356
njn1d0825f2006-03-27 11:37:07 +00001357#ifndef PERF_FAST_SARP
sewardj23eb2fd2005-04-22 16:29:19 +00001358 /*------------------ debug-only case ------------------ */
njn1d0825f2006-03-27 11:37:07 +00001359 {
1360 // Endianness doesn't matter here because all bytes are being set to
1361 // the same value.
1362 // Nb: We don't have to worry about updating the sec-V-bits table
1363 // after these set_vabits2() calls because this code never writes
njndbf7ca72006-03-31 11:57:59 +00001364 // VA_BITS2_PARTDEFINED values.
njn1d0825f2006-03-27 11:37:07 +00001365 SizeT i;
1366 for (i = 0; i < lenT; i++) {
1367 set_vabits2(a + i, vabits2);
1368 }
1369 return;
njn25e49d8e72002-09-23 09:36:25 +00001370 }
njn1d0825f2006-03-27 11:37:07 +00001371#endif
sewardj23eb2fd2005-04-22 16:29:19 +00001372
1373 /*------------------ standard handling ------------------ */
sewardj23eb2fd2005-04-22 16:29:19 +00001374
njn1d0825f2006-03-27 11:37:07 +00001375 /* Get the distinguished secondary that we might want
sewardj23eb2fd2005-04-22 16:29:19 +00001376 to use (part of the space-compression scheme). */
njn1d0825f2006-03-27 11:37:07 +00001377 example_dsm = &sm_distinguished[dsm_num];
1378
1379 // We have to handle ranges covering various combinations of partial and
1380 // whole sec-maps. Here is how parts 1, 2 and 3 are used in each case.
1381 // Cases marked with a '*' are common.
1382 //
1383 // TYPE PARTS USED
1384 // ---- ----------
1385 // * one partial sec-map (p) 1
1386 // - one whole sec-map (P) 2
1387 //
1388 // * two partial sec-maps (pp) 1,3
1389 // - one partial, one whole sec-map (pP) 1,2
1390 // - one whole, one partial sec-map (Pp) 2,3
1391 // - two whole sec-maps (PP) 2,2
1392 //
1393 // * one partial, one whole, one partial (pPp) 1,2,3
1394 // - one partial, two whole (pPP) 1,2,2
1395 // - two whole, one partial (PPp) 2,2,3
1396 // - three whole (PPP) 2,2,2
1397 //
1398 // * one partial, N-2 whole, one partial (pP...Pp) 1,2...2,3
1399 // - one partial, N-1 whole (pP...PP) 1,2...2,2
1400 // - N-1 whole, one partial (PP...Pp) 2,2...2,3
1401   //   - N whole                               (PP...PP)  2,2...2,2
1402
1403 // Break up total length (lenT) into two parts: length in the first
1404 // sec-map (lenA), and the rest (lenB); lenT == lenA + lenB.
1405 aNext = start_of_this_sm(a) + SM_SIZE;
1406 len_to_next_secmap = aNext - a;
1407 if ( lenT <= len_to_next_secmap ) {
1408 // Range entirely within one sec-map. Covers almost all cases.
1409 PROF_EVENT(151, "set_address_range_perms-single-secmap");
1410 lenA = lenT;
1411 lenB = 0;
1412 } else if (is_start_of_sm(a)) {
1413 // Range spans at least one whole sec-map, and starts at the beginning
1414 // of a sec-map; skip to Part 2.
1415 PROF_EVENT(152, "set_address_range_perms-startof-secmap");
1416 lenA = 0;
1417 lenB = lenT;
1418 goto part2;
sewardj23eb2fd2005-04-22 16:29:19 +00001419 } else {
njn1d0825f2006-03-27 11:37:07 +00001420 // Range spans two or more sec-maps, first one is partial.
1421 PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
1422 lenA = len_to_next_secmap;
1423 lenB = lenT - lenA;
1424 }
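   // Worked example (assuming SM_SIZE == 64k): for a == 0x4000FFF0 and
   // lenT == 0x30, aNext == 0x40010000 and len_to_next_secmap == 0x10.
   // Since lenT exceeds that, lenA == 0x10 (finished off by Part 1 in the
   // first sec-map) and lenB == 0x20 (finished off by Part 3 in the next
   // one) -- the "pp" case in the table above.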
1425
1426 //------------------------------------------------------------------------
1427 // Part 1: Deal with the first sec_map. Most of the time the range will be
1428 // entirely within a sec_map and this part alone will suffice. Also,
1429 // doing it this way lets us avoid repeatedly testing for the crossing of
1430 // a sec-map boundary within these loops.
1431 //------------------------------------------------------------------------
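   // Rough sketch of the three loops below, assuming a == 0x1003 and
   // lenA == 21: the first byte loop writes 5 bytes (0x1003..0x1007) to
   // reach 8-alignment, the 8-byte loop then covers 16 bytes as two
   // aligned UShort writes into vabits8, and the final byte loop mops up
   // the remaining 0 bytes.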
1432
1433 // If it's distinguished, make it undistinguished if necessary.
1434 sm_ptr = get_secmap_ptr(a);
1435 if (is_distinguished_sm(*sm_ptr)) {
1436 if (*sm_ptr == example_dsm) {
1437 // Sec-map already has the V+A bits that we want, so skip.
1438 PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
1439 a = aNext;
1440 lenA = 0;
sewardj23eb2fd2005-04-22 16:29:19 +00001441 } else {
njn1d0825f2006-03-27 11:37:07 +00001442 PROF_EVENT(155, "set_address_range_perms-dist-sm1");
1443 *sm_ptr = copy_for_writing(*sm_ptr);
sewardj23eb2fd2005-04-22 16:29:19 +00001444 }
1445 }
njn1d0825f2006-03-27 11:37:07 +00001446 sm = *sm_ptr;
sewardj23eb2fd2005-04-22 16:29:19 +00001447
njn1d0825f2006-03-27 11:37:07 +00001448 // 1 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001449 while (True) {
sewardj23eb2fd2005-04-22 16:29:19 +00001450 if (VG_IS_8_ALIGNED(a)) break;
njn1d0825f2006-03-27 11:37:07 +00001451 if (lenA < 1) break;
1452 PROF_EVENT(156, "set_address_range_perms-loop1a");
1453 sm_off = SM_OFF(a);
1454 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1455 a += 1;
1456 lenA -= 1;
1457 }
1458 // 8-aligned, 8 byte steps
sewardj23eb2fd2005-04-22 16:29:19 +00001459 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001460 if (lenA < 8) break;
1461 PROF_EVENT(157, "set_address_range_perms-loop8a");
1462 sm_off16 = SM_OFF_16(a);
1463 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1464 a += 8;
1465 lenA -= 8;
1466 }
1467 // 1 byte steps
1468 while (True) {
1469 if (lenA < 1) break;
1470 PROF_EVENT(158, "set_address_range_perms-loop1b");
1471 sm_off = SM_OFF(a);
1472 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1473 a += 1;
1474 lenA -= 1;
sewardj23eb2fd2005-04-22 16:29:19 +00001475 }
1476
njn1d0825f2006-03-27 11:37:07 +00001477 // We've finished the first sec-map. Is that it?
1478 if (lenB == 0)
sewardj23eb2fd2005-04-22 16:29:19 +00001479 return;
1480
njn1d0825f2006-03-27 11:37:07 +00001481 //------------------------------------------------------------------------
1482 // Part 2: Fast-set entire sec-maps at a time.
1483 //------------------------------------------------------------------------
1484 part2:
1485 // 64KB-aligned, 64KB steps.
1486 // Nb: we can reach here with lenB < SM_SIZE
sewardj23eb2fd2005-04-22 16:29:19 +00001487 while (True) {
njn1d0825f2006-03-27 11:37:07 +00001488 if (lenB < SM_SIZE) break;
1489 tl_assert(is_start_of_sm(a));
1490 PROF_EVENT(159, "set_address_range_perms-loop64K");
1491 sm_ptr = get_secmap_ptr(a);
1492 if (!is_distinguished_sm(*sm_ptr)) {
1493 PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
1494 // Free the non-distinguished sec-map that we're replacing. This
1495 // case happens moderately often, enough to be worthwhile.
1496 VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
1497 }
1498 update_SM_counts(*sm_ptr, example_dsm);
1499 // Make the sec-map entry point to the example DSM
1500 *sm_ptr = example_dsm;
1501 lenB -= SM_SIZE;
1502 a += SM_SIZE;
1503 }
sewardj23eb2fd2005-04-22 16:29:19 +00001504
njn1d0825f2006-03-27 11:37:07 +00001505   // We've finished all the whole sec-maps.  Is that it?
1506 if (lenB == 0)
1507 return;
1508
1509 //------------------------------------------------------------------------
1510 // Part 3: Finish off the final partial sec-map, if necessary.
1511 //------------------------------------------------------------------------
1512
1513 tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);
1514
1515 // If it's distinguished, make it undistinguished if necessary.
1516 sm_ptr = get_secmap_ptr(a);
1517 if (is_distinguished_sm(*sm_ptr)) {
1518 if (*sm_ptr == example_dsm) {
1519 // Sec-map already has the V+A bits that we want, so stop.
1520 PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
1521 return;
1522 } else {
1523 PROF_EVENT(162, "set_address_range_perms-dist-sm2");
1524 *sm_ptr = copy_for_writing(*sm_ptr);
1525 }
1526 }
1527 sm = *sm_ptr;
1528
1529 // 8-aligned, 8 byte steps
1530 while (True) {
1531 if (lenB < 8) break;
1532 PROF_EVENT(163, "set_address_range_perms-loop8b");
1533 sm_off16 = SM_OFF_16(a);
1534 ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
1535 a += 8;
1536 lenB -= 8;
1537 }
1538 // 1 byte steps
1539 while (True) {
1540 if (lenB < 1) return;
1541 PROF_EVENT(164, "set_address_range_perms-loop1c");
1542 sm_off = SM_OFF(a);
1543 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
1544 a += 1;
1545 lenB -= 1;
1546 }
sewardj23eb2fd2005-04-22 16:29:19 +00001547}
sewardj45d94cc2005-04-20 14:44:11 +00001548
sewardjc859fbf2005-04-22 21:10:28 +00001549
1550/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +00001551
njndbf7ca72006-03-31 11:57:59 +00001552void MC_(make_mem_noaccess) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001553{
njndbf7ca72006-03-31 11:57:59 +00001554 PROF_EVENT(40, "MC_(make_mem_noaccess)");
1555 DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
njn1d0825f2006-03-27 11:37:07 +00001556 set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
njn25e49d8e72002-09-23 09:36:25 +00001557}
1558
njndbf7ca72006-03-31 11:57:59 +00001559void MC_(make_mem_undefined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001560{
njndbf7ca72006-03-31 11:57:59 +00001561 PROF_EVENT(41, "MC_(make_mem_undefined)");
1562 DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
1563 set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001564}
1565
njndbf7ca72006-03-31 11:57:59 +00001566void MC_(make_mem_defined) ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001567{
njndbf7ca72006-03-31 11:57:59 +00001568 PROF_EVENT(42, "MC_(make_mem_defined)");
1569 DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
1570 set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
njn25e49d8e72002-09-23 09:36:25 +00001571}
1572
sewardjfb1e9ad2006-03-10 13:41:58 +00001573/* For each byte in [a,a+len), if the byte is addressable, make it be
1574   defined, but if it isn't addressable, leave it alone.  In other
njndbf7ca72006-03-31 11:57:59 +00001575   words, a version of MC_(make_mem_defined) that doesn't mess with
sewardjfb1e9ad2006-03-10 13:41:58 +00001576   addressability.  Low-performance implementation. */
njndbf7ca72006-03-31 11:57:59 +00001577static void make_mem_defined_if_addressable ( Addr a, SizeT len )
sewardjfb1e9ad2006-03-10 13:41:58 +00001578{
1579 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001580 UChar vabits2;
njndbf7ca72006-03-31 11:57:59 +00001581 DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
sewardjfb1e9ad2006-03-10 13:41:58 +00001582 for (i = 0; i < len; i++) {
njn1d0825f2006-03-27 11:37:07 +00001583 vabits2 = get_vabits2( a+i );
bart5dd8e6a2008-03-22 08:04:29 +00001584 if (LIKELY(VA_BITS2_NOACCESS != vabits2)) {
njndbf7ca72006-03-31 11:57:59 +00001585 set_vabits2(a+i, VA_BITS2_DEFINED);
njn1d0825f2006-03-27 11:37:07 +00001586 }
sewardjfb1e9ad2006-03-10 13:41:58 +00001587 }
1588}
1589
njn9b007f62003-04-07 14:40:25 +00001590
sewardj45f4e7c2005-09-27 19:20:21 +00001591/* --- Block-copy permissions (needed for implementing realloc() and
1592 sys_mremap). --- */
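/* Illustrative note (not from the original sources): this is the metadata
   analogue of memmove().  E.g. for src == 0x5000, dst == 0x5004, len == 8,
   the ranges overlap and src < dst, so the slow path below copies from the
   highest byte downwards; copying upwards would overwrite the V+A bits of
   0x5004..0x5007 before they had been read. */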
sewardjc859fbf2005-04-22 21:10:28 +00001593
njn1d0825f2006-03-27 11:37:07 +00001594void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
sewardjc859fbf2005-04-22 21:10:28 +00001595{
sewardj45f4e7c2005-09-27 19:20:21 +00001596 SizeT i, j;
sewardjf2184912006-05-03 22:13:57 +00001597 UChar vabits2, vabits8;
1598 Bool aligned, nooverlap;
sewardjc859fbf2005-04-22 21:10:28 +00001599
njn1d0825f2006-03-27 11:37:07 +00001600 DEBUG("MC_(copy_address_range_state)\n");
1601 PROF_EVENT(50, "MC_(copy_address_range_state)");
sewardj45f4e7c2005-09-27 19:20:21 +00001602
sewardjf2184912006-05-03 22:13:57 +00001603 if (len == 0 || src == dst)
sewardj45f4e7c2005-09-27 19:20:21 +00001604 return;
1605
sewardjf2184912006-05-03 22:13:57 +00001606 aligned = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
1607 nooverlap = src+len <= dst || dst+len <= src;
sewardj45f4e7c2005-09-27 19:20:21 +00001608
sewardjf2184912006-05-03 22:13:57 +00001609 if (nooverlap && aligned) {
1610
1611 /* Vectorised fast case, when no overlap and suitably aligned */
1612 /* vector loop */
1613 i = 0;
1614 while (len >= 4) {
1615 vabits8 = get_vabits8_for_aligned_word32( src+i );
1616 set_vabits8_for_aligned_word32( dst+i, vabits8 );
bart5dd8e6a2008-03-22 08:04:29 +00001617 if (LIKELY(VA_BITS8_DEFINED == vabits8
sewardjf2184912006-05-03 22:13:57 +00001618 || VA_BITS8_UNDEFINED == vabits8
1619 || VA_BITS8_NOACCESS == vabits8)) {
1620 /* do nothing */
1621 } else {
1622 /* have to copy secondary map info */
1623 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
1624 set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
1625 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
1626 set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
1627 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
1628 set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
1629 if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
1630 set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
1631 }
1632 i += 4;
1633 len -= 4;
1634 }
1635 /* fixup loop */
1636 while (len >= 1) {
njn1d0825f2006-03-27 11:37:07 +00001637 vabits2 = get_vabits2( src+i );
1638 set_vabits2( dst+i, vabits2 );
njndbf7ca72006-03-31 11:57:59 +00001639 if (VA_BITS2_PARTDEFINED == vabits2) {
njn1d0825f2006-03-27 11:37:07 +00001640 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1641 }
sewardjf2184912006-05-03 22:13:57 +00001642 i++;
1643 len--;
1644 }
1645
1646 } else {
1647
1648 /* We have to do things the slow way */
1649 if (src < dst) {
1650 for (i = 0, j = len-1; i < len; i++, j--) {
1651 PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
1652 vabits2 = get_vabits2( src+j );
1653 set_vabits2( dst+j, vabits2 );
1654 if (VA_BITS2_PARTDEFINED == vabits2) {
1655 set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
1656 }
1657 }
1658 }
1659
1660 if (src > dst) {
1661 for (i = 0; i < len; i++) {
1662 PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
1663 vabits2 = get_vabits2( src+i );
1664 set_vabits2( dst+i, vabits2 );
1665 if (VA_BITS2_PARTDEFINED == vabits2) {
1666 set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
1667 }
1668 }
sewardj45f4e7c2005-09-27 19:20:21 +00001669 }
sewardjc859fbf2005-04-22 21:10:28 +00001670 }
sewardjf2184912006-05-03 22:13:57 +00001671
sewardjc859fbf2005-04-22 21:10:28 +00001672}
1673
1674
1675/* --- Fast case permission setters, for dealing with stacks. --- */
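/* Rough idea (a sketch, not a specification): each helper below is
   semantically the same as MC_(make_mem_undefined)(a,4/8) or
   MC_(make_mem_noaccess)(a,4/8) on an aligned address, but when 'a' lies
   within the main primary map it skips set_address_range_perms() and
   pokes the metadata directly, roughly

      sm = get_secmap_for_writing_low(a);
      sm->vabits8[SM_OFF(a)] = VA_BITS8_UNDEFINED;   // 32-bit case

   which is what keeps the per-stack-adjustment handlers further down
   cheap. */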
1676
njn1d0825f2006-03-27 11:37:07 +00001677static INLINE
njndbf7ca72006-03-31 11:57:59 +00001678void make_aligned_word32_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001679{
njn1d0825f2006-03-27 11:37:07 +00001680 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001681 SecMap* sm;
1682
njndbf7ca72006-03-31 11:57:59 +00001683 PROF_EVENT(300, "make_aligned_word32_undefined");
sewardj5d28efc2005-04-21 22:16:29 +00001684
njn1d0825f2006-03-27 11:37:07 +00001685#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001686 MC_(make_mem_undefined)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001687#else
bart5dd8e6a2008-03-22 08:04:29 +00001688 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001689 PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
1690 MC_(make_mem_undefined)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001691 return;
1692 }
1693
njna7c7ebd2006-03-28 12:51:02 +00001694 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001695 sm_off = SM_OFF(a);
njndbf7ca72006-03-31 11:57:59 +00001696 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001697#endif
njn9b007f62003-04-07 14:40:25 +00001698}
1699
sewardj5d28efc2005-04-21 22:16:29 +00001700
njn1d0825f2006-03-27 11:37:07 +00001701static INLINE
1702void make_aligned_word32_noaccess ( Addr a )
sewardj5d28efc2005-04-21 22:16:29 +00001703{
njn1d0825f2006-03-27 11:37:07 +00001704 UWord sm_off;
sewardjae986ca2005-10-12 12:53:20 +00001705 SecMap* sm;
1706
sewardj5d28efc2005-04-21 22:16:29 +00001707 PROF_EVENT(310, "make_aligned_word32_noaccess");
1708
njn1d0825f2006-03-27 11:37:07 +00001709#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001710 MC_(make_mem_noaccess)(a, 4);
njn1d0825f2006-03-27 11:37:07 +00001711#else
bart5dd8e6a2008-03-22 08:04:29 +00001712 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
sewardj5d28efc2005-04-21 22:16:29 +00001713 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001714 MC_(make_mem_noaccess)(a, 4);
sewardj5d28efc2005-04-21 22:16:29 +00001715 return;
1716 }
1717
njna7c7ebd2006-03-28 12:51:02 +00001718 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001719 sm_off = SM_OFF(a);
1720 sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
1721#endif
sewardj5d28efc2005-04-21 22:16:29 +00001722}
1723
1724
njn9b007f62003-04-07 14:40:25 +00001725/* Nb: by "aligned" here we mean 8-byte aligned */
njn1d0825f2006-03-27 11:37:07 +00001726static INLINE
njndbf7ca72006-03-31 11:57:59 +00001727void make_aligned_word64_undefined ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001728{
njn1d0825f2006-03-27 11:37:07 +00001729 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001730 SecMap* sm;
1731
njndbf7ca72006-03-31 11:57:59 +00001732 PROF_EVENT(320, "make_aligned_word64_undefined");
sewardj23eb2fd2005-04-22 16:29:19 +00001733
njn1d0825f2006-03-27 11:37:07 +00001734#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001735 MC_(make_mem_undefined)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001736#else
bart5dd8e6a2008-03-22 08:04:29 +00001737 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
njndbf7ca72006-03-31 11:57:59 +00001738 PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
1739 MC_(make_mem_undefined)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001740 return;
1741 }
1742
njna7c7ebd2006-03-28 12:51:02 +00001743 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001744 sm_off16 = SM_OFF_16(a);
njndbf7ca72006-03-31 11:57:59 +00001745 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00001746#endif
njn9b007f62003-04-07 14:40:25 +00001747}
1748
sewardj23eb2fd2005-04-22 16:29:19 +00001749
njn1d0825f2006-03-27 11:37:07 +00001750static INLINE
1751void make_aligned_word64_noaccess ( Addr a )
njn9b007f62003-04-07 14:40:25 +00001752{
njn1d0825f2006-03-27 11:37:07 +00001753 UWord sm_off16;
sewardjae986ca2005-10-12 12:53:20 +00001754 SecMap* sm;
1755
sewardj23eb2fd2005-04-22 16:29:19 +00001756 PROF_EVENT(330, "make_aligned_word64_noaccess");
1757
njn1d0825f2006-03-27 11:37:07 +00001758#ifndef PERF_FAST_STACK2
njndbf7ca72006-03-31 11:57:59 +00001759 MC_(make_mem_noaccess)(a, 8);
njn1d0825f2006-03-27 11:37:07 +00001760#else
bart5dd8e6a2008-03-22 08:04:29 +00001761 if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +00001762 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
njndbf7ca72006-03-31 11:57:59 +00001763 MC_(make_mem_noaccess)(a, 8);
sewardj23eb2fd2005-04-22 16:29:19 +00001764 return;
1765 }
1766
njna7c7ebd2006-03-28 12:51:02 +00001767 sm = get_secmap_for_writing_low(a);
njn1d0825f2006-03-27 11:37:07 +00001768 sm_off16 = SM_OFF_16(a);
1769 ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
1770#endif
njn9b007f62003-04-07 14:40:25 +00001771}
1772
sewardj23eb2fd2005-04-22 16:29:19 +00001773
njn1d0825f2006-03-27 11:37:07 +00001774/*------------------------------------------------------------*/
1775/*--- Stack pointer adjustment ---*/
1776/*------------------------------------------------------------*/
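/* Worked example (figures illustrative): on amd64, where
   VG_STACK_REDZONE_SZB is 128, an instruction that moves the stack
   pointer down by 16 ends up in mc_new_mem_stack_16(new_SP), which marks
   the 16 bytes starting at new_SP-128 as undefined -- i.e. the newly
   exposed area shifted down by the redzone -- using the aligned-word
   helpers above when alignment allows, and MC_(make_mem_undefined)
   otherwise. */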
1777
1778static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
1779{
1780 PROF_EVENT(110, "new_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001781 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001782 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00001783 } else {
njndbf7ca72006-03-31 11:57:59 +00001784 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
njn1d0825f2006-03-27 11:37:07 +00001785 }
1786}
1787
1788static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
1789{
1790 PROF_EVENT(120, "die_mem_stack_4");
sewardj05a46732006-10-17 01:28:10 +00001791 if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001792 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001793 } else {
njndbf7ca72006-03-31 11:57:59 +00001794 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
njn1d0825f2006-03-27 11:37:07 +00001795 }
1796}
1797
1798static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
1799{
1800 PROF_EVENT(111, "new_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001801 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001802 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
sewardj05a46732006-10-17 01:28:10 +00001803 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001804 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1805 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001806 } else {
njndbf7ca72006-03-31 11:57:59 +00001807 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
njn1d0825f2006-03-27 11:37:07 +00001808 }
1809}
1810
1811static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
1812{
1813 PROF_EVENT(121, "die_mem_stack_8");
sewardj05a46732006-10-17 01:28:10 +00001814 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001815 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001816 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001817 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
1818 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001819 } else {
njndbf7ca72006-03-31 11:57:59 +00001820 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
njn1d0825f2006-03-27 11:37:07 +00001821 }
1822}
1823
1824static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
1825{
1826 PROF_EVENT(112, "new_mem_stack_12");
sewardj05a46732006-10-17 01:28:10 +00001827 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001828 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1829 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001830 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001831      /* From the previous test we don't have 8-alignment at offset +0,
1832         hence we must have 8-alignment at offset +4.  Hence it is safe to
1833         do 4 at +0 and then 8 at +4. */
njndbf7ca72006-03-31 11:57:59 +00001834 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1835 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001836 } else {
njndbf7ca72006-03-31 11:57:59 +00001837 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
njn1d0825f2006-03-27 11:37:07 +00001838 }
1839}
1840
1841static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
1842{
1843 PROF_EVENT(122, "die_mem_stack_12");
1844 /* Note the -12 in the test */
sewardj43fcfd92006-10-17 23:14:42 +00001845 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
1846 /* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
1847 -4. */
njndbf7ca72006-03-31 11:57:59 +00001848 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1849 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
sewardj05a46732006-10-17 01:28:10 +00001850 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001851 /* We have 4-alignment at +0, but we don't have 8-alignment at
1852 -12. So we must have 8-alignment at -8. Hence do 4 at -12
1853 and then 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001854 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1855 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00001856 } else {
njndbf7ca72006-03-31 11:57:59 +00001857 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
njn1d0825f2006-03-27 11:37:07 +00001858 }
1859}
1860
1861static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
1862{
1863 PROF_EVENT(113, "new_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001864 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001865 /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
njndbf7ca72006-03-31 11:57:59 +00001866 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1867 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
sewardj05a46732006-10-17 01:28:10 +00001868 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001869 /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
1870 Hence do 4 at +0, 8 at +4, 4 at +12. */
njndbf7ca72006-03-31 11:57:59 +00001871 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1872 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1873 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
njn1d0825f2006-03-27 11:37:07 +00001874 } else {
njndbf7ca72006-03-31 11:57:59 +00001875 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
njn1d0825f2006-03-27 11:37:07 +00001876 }
1877}
1878
1879static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
1880{
1881 PROF_EVENT(123, "die_mem_stack_16");
sewardj05a46732006-10-17 01:28:10 +00001882 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001883 /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
njndbf7ca72006-03-31 11:57:59 +00001884 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1885 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
sewardj05a46732006-10-17 01:28:10 +00001886 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001887 /* 8 alignment must be at -12. Do 4 at -16, 8 at -12, 4 at -4. */
njndbf7ca72006-03-31 11:57:59 +00001888 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1889 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1890 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001891 } else {
njndbf7ca72006-03-31 11:57:59 +00001892 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
njn1d0825f2006-03-27 11:37:07 +00001893 }
1894}
1895
1896static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
1897{
1898 PROF_EVENT(114, "new_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001899 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001900 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001901 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1902 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1903 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1904 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
sewardj05a46732006-10-17 01:28:10 +00001905 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001906 /* 8 alignment must be at +4. Hence do 8 at +4,+12,+20 and 4 at
1907 +0,+28. */
njndbf7ca72006-03-31 11:57:59 +00001908 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1909 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1910 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
1911 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
1912 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
njn1d0825f2006-03-27 11:37:07 +00001913 } else {
njndbf7ca72006-03-31 11:57:59 +00001914 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
njn1d0825f2006-03-27 11:37:07 +00001915 }
1916}
1917
1918static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
1919{
1920 PROF_EVENT(124, "die_mem_stack_32");
sewardj05a46732006-10-17 01:28:10 +00001921 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001922 /* Straightforward */
njndbf7ca72006-03-31 11:57:59 +00001923 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1924 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1925 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1926 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
sewardj05a46732006-10-17 01:28:10 +00001927 } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
sewardj43fcfd92006-10-17 23:14:42 +00001928 /* 8 alignment must be at -4 etc. Hence do 8 at -12,-20,-28 and
1929 4 at -32,-4. */
njndbf7ca72006-03-31 11:57:59 +00001930 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1931 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
1932 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
1933 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1934 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001935 } else {
njndbf7ca72006-03-31 11:57:59 +00001936 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
njn1d0825f2006-03-27 11:37:07 +00001937 }
1938}
1939
1940static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
1941{
1942 PROF_EVENT(115, "new_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001943 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001944 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1945 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1946 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1947 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1948 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1949 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1950 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1951 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1952 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1953 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1954 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1955 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1956 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1957 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
njn1d0825f2006-03-27 11:37:07 +00001958 } else {
njndbf7ca72006-03-31 11:57:59 +00001959 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
njn1d0825f2006-03-27 11:37:07 +00001960 }
1961}
1962
1963static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
1964{
1965 PROF_EVENT(125, "die_mem_stack_112");
sewardj05a46732006-10-17 01:28:10 +00001966 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001967 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1968 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1969 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1970 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1971 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1972 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1973 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1974 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1975 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1976 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1977 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1978 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1979 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1980 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001981 } else {
njndbf7ca72006-03-31 11:57:59 +00001982 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
njn1d0825f2006-03-27 11:37:07 +00001983 }
1984}
1985
1986static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
1987{
1988 PROF_EVENT(116, "new_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00001989 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00001990 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1991 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1992 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1993 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1994 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1995 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1996 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1997 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1998 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1999 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2000 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2001 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2002 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2003 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2004 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2005 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
njn1d0825f2006-03-27 11:37:07 +00002006 } else {
njndbf7ca72006-03-31 11:57:59 +00002007 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
njn1d0825f2006-03-27 11:37:07 +00002008 }
2009}
2010
2011static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
2012{
2013 PROF_EVENT(126, "die_mem_stack_128");
sewardj05a46732006-10-17 01:28:10 +00002014 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002015 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2016 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2017 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2018 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2019 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2020 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2021 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2022 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2023 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2024 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2025 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2026 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2027 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2028 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2029 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2030 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002031 } else {
njndbf7ca72006-03-31 11:57:59 +00002032 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
njn1d0825f2006-03-27 11:37:07 +00002033 }
2034}
2035
2036static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
2037{
2038 PROF_EVENT(117, "new_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002039 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002040 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2041 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2042 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2043 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2044 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2045 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2046 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2047 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2048 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2049 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2050 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2051 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2052 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2053 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2054 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2055 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2056 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2057 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
njn1d0825f2006-03-27 11:37:07 +00002058 } else {
njndbf7ca72006-03-31 11:57:59 +00002059 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
njn1d0825f2006-03-27 11:37:07 +00002060 }
2061}
2062
2063static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
2064{
2065 PROF_EVENT(127, "die_mem_stack_144");
sewardj05a46732006-10-17 01:28:10 +00002066 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002067 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2068 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2069 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2070 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2071 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2072 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2073 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2074 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2075 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2076 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2077 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2078 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2079 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2080 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2081 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2082 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2083 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2084 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002085 } else {
njndbf7ca72006-03-31 11:57:59 +00002086 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
njn1d0825f2006-03-27 11:37:07 +00002087 }
2088}
2089
2090static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
2091{
2092 PROF_EVENT(118, "new_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002093 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002094 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
2095 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
2096 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
2097 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
2098 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
2099 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
2100 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
2101 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
2102 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
2103 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
2104 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
2105 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
2106 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
2107 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
2108 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
2109 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
2110 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
2111 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
2112 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144);
2113 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152);
njn1d0825f2006-03-27 11:37:07 +00002114 } else {
njndbf7ca72006-03-31 11:57:59 +00002115 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
njn1d0825f2006-03-27 11:37:07 +00002116 }
2117}
2118
2119static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
2120{
2121 PROF_EVENT(128, "die_mem_stack_160");
sewardj05a46732006-10-17 01:28:10 +00002122 if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
njndbf7ca72006-03-31 11:57:59 +00002123 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
2124 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
2125 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
2126 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
2127 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
2128 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
2129 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
2130 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
2131 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
2132 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
2133 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
2134 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
2135 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
2136 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
2137 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
2138 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
2139 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
2140 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
2141 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
2142 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00002143 } else {
njndbf7ca72006-03-31 11:57:59 +00002144 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
njn1d0825f2006-03-27 11:37:07 +00002145 }
2146}
2147
2148static void mc_new_mem_stack ( Addr a, SizeT len )
2149{
2150 PROF_EVENT(115, "new_mem_stack");
njndbf7ca72006-03-31 11:57:59 +00002151 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002152}
2153
2154static void mc_die_mem_stack ( Addr a, SizeT len )
2155{
2156 PROF_EVENT(125, "die_mem_stack");
njndbf7ca72006-03-31 11:57:59 +00002157 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
njn1d0825f2006-03-27 11:37:07 +00002158}
njn9b007f62003-04-07 14:40:25 +00002159
sewardj45d94cc2005-04-20 14:44:11 +00002160
njn1d0825f2006-03-27 11:37:07 +00002161/* The AMD64 ABI says:
2162
2163 "The 128-byte area beyond the location pointed to by %rsp is considered
2164 to be reserved and shall not be modified by signal or interrupt
2165 handlers. Therefore, functions may use this area for temporary data
2166 that is not needed across function calls. In particular, leaf functions
2167 may use this area for their entire stack frame, rather than adjusting
2168 the stack pointer in the prologue and epilogue. This area is known as
2169 red zone [sic]."
2170
2171 So after any call or return we need to mark this redzone as containing
2172 undefined values.
2173
2174 Consider this: we're in function f. f calls g. g moves rsp down
2175 modestly (say 16 bytes) and writes stuff all over the red zone, making it
2176 defined. g returns. f is buggy and reads from parts of the red zone
2177 that it didn't write on. But because g filled that area in, f is going
2178 to be picking up defined V bits and so any errors from reading bits of
2179 the red zone it didn't write, will be missed. The only solution I could
2180 think of was to make the red zone undefined when g returns to f.
2181
2182 This is in accordance with the ABI, which makes it clear the redzone
2183 is volatile across function calls.
2184
2185 The problem occurs the other way round too: f could fill the RZ up
2186 with defined values and g could mistakenly read them. So the RZ
2187 also needs to be nuked on function calls.
2188*/
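/* Purely illustrative sketch (not taken from any real compiler output):
   a leaf function such as

      static long g ( long x )
      {
         long tmp[4];                      // 32 bytes of locals
         tmp[0] = x;  tmp[1] = x + 1;
         tmp[2] = x + 2;  tmp[3] = x + 3;
         return tmp[0] + tmp[3];
      }

   may keep 'tmp' entirely in the 128 bytes below %rsp without ever moving
   %rsp.  Once g returns, those bytes would be left marked as defined,
   which is exactly the state the helper below has to undo so that a buggy
   caller reading its own red zone is still reported. */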
sewardj826ec492005-05-12 18:05:00 +00002189void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
2190{
2191 tl_assert(sizeof(UWord) == sizeof(SizeT));
sewardj2a3a1a72005-05-12 23:25:43 +00002192 if (0)
njn8a7b41b2007-09-23 00:51:24 +00002193 VG_(printf)("helperc_MAKE_STACK_UNINIT %p %lu\n", base, len );
sewardj2a3a1a72005-05-12 23:25:43 +00002194
2195# if 0
2196 /* Really slow version */
njndbf7ca72006-03-31 11:57:59 +00002197 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002198# endif
2199
2200# if 0
2201 /* Slow(ish) version, which is fairly easily seen to be correct.
2202 */
bart5dd8e6a2008-03-22 08:04:29 +00002203 if (LIKELY( VG_IS_8_ALIGNED(base) && len==128 )) {
njndbf7ca72006-03-31 11:57:59 +00002204 make_aligned_word64_undefined(base + 0);
2205 make_aligned_word64_undefined(base + 8);
2206 make_aligned_word64_undefined(base + 16);
2207 make_aligned_word64_undefined(base + 24);
sewardj2a3a1a72005-05-12 23:25:43 +00002208
njndbf7ca72006-03-31 11:57:59 +00002209 make_aligned_word64_undefined(base + 32);
2210 make_aligned_word64_undefined(base + 40);
2211 make_aligned_word64_undefined(base + 48);
2212 make_aligned_word64_undefined(base + 56);
sewardj2a3a1a72005-05-12 23:25:43 +00002213
njndbf7ca72006-03-31 11:57:59 +00002214 make_aligned_word64_undefined(base + 64);
2215 make_aligned_word64_undefined(base + 72);
2216 make_aligned_word64_undefined(base + 80);
2217 make_aligned_word64_undefined(base + 88);
sewardj2a3a1a72005-05-12 23:25:43 +00002218
njndbf7ca72006-03-31 11:57:59 +00002219 make_aligned_word64_undefined(base + 96);
2220 make_aligned_word64_undefined(base + 104);
2221 make_aligned_word64_undefined(base + 112);
2222 make_aligned_word64_undefined(base + 120);
sewardj2a3a1a72005-05-12 23:25:43 +00002223 } else {
njndbf7ca72006-03-31 11:57:59 +00002224 MC_(make_mem_undefined)(base, len);
sewardj2a3a1a72005-05-12 23:25:43 +00002225 }
2226# endif
2227
2228 /* Idea is: go fast when
2229 * 8-aligned and length is 128
2230 * the sm is available in the main primary map
njn1d0825f2006-03-27 11:37:07 +00002231        * the address range falls entirely within a single secondary map
2232 If all those conditions hold, just update the V+A bits by writing
2233 directly into the vabits array. (If the sm was distinguished, this
2234 will make a copy and then write to it.)
sewardj2a3a1a72005-05-12 23:25:43 +00002235 */
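   /* Quick arithmetic check of the fast path below (illustrative): at
      2 V+A bits per data byte, 128 bytes of stack correspond to
      128*2/8 == 32 bytes of vabits8, i.e. the 16 UShort stores p[0..15];
      the 288-byte ELF ppc64 case further down likewise needs
      288*2/16 == 36 UShort stores, p[0..35]. */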
bart5dd8e6a2008-03-22 08:04:29 +00002236 if (LIKELY( len == 128 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00002237 /* Now we know the address range is suitably sized and aligned. */
2238 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002239 UWord a_hi = (UWord)(base + 128 - 1);
njn1d0825f2006-03-27 11:37:07 +00002240 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2241 if (a_hi < MAX_PRIMARY_ADDRESS) {
2242 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002243 SecMap* sm = get_secmap_for_writing_low(a_lo);
2244 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2a3a1a72005-05-12 23:25:43 +00002245 /* Now we know that the entire address range falls within a
2246 single secondary map, and that that secondary 'lives' in
2247 the main primary map. */
bart5dd8e6a2008-03-22 08:04:29 +00002248 if (LIKELY(sm == sm_hi)) {
njn1d0825f2006-03-27 11:37:07 +00002249 // Finally, we know that the range is entirely within one secmap.
2250 UWord v_off = SM_OFF(a_lo);
2251 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002252 p[ 0] = VA_BITS16_UNDEFINED;
2253 p[ 1] = VA_BITS16_UNDEFINED;
2254 p[ 2] = VA_BITS16_UNDEFINED;
2255 p[ 3] = VA_BITS16_UNDEFINED;
2256 p[ 4] = VA_BITS16_UNDEFINED;
2257 p[ 5] = VA_BITS16_UNDEFINED;
2258 p[ 6] = VA_BITS16_UNDEFINED;
2259 p[ 7] = VA_BITS16_UNDEFINED;
2260 p[ 8] = VA_BITS16_UNDEFINED;
2261 p[ 9] = VA_BITS16_UNDEFINED;
2262 p[10] = VA_BITS16_UNDEFINED;
2263 p[11] = VA_BITS16_UNDEFINED;
2264 p[12] = VA_BITS16_UNDEFINED;
2265 p[13] = VA_BITS16_UNDEFINED;
2266 p[14] = VA_BITS16_UNDEFINED;
2267 p[15] = VA_BITS16_UNDEFINED;
sewardj2a3a1a72005-05-12 23:25:43 +00002268 return;
njn1d0825f2006-03-27 11:37:07 +00002269 }
sewardj2a3a1a72005-05-12 23:25:43 +00002270 }
2271 }
2272
sewardj2e1a6772006-01-18 04:16:27 +00002273 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
bart5dd8e6a2008-03-22 08:04:29 +00002274 if (LIKELY( len == 288 && VG_IS_8_ALIGNED(base) )) {
njn1d0825f2006-03-27 11:37:07 +00002275 /* Now we know the address range is suitably sized and aligned. */
2276 UWord a_lo = (UWord)(base);
sewardj3f5f5562006-06-16 21:39:08 +00002277 UWord a_hi = (UWord)(base + 288 - 1);
njn1d0825f2006-03-27 11:37:07 +00002278 tl_assert(a_lo < a_hi); // paranoia: detect overflow
2279 if (a_hi < MAX_PRIMARY_ADDRESS) {
2280 // Now we know the entire range is within the main primary map.
njna7c7ebd2006-03-28 12:51:02 +00002281 SecMap* sm = get_secmap_for_writing_low(a_lo);
2282 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
sewardj2e1a6772006-01-18 04:16:27 +00002283 /* Now we know that the entire address range falls within a
2284 single secondary map, and that that secondary 'lives' in
2285 the main primary map. */
bart5dd8e6a2008-03-22 08:04:29 +00002286 if (LIKELY(sm == sm_hi)) {
njn1d0825f2006-03-27 11:37:07 +00002287 // Finally, we know that the range is entirely within one secmap.
2288 UWord v_off = SM_OFF(a_lo);
2289 UShort* p = (UShort*)(&sm->vabits8[v_off]);
njndbf7ca72006-03-31 11:57:59 +00002290 p[ 0] = VA_BITS16_UNDEFINED;
2291 p[ 1] = VA_BITS16_UNDEFINED;
2292 p[ 2] = VA_BITS16_UNDEFINED;
2293 p[ 3] = VA_BITS16_UNDEFINED;
2294 p[ 4] = VA_BITS16_UNDEFINED;
2295 p[ 5] = VA_BITS16_UNDEFINED;
2296 p[ 6] = VA_BITS16_UNDEFINED;
2297 p[ 7] = VA_BITS16_UNDEFINED;
2298 p[ 8] = VA_BITS16_UNDEFINED;
2299 p[ 9] = VA_BITS16_UNDEFINED;
2300 p[10] = VA_BITS16_UNDEFINED;
2301 p[11] = VA_BITS16_UNDEFINED;
2302 p[12] = VA_BITS16_UNDEFINED;
2303 p[13] = VA_BITS16_UNDEFINED;
2304 p[14] = VA_BITS16_UNDEFINED;
2305 p[15] = VA_BITS16_UNDEFINED;
2306 p[16] = VA_BITS16_UNDEFINED;
2307 p[17] = VA_BITS16_UNDEFINED;
2308 p[18] = VA_BITS16_UNDEFINED;
2309 p[19] = VA_BITS16_UNDEFINED;
2310 p[20] = VA_BITS16_UNDEFINED;
2311 p[21] = VA_BITS16_UNDEFINED;
2312 p[22] = VA_BITS16_UNDEFINED;
2313 p[23] = VA_BITS16_UNDEFINED;
2314 p[24] = VA_BITS16_UNDEFINED;
2315 p[25] = VA_BITS16_UNDEFINED;
2316 p[26] = VA_BITS16_UNDEFINED;
2317 p[27] = VA_BITS16_UNDEFINED;
2318 p[28] = VA_BITS16_UNDEFINED;
2319 p[29] = VA_BITS16_UNDEFINED;
2320 p[30] = VA_BITS16_UNDEFINED;
2321 p[31] = VA_BITS16_UNDEFINED;
2322 p[32] = VA_BITS16_UNDEFINED;
2323 p[33] = VA_BITS16_UNDEFINED;
2324 p[34] = VA_BITS16_UNDEFINED;
2325 p[35] = VA_BITS16_UNDEFINED;
sewardj2e1a6772006-01-18 04:16:27 +00002326 return;
njn1d0825f2006-03-27 11:37:07 +00002327 }
sewardj2e1a6772006-01-18 04:16:27 +00002328 }
2329 }
2330
sewardj2a3a1a72005-05-12 23:25:43 +00002331 /* else fall into slow case */
njndbf7ca72006-03-31 11:57:59 +00002332 MC_(make_mem_undefined)(base, len);
sewardj826ec492005-05-12 18:05:00 +00002333}
2334
2335
nethercote8b76fe52004-11-08 19:20:09 +00002336/*------------------------------------------------------------*/
2337/*--- Checking memory ---*/
2338/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00002339
sewardje4ccc012005-05-02 12:53:38 +00002340typedef
2341 enum {
2342 MC_Ok = 5,
2343 MC_AddrErr = 6,
2344 MC_ValueErr = 7
2345 }
2346 MC_ReadResult;
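/* Usage sketch (illustrative only); callers typically do something like

      Addr bad_addr = 0;
      MC_ReadResult res = is_mem_defined( buf, size, &bad_addr );
      if (MC_Ok != res) {
         Bool isAddrErr = ( MC_AddrErr == res );
         // report an addressability or definedness error at bad_addr
      }

   i.e. addressability problems (MC_AddrErr) are reported in preference
   to uninitialised-value problems (MC_ValueErr). */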
2347
2348
njn25e49d8e72002-09-23 09:36:25 +00002349/* Check permissions for address range. If inadequate permissions
2350 exist, *bad_addr is set to the offending address, so the caller can
2351 know what it is. */
2352
sewardjecf8e102003-07-12 12:11:39 +00002353/* Returns True if [a .. a+len) is entirely unaddressable.  Otherwise,
2354 returns False, and if bad_addr is non-NULL, sets *bad_addr to
2355 indicate the lowest failing address. Functions below are
2356 similar. */
njndbf7ca72006-03-31 11:57:59 +00002357Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00002358{
nethercote451eae92004-11-02 13:06:32 +00002359 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002360 UWord vabits2;
2361
njndbf7ca72006-03-31 11:57:59 +00002362 PROF_EVENT(60, "check_mem_is_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00002363 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002364 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
njn1d0825f2006-03-27 11:37:07 +00002365 vabits2 = get_vabits2(a);
2366 if (VA_BITS2_NOACCESS != vabits2) {
2367 if (bad_addr != NULL) *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00002368 return False;
2369 }
2370 a++;
2371 }
2372 return True;
2373}
2374
njndbf7ca72006-03-31 11:57:59 +00002375static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002376{
nethercote451eae92004-11-02 13:06:32 +00002377 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002378 UWord vabits2;
2379
njndbf7ca72006-03-31 11:57:59 +00002380 PROF_EVENT(62, "is_mem_addressable");
njn25e49d8e72002-09-23 09:36:25 +00002381 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002382 PROF_EVENT(63, "is_mem_addressable(loop)");
njn1d0825f2006-03-27 11:37:07 +00002383 vabits2 = get_vabits2(a);
2384 if (VA_BITS2_NOACCESS == vabits2) {
njn25e49d8e72002-09-23 09:36:25 +00002385 if (bad_addr != NULL) *bad_addr = a;
2386 return False;
2387 }
2388 a++;
2389 }
2390 return True;
2391}
2392
njndbf7ca72006-03-31 11:57:59 +00002393static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002394{
nethercote451eae92004-11-02 13:06:32 +00002395 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002396 UWord vabits2;
njn25e49d8e72002-09-23 09:36:25 +00002397
njndbf7ca72006-03-31 11:57:59 +00002398 PROF_EVENT(64, "is_mem_defined");
2399 DEBUG("is_mem_defined\n");
njn25e49d8e72002-09-23 09:36:25 +00002400 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002401 PROF_EVENT(65, "is_mem_defined(loop)");
njn1d0825f2006-03-27 11:37:07 +00002402 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002403 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002404 // Error! Nb: Report addressability errors in preference to
2405 // definedness errors. And don't report definedness errors unless
2406 // --undef-value-errors=yes.
2407 if (bad_addr != NULL) *bad_addr = a;
2408 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2409 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002410 }
2411 a++;
2412 }
nethercote8b76fe52004-11-08 19:20:09 +00002413 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +00002414}
2415
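/* A minimal sketch (not used elsewhere in this file) of how the 2-bit
   VA encodings consulted by the three checkers above translate into a
   human-readable per-byte state.  It assumes only get_vabits2() and the
   VA_BITS2_* constants defined earlier in this file. */
static const HChar* mc_describe_byte_state ( Addr a )
{
   UWord vabits2 = get_vabits2(a);
   if (VA_BITS2_NOACCESS  == vabits2) return "unaddressable";
   if (VA_BITS2_UNDEFINED == vabits2) return "addressable but undefined";
   if (VA_BITS2_DEFINED   == vabits2) return "addressable and defined";
   return "addressable, partially defined";   /* the remaining encoding */
}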
2416
2417/* Check a zero-terminated ascii string. Tricky -- don't want to
2418 examine the actual bytes, to find the end, until we're sure it is
2419 safe to do so. */
2420
njndbf7ca72006-03-31 11:57:59 +00002421static MC_ReadResult mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002422{
njn1d0825f2006-03-27 11:37:07 +00002423 UWord vabits2;
2424
njndbf7ca72006-03-31 11:57:59 +00002425 PROF_EVENT(66, "mc_is_defined_asciiz");
2426 DEBUG("mc_is_defined_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00002427 while (True) {
njndbf7ca72006-03-31 11:57:59 +00002428 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
njn1d0825f2006-03-27 11:37:07 +00002429 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002430 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002431 // Error! Nb: Report addressability errors in preference to
2432 // definedness errors. And don't report definedness errors unless
2433 // --undef-value-errors=yes.
2434 if (bad_addr != NULL) *bad_addr = a;
2435 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2436 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002437 }
2438 /* Ok, a is safe to read. */
njn1d0825f2006-03-27 11:37:07 +00002439 if (* ((UChar*)a) == 0) {
sewardj45d94cc2005-04-20 14:44:11 +00002440 return MC_Ok;
njn1d0825f2006-03-27 11:37:07 +00002441 }
njn25e49d8e72002-09-23 09:36:25 +00002442 a++;
2443 }
2444}
2445
2446
2447/*------------------------------------------------------------*/
2448/*--- Memory event handlers ---*/
2449/*------------------------------------------------------------*/
2450
njn25e49d8e72002-09-23 09:36:25 +00002451static
njndbf7ca72006-03-31 11:57:59 +00002452void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
2453 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002454{
njn25e49d8e72002-09-23 09:36:25 +00002455 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002456 Bool ok = is_mem_addressable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002457
njn25e49d8e72002-09-23 09:36:25 +00002458 if (!ok) {
2459 switch (part) {
2460 case Vg_CoreSysCall:
njn718d3b12006-12-16 00:54:12 +00002461 mc_record_memparam_error ( tid, bad_addr, /*isAddrErr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002462 break;
2463
njn25e49d8e72002-09-23 09:36:25 +00002464 case Vg_CoreSignal:
njn718d3b12006-12-16 00:54:12 +00002465 mc_record_core_mem_error( tid, /*isAddrErr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002466 break;
2467
2468 default:
njndbf7ca72006-03-31 11:57:59 +00002469 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002470 }
2471 }
njn25e49d8e72002-09-23 09:36:25 +00002472}
2473
2474static
njndbf7ca72006-03-31 11:57:59 +00002475void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00002476 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002477{
njn25e49d8e72002-09-23 09:36:25 +00002478 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002479 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00002480
nethercote8b76fe52004-11-08 19:20:09 +00002481 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00002482 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00002483
njn25e49d8e72002-09-23 09:36:25 +00002484 switch (part) {
2485 case Vg_CoreSysCall:
njn718d3b12006-12-16 00:54:12 +00002486 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
njn25e49d8e72002-09-23 09:36:25 +00002487 break;
2488
njn25e49d8e72002-09-23 09:36:25 +00002489 /* If we're being asked to jump to a silly address, record an error
2490 message before potentially crashing the entire system. */
2491 case Vg_CoreTranslate:
njn1d0825f2006-03-27 11:37:07 +00002492 mc_record_jump_error( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002493 break;
2494
2495 default:
njndbf7ca72006-03-31 11:57:59 +00002496 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002497 }
2498 }
njn25e49d8e72002-09-23 09:36:25 +00002499}
2500
2501static
njndbf7ca72006-03-31 11:57:59 +00002502void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00002503 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00002504{
nethercote8b76fe52004-11-08 19:20:09 +00002505 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00002506 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00002507
njnca82cc02004-11-22 17:18:48 +00002508 tl_assert(part == Vg_CoreSysCall);
njndbf7ca72006-03-31 11:57:59 +00002509 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00002510 if (MC_Ok != res) {
njn718d3b12006-12-16 00:54:12 +00002511 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
2512 mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
njn25e49d8e72002-09-23 09:36:25 +00002513 }
njn25e49d8e72002-09-23 09:36:25 +00002514}
2515
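/* For orientation: the three handlers above are what a syscall wrapper's
   pre-checks ultimately reach.  A wrapper wanting [buf, buf+count) to be
   fully initialised would, in effect, cause a call like the following
   (a sketch only; "buf" and "count" are hypothetical wrapper arguments):

      check_mem_is_defined( Vg_CoreSysCall, tid, "write(buf)",
                            (Addr)buf, count );

   and an unaddressable or undefined byte in that range is then reported
   as a "Syscall param write(buf) ..." error. */
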
njn25e49d8e72002-09-23 09:36:25 +00002516static
nethercote451eae92004-11-02 13:06:32 +00002517void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002518{
njndbf7ca72006-03-31 11:57:59 +00002519 /* Ignore the permissions, just make it defined. Seems to work... */
njnba7b4582006-09-21 15:59:30 +00002520 // Code is marked defined; initialised variables get put in the data
2521 // segment and so are defined; and uninitialised variables get put in the
2522 // bss segment and are auto-zeroed (and so defined).
2523 //
2524 // It's possible that there will be padding between global variables.
2525 // This will also be auto-zeroed, and marked as defined by Memcheck. If
2526 // a program uses it, Memcheck will not complain. This is arguably a
2527 // false negative, but it's a grey area -- the behaviour is defined (the
2528 // padding is zeroed) but it's probably not what the user intended. And
2529 // we can't avoid it.
nethercote451eae92004-11-02 13:06:32 +00002530 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
njndbf7ca72006-03-31 11:57:59 +00002531 a, (ULong)len, rr, ww, xx);
2532 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002533}
2534
2535static
njnb8dca862005-03-14 02:42:44 +00002536void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00002537{
njndbf7ca72006-03-31 11:57:59 +00002538 MC_(make_mem_defined)(a, len);
njn25e49d8e72002-09-23 09:36:25 +00002539}
2540
njncf45fd42004-11-24 16:30:22 +00002541static
2542void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
2543{
njndbf7ca72006-03-31 11:57:59 +00002544 MC_(make_mem_defined)(a, len);
njncf45fd42004-11-24 16:30:22 +00002545}
njn25e49d8e72002-09-23 09:36:25 +00002546
sewardj45d94cc2005-04-20 14:44:11 +00002547
njn25e49d8e72002-09-23 09:36:25 +00002548/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002549/*--- Register event handlers ---*/
2550/*------------------------------------------------------------*/
2551
sewardj45d94cc2005-04-20 14:44:11 +00002552/* When some chunk of guest state is written, mark the corresponding
2553 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00002554 chunks of guest state, hence the MAX_REG_WRITE_SIZE value below, which
2555 has to be as big as the biggest chunk of guest state written in one go.
sewardj45d94cc2005-04-20 14:44:11 +00002556*/
2557static void mc_post_reg_write ( CorePart part, ThreadId tid,
2558 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00002559{
sewardj05a46732006-10-17 01:28:10 +00002560# define MAX_REG_WRITE_SIZE 1408
cerion21082042005-12-06 19:07:08 +00002561 UChar area[MAX_REG_WRITE_SIZE];
2562 tl_assert(size <= MAX_REG_WRITE_SIZE);
njn1d0825f2006-03-27 11:37:07 +00002563 VG_(memset)(area, V_BITS8_DEFINED, size);
njncf45fd42004-11-24 16:30:22 +00002564 VG_(set_shadow_regs_area)( tid, offset, size, area );
cerion21082042005-12-06 19:07:08 +00002565# undef MAX_REG_WRITE_SIZE
njnd3040452003-05-19 15:04:06 +00002566}
2567
sewardj45d94cc2005-04-20 14:44:11 +00002568static
2569void mc_post_reg_write_clientcall ( ThreadId tid,
2570 OffT offset, SizeT size,
2571 Addr f)
njnd3040452003-05-19 15:04:06 +00002572{
njncf45fd42004-11-24 16:30:22 +00002573 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00002574}
2575
sewardj45d94cc2005-04-20 14:44:11 +00002576/* Look at the definedness of the guest's shadow state for
2577 [offset, offset+len). If any part of that is undefined, record
2578 a parameter error.
2579*/
2580static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
2581 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00002582{
sewardj45d94cc2005-04-20 14:44:11 +00002583 Int i;
2584 Bool bad;
2585
2586 UChar area[16];
2587 tl_assert(size <= 16);
2588
2589 VG_(get_shadow_regs_area)( tid, offset, size, area );
2590
2591 bad = False;
2592 for (i = 0; i < size; i++) {
njn1d0825f2006-03-27 11:37:07 +00002593 if (area[i] != V_BITS8_DEFINED) {
sewardj2c27f702005-05-03 18:19:05 +00002594 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002595 break;
2596 }
nethercote8b76fe52004-11-08 19:20:09 +00002597 }
2598
sewardj45d94cc2005-04-20 14:44:11 +00002599 if (bad)
njn718d3b12006-12-16 00:54:12 +00002600 mc_record_regparam_error ( tid, s );
nethercote8b76fe52004-11-08 19:20:09 +00002601}
njnd3040452003-05-19 15:04:06 +00002602
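/* The loop in mc_pre_reg_read, restated as a predicate (a sketch for
   clarity only; the inline loop above is what actually runs).  A guest
   register area is acceptable precisely when every shadow byte is
   V_BITS8_DEFINED. */
static Bool mc_reg_shadow_is_defined ( UChar* area, SizeT size )
{
   SizeT i;
   for (i = 0; i < size; i++)
      if (area[i] != V_BITS8_DEFINED)
         return False;
   return True;
}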
njn25e49d8e72002-09-23 09:36:25 +00002603
sewardj6cf40ff2005-04-20 22:31:26 +00002604/*------------------------------------------------------------*/
njn718d3b12006-12-16 00:54:12 +00002605/*--- Error types ---*/
njn1d0825f2006-03-27 11:37:07 +00002606/*------------------------------------------------------------*/
2607
njn718d3b12006-12-16 00:54:12 +00002608// Different kinds of blocks.
2609typedef enum {
2610 Block_Mallocd = 111,
2611 Block_Freed,
2612 Block_Mempool,
2613 Block_MempoolChunk,
2614 Block_UserG
2615} BlockKind;
2616
2617/* ------------------ Addresses -------------------- */
2618
njn1d0825f2006-03-27 11:37:07 +00002619/* The classification of a faulting address. */
2620typedef
2621 enum {
sewardjb8b79ad2008-03-03 01:35:41 +00002622 Addr_Undescribed, // as-yet unclassified
2623 Addr_Unknown, // classification yielded nothing useful
2624 Addr_Block, // in malloc'd/free'd block
2625 Addr_Stack, // on a thread's stack
2626 Addr_DataSym, // in a global data sym
2627 Addr_Variable, // variable described by the debug info
2628 Addr_SectKind // last-ditch classification attempt
njn1d0825f2006-03-27 11:37:07 +00002629 }
njn718d3b12006-12-16 00:54:12 +00002630 AddrTag;
njn1d0825f2006-03-27 11:37:07 +00002631
njn1d0825f2006-03-27 11:37:07 +00002632typedef
njn718d3b12006-12-16 00:54:12 +00002633 struct _AddrInfo
njn1d0825f2006-03-27 11:37:07 +00002634 AddrInfo;
2635
njn718d3b12006-12-16 00:54:12 +00002636struct _AddrInfo {
2637 AddrTag tag;
2638 union {
2639 // As-yet unclassified.
2640 struct { } Undescribed;
njn1d0825f2006-03-27 11:37:07 +00002641
njn718d3b12006-12-16 00:54:12 +00002642 // On a stack.
2643 struct {
2644 ThreadId tid; // Which thread's stack?
2645 } Stack;
njn1d0825f2006-03-27 11:37:07 +00002646
njn718d3b12006-12-16 00:54:12 +00002647 // This covers heap blocks (normal and from mempools) and user-defined
2648 // blocks.
2649 struct {
2650 BlockKind block_kind;
2651 Char* block_desc; // "block", "mempool" or user-defined
2652 SizeT block_szB;
2653 OffT rwoffset;
2654 ExeContext* lastchange;
2655 } Block;
njn1d0825f2006-03-27 11:37:07 +00002656
sewardjb8b79ad2008-03-03 01:35:41 +00002657 // In a global .data symbol. This holds the first 127 chars of
2658 // the variable's name (zero terminated), plus an offset.
2659 struct {
2660 Char name[128];
2661 OffT offset;
2662 } DataSym;
2663
2664 // Is described by Dwarf debug info. Arbitrary strings. Must
2665 // be the same length.
2666 struct {
2667 Char descr1[96];
2668 Char descr2[96];
2669 } Variable;
2670
2671 // Could only narrow it down to be the PLT/GOT/etc of a given
2672 // object. Better than nothing, perhaps.
2673 struct {
2674 Char objname[128];
2675 VgSectKind kind;
2676 } SectKind;
2677
njn718d3b12006-12-16 00:54:12 +00002678 // Classification yielded nothing useful.
2679 struct { } Unknown;
2680
2681 } Addr;
2682};
2683
2684/* ------------------ Errors ----------------------- */
njn1d0825f2006-03-27 11:37:07 +00002685
2686/* What kind of error it is. */
2687typedef
njn718d3b12006-12-16 00:54:12 +00002688 enum {
2689 Err_Value,
2690 Err_Cond,
2691 Err_CoreMem,
2692 Err_Addr,
2693 Err_Jump,
2694 Err_RegParam,
2695 Err_MemParam,
2696 Err_User,
2697 Err_Free,
2698 Err_FreeMismatch,
2699 Err_Overlap,
2700 Err_Leak,
2701 Err_IllegalMempool,
njn1d0825f2006-03-27 11:37:07 +00002702 }
njn718d3b12006-12-16 00:54:12 +00002703 MC_ErrorTag;
njn1d0825f2006-03-27 11:37:07 +00002704
njn1d0825f2006-03-27 11:37:07 +00002705
njn718d3b12006-12-16 00:54:12 +00002706typedef struct _MC_Error MC_Error;
2707
2708struct _MC_Error {
2709 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
2710 //MC_ErrorTag tag;
2711
2712 union {
2713 // Use of an undefined value:
2714 // - as a pointer in a load or store
2715 // - as a jump target
2716 struct {
2717 SizeT szB; // size of value in bytes
2718 } Value;
2719
2720 // Use of an undefined value in a conditional branch or move.
2721 struct {
2722 } Cond;
2723
2724 // Addressability error in core (signal-handling) operation.
2725 // It would be good to get rid of this error kind, merge it with
2726 // another one somehow.
2727 struct {
2728 } CoreMem;
2729
2730 // Use of an unaddressable memory location in a load or store.
2731 struct {
2732 Bool isWrite; // read or write?
2733 SizeT szB; // not used for exec (jump) errors
2734 Bool maybe_gcc; // True if just below %esp -- could be a gcc bug
2735 AddrInfo ai;
2736 } Addr;
2737
2738 // Jump to an unaddressable memory location.
2739 struct {
2740 AddrInfo ai;
2741 } Jump;
2742
2743 // System call register input contains undefined bytes.
2744 struct {
2745 } RegParam;
2746
2747 // System call memory input contains undefined/unaddressable bytes
2748 struct {
2749 Bool isAddrErr; // Addressability or definedness error?
2750 AddrInfo ai;
2751 } MemParam;
2752
2753 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
2754 struct {
2755 Bool isAddrErr; // Addressability or definedness error?
2756 AddrInfo ai;
2757 } User;
2758
2759 // Program tried to free() something that's not a heap block (this
2760 // covers double-frees).
2761 struct {
2762 AddrInfo ai;
2763 } Free;
2764
2765 // Program allocates heap block with one function
2766 // (malloc/new/new[]/custom) and deallocates it with a non-matching one.
2767 struct {
2768 AddrInfo ai;
2769 } FreeMismatch;
2770
2771 // Call to strcpy, memcpy, etc, with overlapping blocks.
2772 struct {
2773 Addr src; // Source block
2774 Addr dst; // Destination block
2775 Int szB; // Size in bytes; 0 if unused.
2776 } Overlap;
2777
2778 // A memory leak.
2779 struct {
2780 UInt n_this_record;
2781 UInt n_total_records;
2782 LossRecord* lossRecord;
2783 } Leak;
2784
2785 // A memory pool error.
2786 struct {
2787 AddrInfo ai;
2788 } IllegalMempool;
2789
2790 } Err;
2791};
2792
njn1d0825f2006-03-27 11:37:07 +00002793
2794/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00002795/*--- Printing errors ---*/
2796/*------------------------------------------------------------*/
2797
njn718d3b12006-12-16 00:54:12 +00002798static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
njn1d0825f2006-03-27 11:37:07 +00002799{
2800 HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
2801 HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";
2802
njn718d3b12006-12-16 00:54:12 +00002803 switch (ai->tag) {
2804 case Addr_Unknown:
2805 if (maybe_gcc) {
njn1d0825f2006-03-27 11:37:07 +00002806 VG_(message)(Vg_UserMsg,
2807 "%sAddress 0x%llx is just below the stack ptr. "
2808 "To suppress, use: --workaround-gcc296-bugs=yes%s",
2809 xpre, (ULong)a, xpost
2810 );
2811 } else {
2812 VG_(message)(Vg_UserMsg,
2813 "%sAddress 0x%llx "
2814 "is not stack'd, malloc'd or (recently) free'd%s",
2815 xpre, (ULong)a, xpost);
2816 }
2817 break;
njn718d3b12006-12-16 00:54:12 +00002818
2819 case Addr_Stack:
2820 VG_(message)(Vg_UserMsg,
2821 "%sAddress 0x%llx is on thread %d's stack%s",
2822 xpre, (ULong)a, ai->Addr.Stack.tid, xpost);
2823 break;
2824
2825 case Addr_Block: {
2826 SizeT block_szB = ai->Addr.Block.block_szB;
2827 OffT rwoffset = ai->Addr.Block.rwoffset;
njn1d0825f2006-03-27 11:37:07 +00002828 SizeT delta;
2829 const Char* relative;
njn1d0825f2006-03-27 11:37:07 +00002830
njn718d3b12006-12-16 00:54:12 +00002831 if (rwoffset < 0) {
2832 delta = (SizeT)(-rwoffset);
njn1d0825f2006-03-27 11:37:07 +00002833 relative = "before";
njn718d3b12006-12-16 00:54:12 +00002834 } else if (rwoffset >= block_szB) {
2835 delta = rwoffset - block_szB;
njn1d0825f2006-03-27 11:37:07 +00002836 relative = "after";
2837 } else {
njn718d3b12006-12-16 00:54:12 +00002838 delta = rwoffset;
njn1d0825f2006-03-27 11:37:07 +00002839 relative = "inside";
2840 }
2841 VG_(message)(Vg_UserMsg,
2842 "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
2843 xpre,
njn718d3b12006-12-16 00:54:12 +00002844 a, delta, relative, ai->Addr.Block.block_desc,
2845 block_szB,
2846 ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
2847 : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
2848 : "client-defined",
njn1d0825f2006-03-27 11:37:07 +00002849 xpost);
njn718d3b12006-12-16 00:54:12 +00002850 VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
njn1d0825f2006-03-27 11:37:07 +00002851 break;
2852 }
njn718d3b12006-12-16 00:54:12 +00002853
sewardjb8b79ad2008-03-03 01:35:41 +00002854 case Addr_DataSym:
2855 VG_(message)(Vg_UserMsg,
2856 "%sAddress 0x%llx is %llu bytes "
2857 "inside data symbol \"%t\"%s",
2858 xpre,
2859 (ULong)a,
2860 (ULong)ai->Addr.DataSym.offset,
2861 ai->Addr.DataSym.name,
2862 xpost);
2863 break;
2864
2865 case Addr_Variable:
2866 if (ai->Addr.Variable.descr1[0] != '\0')
2867 VG_(message)(Vg_UserMsg, "%s%s%s",
2868 xpre, ai->Addr.Variable.descr1, xpost);
2869 if (ai->Addr.Variable.descr2[0] != '\0')
2870 VG_(message)(Vg_UserMsg, "%s%s%s",
2871 xpre, ai->Addr.Variable.descr2, xpost);
2872 break;
2873
2874 case Addr_SectKind:
2875 VG_(message)(Vg_UserMsg,
2876 "%sAddress 0x%llx is in the %t segment of %t%s",
2877 xpre,
2878 (ULong)a,
2879 VG_(pp_SectKind)(ai->Addr.SectKind.kind),
2880 ai->Addr.SectKind.objname,
2881 xpost);
2882 break;
2883
njn1d0825f2006-03-27 11:37:07 +00002884 default:
2885 VG_(tool_panic)("mc_pp_AddrInfo");
2886 }
2887}
2888
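/* For orientation, the Addr_Block case above is what produces the
   familiar auxiliary line in error reports, e.g. (values invented for
   illustration):

      Address 0x4028028 is 0 bytes inside a block of size 10 alloc'd

   followed by the stack trace stored in ai->Addr.Block.lastchange. */
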
njn718d3b12006-12-16 00:54:12 +00002889static const HChar* str_leak_lossmode ( Reachedness lossmode )
njn9e63cb62005-05-08 18:34:59 +00002890{
njn718d3b12006-12-16 00:54:12 +00002891 const HChar *loss = "?";
2892 switch (lossmode) {
2893 case Unreached: loss = "definitely lost"; break;
2894 case IndirectLeak: loss = "indirectly lost"; break;
2895 case Interior: loss = "possibly lost"; break;
2896 case Proper: loss = "still reachable"; break;
2897 }
2898 return loss;
2899}
njn9e63cb62005-05-08 18:34:59 +00002900
njn718d3b12006-12-16 00:54:12 +00002901static const HChar* xml_leak_kind ( Reachedness lossmode )
2902{
2903 const HChar *loss = "?";
2904 switch (lossmode) {
2905 case Unreached: loss = "Leak_DefinitelyLost"; break;
2906 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
2907 case Interior: loss = "Leak_PossiblyLost"; break;
2908 case Proper: loss = "Leak_StillReachable"; break;
2909 }
2910 return loss;
2911}
2912
2913static void mc_pp_msg( Char* xml_name, Error* err, const HChar* format, ... )
2914{
sewardj71bc3cb2005-05-19 00:25:45 +00002915 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
2916 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
njn718d3b12006-12-16 00:54:12 +00002917 Char buf[256];
2918 va_list vargs;
2919
2920 if (VG_(clo_xml))
2921 VG_(message)(Vg_UserMsg, " <kind>%s</kind>", xml_name);
2922 // Stick xpre and xpost on the front and back of the format string.
2923 VG_(snprintf)(buf, 256, "%s%s%s", xpre, format, xpost);
2924 va_start(vargs, format);
2925 VG_(vmessage) ( Vg_UserMsg, buf, vargs );
2926 va_end(vargs);
2927 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
2928}
2929
2930static void mc_pp_Error ( Error* err )
2931{
2932 MC_Error* extra = VG_(get_error_extra)(err);
sewardj71bc3cb2005-05-19 00:25:45 +00002933
njn9e63cb62005-05-08 18:34:59 +00002934 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00002935 case Err_CoreMem: {
2936 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
2937 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
2938 signal handler frame. --njn */
2939 mc_pp_msg("CoreMemError", err,
2940 "%s contains unaddressable byte(s)",
2941 VG_(get_error_string)(err));
njn9e63cb62005-05-08 18:34:59 +00002942 break;
njn9e63cb62005-05-08 18:34:59 +00002943 }
2944
njn718d3b12006-12-16 00:54:12 +00002945 case Err_Value:
2946 mc_pp_msg("UninitValue", err,
2947 "Use of uninitialised value of size %d",
2948 extra->Err.Value.szB);
2949 break;
2950
2951 case Err_Cond:
2952 mc_pp_msg("UninitCondition", err,
2953 "Conditional jump or move depends"
2954 " on uninitialised value(s)");
2955 break;
2956
2957 case Err_RegParam:
2958 mc_pp_msg("SyscallParam", err,
2959 "Syscall param %s contains uninitialised byte(s)",
2960 VG_(get_error_string)(err));
2961 break;
2962
2963 case Err_MemParam:
2964 mc_pp_msg("SyscallParam", err,
2965 "Syscall param %s points to %s byte(s)",
2966 VG_(get_error_string)(err),
2967 ( extra->Err.MemParam.isAddrErr
2968 ? "unaddressable" : "uninitialised" ));
2969 mc_pp_AddrInfo(VG_(get_error_address)(err),
2970 &extra->Err.MemParam.ai, False);
2971 break;
2972
2973 case Err_User:
2974 mc_pp_msg("ClientCheck", err,
2975 "%s byte(s) found during client check request",
2976 ( extra->Err.User.isAddrErr
2977 ? "Unaddressable" : "Uninitialised" ));
2978 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
2979 False);
2980 break;
2981
2982 case Err_Free:
2983 mc_pp_msg("InvalidFree", err,
2984 "Invalid free() / delete / delete[]");
2985 mc_pp_AddrInfo(VG_(get_error_address)(err),
2986 &extra->Err.Free.ai, False);
2987 break;
2988
2989 case Err_FreeMismatch:
2990 mc_pp_msg("MismatchedFree", err,
2991 "Mismatched free() / delete / delete []");
2992 mc_pp_AddrInfo(VG_(get_error_address)(err),
2993 &extra->Err.FreeMismatch.ai, False);
2994 break;
2995
2996 case Err_Addr:
2997 if (extra->Err.Addr.isWrite) {
2998 mc_pp_msg("InvalidWrite", err,
2999 "Invalid write of size %d",
3000 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00003001 } else {
njn718d3b12006-12-16 00:54:12 +00003002 mc_pp_msg("InvalidRead", err,
3003 "Invalid read of size %d",
3004 extra->Err.Addr.szB);
njn9e63cb62005-05-08 18:34:59 +00003005 }
njn718d3b12006-12-16 00:54:12 +00003006 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Addr.ai,
3007 extra->Err.Addr.maybe_gcc);
njn9e63cb62005-05-08 18:34:59 +00003008 break;
3009
njn718d3b12006-12-16 00:54:12 +00003010 case Err_Jump:
3011 mc_pp_msg("InvalidJump", err,
3012 "Jump to the invalid address stated on the next line");
3013 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Jump.ai,
3014 False);
njn9e63cb62005-05-08 18:34:59 +00003015 break;
njn1d0825f2006-03-27 11:37:07 +00003016
njn718d3b12006-12-16 00:54:12 +00003017 case Err_Overlap:
3018 if (extra->Err.Overlap.szB == 0)
3019 mc_pp_msg("Overlap", err,
3020 "Source and destination overlap in %s(%p, %p)",
3021 VG_(get_error_string)(err),
3022 extra->Err.Overlap.dst, extra->Err.Overlap.src);
njn1d0825f2006-03-27 11:37:07 +00003023 else
njn718d3b12006-12-16 00:54:12 +00003024 mc_pp_msg("Overlap", err,
3025 "Source and destination overlap in %s(%p, %p, %d)",
3026 VG_(get_error_string)(err),
3027 extra->Err.Overlap.dst, extra->Err.Overlap.src,
3028 extra->Err.Overlap.szB);
njn1d0825f2006-03-27 11:37:07 +00003029 break;
njn1d0825f2006-03-27 11:37:07 +00003030
njn718d3b12006-12-16 00:54:12 +00003031 case Err_IllegalMempool:
3032 mc_pp_msg("InvalidMemPool", err,
3033 "Illegal memory pool address");
3034 mc_pp_AddrInfo(VG_(get_error_address)(err),
3035 &extra->Err.IllegalMempool.ai, False);
njn1d0825f2006-03-27 11:37:07 +00003036 break;
3037
njn718d3b12006-12-16 00:54:12 +00003038 case Err_Leak: {
3039 HChar* xpre = VG_(clo_xml) ? " <what>" : "";
3040 HChar* xpost = VG_(clo_xml) ? "</what>" : "";
3041 UInt n_this_record = extra->Err.Leak.n_this_record;
3042 UInt n_total_records = extra->Err.Leak.n_total_records;
3043 LossRecord* l = extra->Err.Leak.lossRecord;
3044
3045 if (VG_(clo_xml)) {
3046 VG_(message)(Vg_UserMsg, " <kind>%t</kind>",
3047 xml_leak_kind(l->loss_mode));
3048 } else {
3049 VG_(message)(Vg_UserMsg, "");
3050 }
3051
3052 if (l->indirect_bytes) {
3053 VG_(message)(Vg_UserMsg,
3054 "%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
3055 " are %s in loss record %,u of %,u%s",
3056 xpre,
3057 l->total_bytes + l->indirect_bytes,
3058 l->total_bytes, l->indirect_bytes, l->num_blocks,
3059 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
3060 xpost
3061 );
3062 if (VG_(clo_xml)) {
3063 // Nb: don't put commas in these XML numbers
3064 VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
3065 l->total_bytes + l->indirect_bytes);
3066 VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
3067 l->num_blocks);
3068 }
3069 } else {
3070 VG_(message)(
3071 Vg_UserMsg,
3072 "%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
3073 xpre,
3074 l->total_bytes, l->num_blocks,
3075 str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
3076 xpost
3077 );
3078 if (VG_(clo_xml)) {
3079 VG_(message)(Vg_UserMsg, " <leakedbytes>%d</leakedbytes>",
3080 l->total_bytes);
3081 VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
3082 l->num_blocks);
3083 }
3084 }
3085 VG_(pp_ExeContext)(l->allocated_at);
3086 break;
3087 }
3088
njn1d0825f2006-03-27 11:37:07 +00003089 default:
3090 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
3091 VG_(get_error_kind)(err));
3092 VG_(tool_panic)("unknown error code in mc_pp_Error)");
njn9e63cb62005-05-08 18:34:59 +00003093 }
3094}
3095
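/* For orientation, the Err_Leak case above produces output like the
   following (numbers invented for illustration).  Plain-text mode:

      100 bytes in 1 blocks are definitely lost in loss record 1 of 3

   XML mode additionally emits, among other things:

      <kind>Leak_DefinitelyLost</kind>
      <leakedbytes>100</leakedbytes>
      <leakedblocks>1</leakedblocks>

   both followed by the allocation stack via VG_(pp_ExeContext). */
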
3096/*------------------------------------------------------------*/
3097/*--- Recording errors ---*/
3098/*------------------------------------------------------------*/
3099
njn1d0825f2006-03-27 11:37:07 +00003100/* This many bytes below %ESP are considered addressable if we're
3101 doing the --workaround-gcc296-bugs hack. */
3102#define VG_GCC296_BUG_STACK_SLOP 1024
3103
3104/* Is this address within some small distance below %ESP? Used only
3105 for the --workaround-gcc296-bugs kludge. */
3106static Bool is_just_below_ESP( Addr esp, Addr aa )
3107{
3108 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
3109 return True;
3110 else
3111 return False;
3112}
3113
njn718d3b12006-12-16 00:54:12 +00003114/* --- Called from generated and non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00003115
njn718d3b12006-12-16 00:54:12 +00003116static void mc_record_address_error ( ThreadId tid, Addr a, Int szB,
njn1d0825f2006-03-27 11:37:07 +00003117 Bool isWrite )
3118{
njn718d3b12006-12-16 00:54:12 +00003119 MC_Error extra;
sewardj05a46732006-10-17 01:28:10 +00003120 Bool just_below_esp;
3121
3122 if (in_ignored_range(a))
3123 return;
3124
3125# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
3126 /* AIX zero-page handling. On AIX, reads from page zero are,
3127 bizarrely enough, legitimate. Writes to page zero aren't,
3128 though. Since memcheck can't distinguish reads from writes, the
3129 best we can do is to 'act normal' and mark the A bits in the
3130 normal way as noaccess, but then hide any reads from that page
3131 that get reported here. */
njn718d3b12006-12-16 00:54:12 +00003132 if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
sewardj05a46732006-10-17 01:28:10 +00003133 return;
3134
3135 /* Appalling AIX hack. It suppresses reads done by glink
3136 fragments. Getting rid of this would require figuring out
3137 somehow where the referenced data areas are (and their
3138 sizes). */
njn718d3b12006-12-16 00:54:12 +00003139 if ((!isWrite) && szB == sizeof(Word)) {
sewardj05a46732006-10-17 01:28:10 +00003140 UInt i1, i2;
3141 UInt* pc = (UInt*)VG_(get_IP)(tid);
3142 if (sizeof(Word) == 4) {
3143 i1 = 0x800c0000; /* lwz r0,0(r12) */
3144 i2 = 0x804c0004; /* lwz r2,4(r12) */
3145 } else {
3146 i1 = 0xe80c0000; /* ld r0,0(r12) */
3147 i2 = 0xe84c0008; /* ld r2,8(r12) */
3148 }
3149 if (pc[0] == i1 && pc[1] == i2) return;
3150 if (pc[0] == i2 && pc[-1] == i1) return;
3151 }
3152# endif
njn1d0825f2006-03-27 11:37:07 +00003153
3154 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
3155
3156 /* If this is caused by an access immediately below %ESP, and the
3157 user asks nicely, we just ignore it. */
3158 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
3159 return;
3160
njn718d3b12006-12-16 00:54:12 +00003161 extra.Err.Addr.isWrite = isWrite;
3162 extra.Err.Addr.szB = szB;
3163 extra.Err.Addr.maybe_gcc = just_below_esp;
3164 extra.Err.Addr.ai.tag = Addr_Undescribed;
3165 VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003166}
3167
njn718d3b12006-12-16 00:54:12 +00003168static void mc_record_value_error ( ThreadId tid, Int szB )
3169{
3170 MC_Error extra;
3171 tl_assert(MC_(clo_undef_value_errors));
3172 extra.Err.Value.szB = szB;
3173 VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
3174}
3175
3176static void mc_record_cond_error ( ThreadId tid )
3177{
3178 tl_assert(MC_(clo_undef_value_errors));
3179 VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, /*extra*/NULL);
3180}
3181
3182/* --- Called from non-generated code --- */
njn1d0825f2006-03-27 11:37:07 +00003183
3184/* This is for memory errors in pthread functions, as opposed to pthread API
3185 errors which are found by the core. */
njn718d3b12006-12-16 00:54:12 +00003186static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* msg )
njn1d0825f2006-03-27 11:37:07 +00003187{
njn718d3b12006-12-16 00:54:12 +00003188 VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
njn1d0825f2006-03-27 11:37:07 +00003189}
3190
njn718d3b12006-12-16 00:54:12 +00003191static void mc_record_regparam_error ( ThreadId tid, Char* msg )
njn1d0825f2006-03-27 11:37:07 +00003192{
njn1d0825f2006-03-27 11:37:07 +00003193 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003194 VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, /*extra*/NULL );
3195}
3196
3197static void mc_record_memparam_error ( ThreadId tid, Addr a,
3198 Bool isAddrErr, Char* msg )
3199{
3200 MC_Error extra;
3201 tl_assert(VG_INVALID_THREADID != tid);
3202 if (!isAddrErr)
3203 tl_assert(MC_(clo_undef_value_errors));
3204 extra.Err.MemParam.isAddrErr = isAddrErr;
3205 extra.Err.MemParam.ai.tag = Addr_Undescribed;
3206 VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
njn1d0825f2006-03-27 11:37:07 +00003207}
3208
3209static void mc_record_jump_error ( ThreadId tid, Addr a )
3210{
njn718d3b12006-12-16 00:54:12 +00003211 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003212 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003213 extra.Err.Jump.ai.tag = Addr_Undescribed;
3214 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003215}
3216
3217void MC_(record_free_error) ( ThreadId tid, Addr a )
3218{
njn718d3b12006-12-16 00:54:12 +00003219 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003220 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003221 extra.Err.Free.ai.tag = Addr_Undescribed;
3222 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
3223}
3224
3225void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
3226{
3227 MC_Error extra;
3228 AddrInfo* ai = &extra.Err.FreeMismatch.ai;
3229 tl_assert(VG_INVALID_THREADID != tid);
3230 ai->tag = Addr_Block;
3231 ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed'
3232 ai->Addr.Block.block_desc = "block";
3233 ai->Addr.Block.block_szB = mc->szB;
3234 ai->Addr.Block.rwoffset = 0;
3235 ai->Addr.Block.lastchange = mc->where;
3236 VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
3237 &extra );
njn1d0825f2006-03-27 11:37:07 +00003238}
3239
3240void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
3241{
njn718d3b12006-12-16 00:54:12 +00003242 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003243 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003244 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
3245 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
njn1d0825f2006-03-27 11:37:07 +00003246}
3247
njn718d3b12006-12-16 00:54:12 +00003248static void mc_record_overlap_error ( ThreadId tid, Char* function,
3249 Addr src, Addr dst, SizeT szB )
njn1d0825f2006-03-27 11:37:07 +00003250{
njn718d3b12006-12-16 00:54:12 +00003251 MC_Error extra;
njn1d0825f2006-03-27 11:37:07 +00003252 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003253 extra.Err.Overlap.src = src;
3254 extra.Err.Overlap.dst = dst;
3255 extra.Err.Overlap.szB = szB;
njn1d0825f2006-03-27 11:37:07 +00003256 VG_(maybe_record_error)(
njn718d3b12006-12-16 00:54:12 +00003257 tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
njn1d0825f2006-03-27 11:37:07 +00003258}
3259
njn718d3b12006-12-16 00:54:12 +00003260Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
3261 UInt n_total_records, LossRecord* lossRecord,
3262 Bool print_record )
njn1d0825f2006-03-27 11:37:07 +00003263{
njn718d3b12006-12-16 00:54:12 +00003264 MC_Error extra;
3265 extra.Err.Leak.n_this_record = n_this_record;
3266 extra.Err.Leak.n_total_records = n_total_records;
3267 extra.Err.Leak.lossRecord = lossRecord;
njn1d0825f2006-03-27 11:37:07 +00003268 return
njn718d3b12006-12-16 00:54:12 +00003269 VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
3270 lossRecord->allocated_at, print_record,
njn1d0825f2006-03-27 11:37:07 +00003271 /*allow_GDB_attach*/False, /*count_error*/False );
3272}
3273
njn718d3b12006-12-16 00:54:12 +00003274static void mc_record_user_error ( ThreadId tid, Addr a, Bool isAddrErr )
njn9e63cb62005-05-08 18:34:59 +00003275{
njn718d3b12006-12-16 00:54:12 +00003276 MC_Error extra;
njn9e63cb62005-05-08 18:34:59 +00003277
3278 tl_assert(VG_INVALID_THREADID != tid);
njn718d3b12006-12-16 00:54:12 +00003279 extra.Err.User.isAddrErr = isAddrErr;
3280 extra.Err.User.ai.tag = Addr_Undescribed;
3281 VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
njn9e63cb62005-05-08 18:34:59 +00003282}
3283
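/* A client-side sketch of the requests whose failures end up in
   mc_record_user_error() above.  This belongs in user code built against
   memcheck.h, not in this file, so it is kept out of compilation; the
   macro names are the memcheck.h client-request macros and should be
   treated as an assumption here. */
#if 0
#include "memcheck.h"

void client_example ( void )
{
   char buf[64];
   /* Mark the buffer as addressable but undefined. */
   VALGRIND_MAKE_MEM_UNDEFINED(buf, sizeof buf);
   /* Passes: every byte is addressable. */
   VALGRIND_CHECK_MEM_IS_ADDRESSABLE(buf, sizeof buf);
   /* Fails: the bytes are undefined, so an Err_User ("Uninitialised
      byte(s) found during client check request") is recorded. */
   VALGRIND_CHECK_MEM_IS_DEFINED(buf, sizeof buf);
}
#endif
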
njn718d3b12006-12-16 00:54:12 +00003284/*------------------------------------------------------------*/
3285/*--- Other error operations ---*/
3286/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00003287
3288/* Compare error contexts, to detect duplicates. Note that if they
3289 are otherwise the same, the faulting addrs and associated rwoffsets
3290 are allowed to be different. */
3291static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
3292{
njn718d3b12006-12-16 00:54:12 +00003293 MC_Error* extra1 = VG_(get_error_extra)(e1);
3294 MC_Error* extra2 = VG_(get_error_extra)(e2);
njn1d0825f2006-03-27 11:37:07 +00003295
3296 /* Guaranteed by calling function */
3297 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
3298
3299 switch (VG_(get_error_kind)(e1)) {
njn718d3b12006-12-16 00:54:12 +00003300 case Err_CoreMem: {
njn1d0825f2006-03-27 11:37:07 +00003301 Char *e1s, *e2s;
njn1d0825f2006-03-27 11:37:07 +00003302 e1s = VG_(get_error_string)(e1);
3303 e2s = VG_(get_error_string)(e2);
njn718d3b12006-12-16 00:54:12 +00003304 if (e1s == e2s) return True;
3305 if (VG_STREQ(e1s, e2s)) return True;
njn1d0825f2006-03-27 11:37:07 +00003306 return False;
3307 }
3308
njn718d3b12006-12-16 00:54:12 +00003309 case Err_RegParam:
3310 return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
3311
njn1d0825f2006-03-27 11:37:07 +00003312 // Perhaps we should also check the addrinfo.akinds for equality.
3313 // That would result in more error reports, but only in cases where
3314 // a register contains uninitialised bytes and points to memory
3315 // containing uninitialised bytes. Currently, the 2nd of those to be
3316 // detected won't be reported. That is (nearly?) always the memory
3317 // error, which is good.
njn718d3b12006-12-16 00:54:12 +00003318 case Err_MemParam:
3319 if (!VG_STREQ(VG_(get_error_string)(e1),
3320 VG_(get_error_string)(e2))) return False;
njn1d0825f2006-03-27 11:37:07 +00003321 // fall through
njn718d3b12006-12-16 00:54:12 +00003322 case Err_User:
3323 return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
3324 ? True : False );
3325
3326 case Err_Free:
3327 case Err_FreeMismatch:
3328 case Err_Jump:
3329 case Err_IllegalMempool:
3330 case Err_Overlap:
3331 case Err_Cond:
njn1d0825f2006-03-27 11:37:07 +00003332 return True;
3333
njn718d3b12006-12-16 00:54:12 +00003334 case Err_Addr:
3335 return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
3336 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00003337
njn718d3b12006-12-16 00:54:12 +00003338 case Err_Value:
3339 return ( extra1->Err.Value.szB == extra2->Err.Value.szB
3340 ? True : False );
njn1d0825f2006-03-27 11:37:07 +00003341
njn718d3b12006-12-16 00:54:12 +00003342 case Err_Leak:
3343 VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
njn1d0825f2006-03-27 11:37:07 +00003344 "since it's handled with VG_(unique_error)()!");
3345
njn1d0825f2006-03-27 11:37:07 +00003346 default:
3347 VG_(printf)("Error:\n unknown error code %d\n",
3348 VG_(get_error_kind)(e1));
3349 VG_(tool_panic)("unknown error code in mc_eq_Error");
3350 }
3351}
3352
3353/* Function used when searching MC_Chunk lists */
3354static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
3355{
3356 // Nb: this is not quite right! It assumes that the heap block has
3357 // a redzone of size MC_MALLOC_REDZONE_SZB. That's true for malloc'd
3358 // blocks, but not necessarily true for custom-alloc'd blocks. So
3359 // in some cases this could result in an incorrect description (eg.
3360 // saying "12 bytes after block A" when really it's within block B).
3361 // Fixing would require adding redzone size to MC_Chunks, though.
njn718d3b12006-12-16 00:54:12 +00003362 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
njn1d0825f2006-03-27 11:37:07 +00003363 MC_MALLOC_REDZONE_SZB );
3364}
3365
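/* The test above, spelled out (a sketch; it assumes VG_(addr_is_in_block)
   treats its last argument as a redzone applied to both ends of the
   block).  With a redzone of rzB bytes, 'a' is taken to belong to a block
   starting at 'start' of size 'szB' iff
      start - rzB <= a < start + szB + rzB                               */
static Bool mc_addr_in_block_with_redzone ( Addr a, Addr start,
                                            SizeT szB, SizeT rzB )
{
   return (a >= start - rzB) && (a < start + szB + rzB);
}
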
3366// Forward declaration
3367static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
3368
njn718d3b12006-12-16 00:54:12 +00003369
njn1d0825f2006-03-27 11:37:07 +00003370/* Describe an address as best you can, for error messages,
3371 putting the result in ai. */
sewardjb8b79ad2008-03-03 01:35:41 +00003372static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
njn1d0825f2006-03-27 11:37:07 +00003373{
sewardjb8b79ad2008-03-03 01:35:41 +00003374 MC_Chunk* mc;
3375 ThreadId tid;
3376 Addr stack_min, stack_max;
3377 VgSectKind sect;
njn718d3b12006-12-16 00:54:12 +00003378
3379 tl_assert(Addr_Undescribed == ai->tag);
njn1d0825f2006-03-27 11:37:07 +00003380
3381 /* Perhaps it's a user-def'd block? */
sewardjb8b79ad2008-03-03 01:35:41 +00003382 if (client_perm_maybe_describe( a, ai )) {
njn1d0825f2006-03-27 11:37:07 +00003383 return;
njn1d0825f2006-03-27 11:37:07 +00003384 }
3385 /* Search for a recently freed block which might bracket it. */
3386 mc = MC_(get_freed_list_head)();
3387 while (mc) {
3388 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00003389 ai->tag = Addr_Block;
3390 ai->Addr.Block.block_kind = Block_Freed;
3391 ai->Addr.Block.block_desc = "block";
3392 ai->Addr.Block.block_szB = mc->szB;
3393 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
3394 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00003395 return;
3396 }
3397 mc = mc->next;
3398 }
3399 /* Search for a currently malloc'd block which might bracket it. */
3400 VG_(HT_ResetIter)(MC_(malloc_list));
3401 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
3402 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00003403 ai->tag = Addr_Block;
3404 ai->Addr.Block.block_kind = Block_Mallocd;
3405 ai->Addr.Block.block_desc = "block";
3406 ai->Addr.Block.block_szB = mc->szB;
3407 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
3408 ai->Addr.Block.lastchange = mc->where;
njn1d0825f2006-03-27 11:37:07 +00003409 return;
3410 }
3411 }
sewardjb8b79ad2008-03-03 01:35:41 +00003412 /* Perhaps the variable type/location data describes it? */
3413 tl_assert(sizeof(ai->Addr.Variable.descr1)
3414 == sizeof(ai->Addr.Variable.descr2));
3415 VG_(memset)( &ai->Addr.Variable.descr1,
3416 0, sizeof(ai->Addr.Variable.descr1));
3417 VG_(memset)( &ai->Addr.Variable.descr2,
3418 0, sizeof(ai->Addr.Variable.descr2));
3419 if (VG_(get_data_description)(
3420 &ai->Addr.Variable.descr1[0],
3421 &ai->Addr.Variable.descr2[0],
3422 sizeof(ai->Addr.Variable.descr1)-1,
3423 a )) {
3424 ai->tag = Addr_Variable;
3425 tl_assert( ai->Addr.Variable.descr1
3426 [ sizeof(ai->Addr.Variable.descr1)-1 ] == 0);
3427 tl_assert( ai->Addr.Variable.descr2
3428 [ sizeof(ai->Addr.Variable.descr2)-1 ] == 0);
3429 return;
3430 }
3431 /* Have a look at the low level data symbols - perhaps it's in
3432 there. */
3433 VG_(memset)( &ai->Addr.DataSym.name,
3434 0, sizeof(ai->Addr.DataSym.name));
3435 if (VG_(get_datasym_and_offset)(
3436 a, &ai->Addr.DataSym.name[0],
3437 sizeof(ai->Addr.DataSym.name)-1,
3438 &ai->Addr.DataSym.offset )) {
3439 ai->tag = Addr_DataSym;
3440 tl_assert( ai->Addr.DataSym.name
3441 [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
3442 return;
3443 }
3444 /* Perhaps it's on a thread's stack? */
3445 VG_(thread_stack_reset_iter)(&tid);
3446 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
3447 if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
3448 ai->tag = Addr_Stack;
3449 ai->Addr.Stack.tid = tid;
3450 return;
3451 }
3452 }
3453 /* last ditch attempt at classification */
3454 tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
3455 VG_(memset)( &ai->Addr.SectKind.objname,
3456 0, sizeof(ai->Addr.SectKind.objname));
3457 VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
3458 sect = VG_(seginfo_sect_kind)( &ai->Addr.SectKind.objname[0],
3459 sizeof(ai->Addr.SectKind.objname)-1, a);
3460 if (sect != Vg_SectUnknown) {
3461 ai->tag = Addr_SectKind;
3462 ai->Addr.SectKind.kind = sect;
3463 tl_assert( ai->Addr.SectKind.objname
3464 [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
3465 return;
3466 }
njn1d0825f2006-03-27 11:37:07 +00003467 /* Clueless ... */
njn718d3b12006-12-16 00:54:12 +00003468 ai->tag = Addr_Unknown;
njn1d0825f2006-03-27 11:37:07 +00003469 return;
3470}
3471
3472/* Updates the copy with address info if necessary (but not for all errors). */
3473static UInt mc_update_extra( Error* err )
3474{
njn718d3b12006-12-16 00:54:12 +00003475 MC_Error* extra = VG_(get_error_extra)(err);
3476
njn1d0825f2006-03-27 11:37:07 +00003477 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00003478 // These ones don't have addresses associated with them, and so don't
njn1d0825f2006-03-27 11:37:07 +00003479 // need any updating.
njn718d3b12006-12-16 00:54:12 +00003480 case Err_CoreMem:
3481 case Err_Value:
3482 case Err_Cond:
3483 case Err_Overlap:
3484 case Err_RegParam:
3485 // For Err_Leaks the returned size does not matter -- they are always
sewardjb8b79ad2008-03-03 01:35:41 +00003486 // shown with VG_(unique_error)() so their 'extra' is not copied. But
3487 // we make it consistent with the others.
njn718d3b12006-12-16 00:54:12 +00003488 case Err_Leak:
njn1d0825f2006-03-27 11:37:07 +00003489 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00003490
njn718d3b12006-12-16 00:54:12 +00003491 // These ones always involve a memory address.
3492 case Err_Addr:
sewardjb8b79ad2008-03-03 01:35:41 +00003493 describe_addr ( VG_(get_error_address)(err),
3494 &extra->Err.Addr.ai );
njn1d0825f2006-03-27 11:37:07 +00003495 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00003496 case Err_MemParam:
sewardjb8b79ad2008-03-03 01:35:41 +00003497 describe_addr ( VG_(get_error_address)(err),
3498 &extra->Err.MemParam.ai );
njn1d0825f2006-03-27 11:37:07 +00003499 return sizeof(MC_Error);
njn718d3b12006-12-16 00:54:12 +00003500 case Err_Jump:
sewardjb8b79ad2008-03-03 01:35:41 +00003501 describe_addr ( VG_(get_error_address)(err),
3502 &extra->Err.Jump.ai );
njn718d3b12006-12-16 00:54:12 +00003503 return sizeof(MC_Error);
3504 case Err_User:
sewardjb8b79ad2008-03-03 01:35:41 +00003505 describe_addr ( VG_(get_error_address)(err),
3506 &extra->Err.User.ai );
njn718d3b12006-12-16 00:54:12 +00003507 return sizeof(MC_Error);
3508 case Err_Free:
sewardjb8b79ad2008-03-03 01:35:41 +00003509 describe_addr ( VG_(get_error_address)(err),
3510 &extra->Err.Free.ai );
njn718d3b12006-12-16 00:54:12 +00003511 return sizeof(MC_Error);
3512 case Err_IllegalMempool:
3513 describe_addr ( VG_(get_error_address)(err),
3514 &extra->Err.IllegalMempool.ai );
3515 return sizeof(MC_Error);
njn1d0825f2006-03-27 11:37:07 +00003516
njn718d3b12006-12-16 00:54:12 +00003517 // Err_FreeMismatches have already had their address described; this is
njn1d0825f2006-03-27 11:37:07 +00003518 // possible because we have the MC_Chunk on hand when the error is
3519 // detected. However, the address may be part of a user block, and if so
3520 // we override the pre-determined description with a user block one.
njn718d3b12006-12-16 00:54:12 +00003521 case Err_FreeMismatch: {
3522 tl_assert(extra && Block_Mallocd ==
3523 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
njn1d0825f2006-03-27 11:37:07 +00003524 (void)client_perm_maybe_describe( VG_(get_error_address)(err),
njn718d3b12006-12-16 00:54:12 +00003525 &extra->Err.FreeMismatch.ai );
njn1d0825f2006-03-27 11:37:07 +00003526 return sizeof(MC_Error);
3527 }
3528
njn1d0825f2006-03-27 11:37:07 +00003529 default: VG_(tool_panic)("mc_update_extra: bad errkind");
3530 }
3531}
3532
njn9e63cb62005-05-08 18:34:59 +00003533/*------------------------------------------------------------*/
3534/*--- Suppressions ---*/
3535/*------------------------------------------------------------*/
3536
njn718d3b12006-12-16 00:54:12 +00003537typedef
3538 enum {
3539 ParamSupp, // Bad syscall params
3540 UserSupp, // Errors arising from client-request checks
3541 CoreMemSupp, // Memory errors in core (pthread ops, signal handling)
3542
3543 // Undefined value errors of given size
3544 Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,
3545
3546 // Undefined value error in conditional.
3547 CondSupp,
3548
3549 // Unaddressable read/write attempt at given size
3550 Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,
3551
3552 JumpSupp, // Jump to unaddressable target
3553 FreeSupp, // Invalid or mismatching free
3554 OverlapSupp, // Overlapping blocks in memcpy(), strcpy(), etc
3555 LeakSupp, // Something to be suppressed in a leak check.
3556 MempoolSupp, // Memory pool suppression.
3557 }
3558 MC_SuppKind;
3559
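/* For reference, the kinds above correspond to the "Memcheck:<kind>"
   line of a suppression entry.  A sketch of a ParamSupp entry in the
   usual suppression-file format (names invented for illustration):

      {
         my-write-suppression
         Memcheck:Param
         write(buf)
         fun:write
         fun:my_logger
      }

   The extra "write(buf)" line is the string that
   mc_read_extra_suppression_info() below attaches to ParamSupp entries;
   Addr4, Cond, Value8, Leak, etc. select the other kinds and carry no
   extra line. */
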
njn51d827b2005-05-09 01:02:08 +00003560static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00003561{
3562 SuppKind skind;
3563
njn1d0825f2006-03-27 11:37:07 +00003564 if (VG_STREQ(name, "Param")) skind = ParamSupp;
sewardj6362bb52006-11-28 00:15:35 +00003565 else if (VG_STREQ(name, "User")) skind = UserSupp;
njn1d0825f2006-03-27 11:37:07 +00003566 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
3567 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
3568 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
3569 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
3570 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
3571 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
njn718d3b12006-12-16 00:54:12 +00003572 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
njn1d0825f2006-03-27 11:37:07 +00003573 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
3574 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
3575 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
3576 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
njn718d3b12006-12-16 00:54:12 +00003577 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
3578 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
njn9e63cb62005-05-08 18:34:59 +00003579 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
3580 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
3581 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
3582 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
3583 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
3584 else
3585 return False;
3586
3587 VG_(set_supp_kind)(su, skind);
3588 return True;
3589}
3590
njn1d0825f2006-03-27 11:37:07 +00003591static
3592Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
3593{
3594 Bool eof;
3595
3596 if (VG_(get_supp_kind)(su) == ParamSupp) {
3597 eof = VG_(get_line) ( fd, buf, nBuf );
3598 if (eof) return False;
3599 VG_(set_supp_string)(su, VG_(strdup)(buf));
3600 }
3601 return True;
3602}
3603
3604static Bool mc_error_matches_suppression(Error* err, Supp* su)
3605{
njn718d3b12006-12-16 00:54:12 +00003606 Int su_szB;
3607 MC_Error* extra = VG_(get_error_extra)(err);
3608 ErrorKind ekind = VG_(get_error_kind )(err);
njn1d0825f2006-03-27 11:37:07 +00003609
3610 switch (VG_(get_supp_kind)(su)) {
3611 case ParamSupp:
njn718d3b12006-12-16 00:54:12 +00003612 return ((ekind == Err_RegParam || ekind == Err_MemParam)
njn1d0825f2006-03-27 11:37:07 +00003613 && VG_STREQ(VG_(get_error_string)(err),
3614 VG_(get_supp_string)(su)));
3615
sewardj6362bb52006-11-28 00:15:35 +00003616 case UserSupp:
njn718d3b12006-12-16 00:54:12 +00003617 return (ekind == Err_User);
sewardj6362bb52006-11-28 00:15:35 +00003618
njn1d0825f2006-03-27 11:37:07 +00003619 case CoreMemSupp:
njn718d3b12006-12-16 00:54:12 +00003620 return (ekind == Err_CoreMem
njn1d0825f2006-03-27 11:37:07 +00003621 && VG_STREQ(VG_(get_error_string)(err),
3622 VG_(get_supp_string)(su)));
3623
njn718d3b12006-12-16 00:54:12 +00003624 case Value1Supp: su_szB = 1; goto value_case;
3625 case Value2Supp: su_szB = 2; goto value_case;
3626 case Value4Supp: su_szB = 4; goto value_case;
3627 case Value8Supp: su_szB = 8; goto value_case;
3628 case Value16Supp:su_szB =16; goto value_case;
njn1d0825f2006-03-27 11:37:07 +00003629 value_case:
njn718d3b12006-12-16 00:54:12 +00003630 return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
njn1d0825f2006-03-27 11:37:07 +00003631
njn718d3b12006-12-16 00:54:12 +00003632 case CondSupp:
3633 return (ekind == Err_Cond);
3634
3635 case Addr1Supp: su_szB = 1; goto addr_case;
3636 case Addr2Supp: su_szB = 2; goto addr_case;
3637 case Addr4Supp: su_szB = 4; goto addr_case;
3638 case Addr8Supp: su_szB = 8; goto addr_case;
3639 case Addr16Supp:su_szB =16; goto addr_case;
njn1d0825f2006-03-27 11:37:07 +00003640 addr_case:
njn718d3b12006-12-16 00:54:12 +00003641 return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
3642
3643 case JumpSupp:
3644 return (ekind == Err_Jump);
njn1d0825f2006-03-27 11:37:07 +00003645
3646 case FreeSupp:
njn718d3b12006-12-16 00:54:12 +00003647 return (ekind == Err_Free || ekind == Err_FreeMismatch);
njn1d0825f2006-03-27 11:37:07 +00003648
3649 case OverlapSupp:
njn718d3b12006-12-16 00:54:12 +00003650 return (ekind == Err_Overlap);
njn1d0825f2006-03-27 11:37:07 +00003651
3652 case LeakSupp:
njn718d3b12006-12-16 00:54:12 +00003653 return (ekind == Err_Leak);
njn1d0825f2006-03-27 11:37:07 +00003654
3655 case MempoolSupp:
njn718d3b12006-12-16 00:54:12 +00003656 return (ekind == Err_IllegalMempool);
njn1d0825f2006-03-27 11:37:07 +00003657
3658 default:
3659 VG_(printf)("Error:\n"
3660 " unknown suppression type %d\n",
3661 VG_(get_supp_kind)(su));
3662 VG_(tool_panic)("unknown suppression type in "
3663 "MC_(error_matches_suppression)");
3664 }
3665}
3666
3667static Char* mc_get_error_name ( Error* err )
3668{
njn1d0825f2006-03-27 11:37:07 +00003669 switch (VG_(get_error_kind)(err)) {
njn718d3b12006-12-16 00:54:12 +00003670 case Err_RegParam: return "Param";
3671 case Err_MemParam: return "Param";
3672 case Err_User: return "User";
3673 case Err_FreeMismatch: return "Free";
3674 case Err_IllegalMempool: return "Mempool";
3675 case Err_Free: return "Free";
3676 case Err_Jump: return "Jump";
3677 case Err_CoreMem: return "CoreMem";
3678 case Err_Overlap: return "Overlap";
3679 case Err_Leak: return "Leak";
3680 case Err_Cond: return "Cond";
3681 case Err_Addr: {
3682 MC_Error* extra = VG_(get_error_extra)(err);
3683 switch ( extra->Err.Addr.szB ) {
njn1d0825f2006-03-27 11:37:07 +00003684 case 1: return "Addr1";
3685 case 2: return "Addr2";
3686 case 4: return "Addr4";
3687 case 8: return "Addr8";
3688 case 16: return "Addr16";
3689 default: VG_(tool_panic)("unexpected size for Addr");
3690 }
njn718d3b12006-12-16 00:54:12 +00003691 }
3692 case Err_Value: {
3693 MC_Error* extra = VG_(get_error_extra)(err);
3694 switch ( extra->Err.Value.szB ) {
njn1d0825f2006-03-27 11:37:07 +00003695 case 1: return "Value1";
3696 case 2: return "Value2";
3697 case 4: return "Value4";
3698 case 8: return "Value8";
3699 case 16: return "Value16";
3700 default: VG_(tool_panic)("unexpected size for Value");
3701 }
njn718d3b12006-12-16 00:54:12 +00003702 }
njn1d0825f2006-03-27 11:37:07 +00003703 default: VG_(tool_panic)("get_error_name: unexpected type");
3704 }
njn1d0825f2006-03-27 11:37:07 +00003705}
3706
3707static void mc_print_extra_suppression_info ( Error* err )
3708{
njn718d3b12006-12-16 00:54:12 +00003709 ErrorKind ekind = VG_(get_error_kind )(err);
3710 if (Err_RegParam == ekind || Err_MemParam == ekind) {
njn1d0825f2006-03-27 11:37:07 +00003711 VG_(printf)(" %s\n", VG_(get_error_string)(err));
3712 }
3713}
3714
njn9e63cb62005-05-08 18:34:59 +00003715/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00003716/*--- Functions called directly from generated code: ---*/
3717/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00003718/*------------------------------------------------------------*/
3719
njn1d0825f2006-03-27 11:37:07 +00003720/* Types: LOADV32, LOADV16, LOADV8 are:
sewardj6cf40ff2005-04-20 22:31:26 +00003721 UWord fn ( Addr a )
3722 so they return 32-bits on 32-bit machines and 64-bits on
3723 64-bit machines. Addr has the same size as a host word.
3724
njn1d0825f2006-03-27 11:37:07 +00003725 LOADV64 is always ULong fn ( Addr a )
sewardj6cf40ff2005-04-20 22:31:26 +00003726
njn1d0825f2006-03-27 11:37:07 +00003727 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
3728 are a UWord, and for STOREV64 they are a ULong.
sewardj6cf40ff2005-04-20 22:31:26 +00003729*/
3730
njn1d0825f2006-03-27 11:37:07 +00003731/* If any part of '_a' indicated by the mask is 1, either
njn45e81252006-03-28 12:35:08 +00003732 '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
njn1d0825f2006-03-27 11:37:07 +00003733 covered by the primary map. */
njn45e81252006-03-28 12:35:08 +00003734#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
njn1d0825f2006-03-27 11:37:07 +00003735#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
3736
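/* What UNALIGNED_OR_HIGH(a,_sz) tests, spelled out for the 64-bit case
   (a sketch; it assumes N_PRIMARY_MAP is a power of two and that each
   primary-map entry covers 64KB, as set up earlier in this file). */
static INLINE Bool mc_needs_slow_path_64 ( Addr a )
{
   Bool misaligned = (a & 7) != 0;                     /* not 8-aligned */
   Bool too_high   = (a >> 16) >= (Addr)N_PRIMARY_MAP; /* beyond primary map */
   return misaligned || too_high;
}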
3737
sewardj95448072004-11-22 20:19:51 +00003738/* ------------------------ Size = 8 ------------------------ */
3739
njn1d0825f2006-03-27 11:37:07 +00003740static INLINE
3741ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
3742{
3743 UWord sm_off16, vabits16;
3744 SecMap* sm;
3745
3746 PROF_EVENT(200, "mc_LOADV64");
3747
3748#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003749 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003750#else
bart5dd8e6a2008-03-22 08:04:29 +00003751 if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003752 PROF_EVENT(201, "mc_LOADV64-slow1");
njn45e81252006-03-28 12:35:08 +00003753 return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
sewardjf9d81612005-04-23 23:25:49 +00003754 }
3755
njna7c7ebd2006-03-28 12:51:02 +00003756 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003757 sm_off16 = SM_OFF_16(a);
3758 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3759
3760 // Handle common case quickly: a is suitably aligned, is mapped, and
3761 // addressable.
3762 // Convert V bits from compact memory form to expanded register form.
bart5dd8e6a2008-03-22 08:04:29 +00003763 if (LIKELY(vabits16 == VA_BITS16_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003764 return V_BITS64_DEFINED;
bart5dd8e6a2008-03-22 08:04:29 +00003765 } else if (LIKELY(vabits16 == VA_BITS16_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003766 return V_BITS64_UNDEFINED;
3767 } else {
njndbf7ca72006-03-31 11:57:59 +00003768 /* Slow case: the 8 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003769 PROF_EVENT(202, "mc_LOADV64-slow2");
njn45e81252006-03-28 12:35:08 +00003770 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003771 }
3772#endif
3773}
3774
3775VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
3776{
3777 return mc_LOADV64(a, True);
3778}
3779VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
3780{
3781 return mc_LOADV64(a, False);
3782}
sewardjf9d81612005-04-23 23:25:49 +00003783
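/* Standalone sketch (guarded out, not part of the tool) of the "compact
   memory form" -> "expanded register form" conversion that the fast paths
   above try to avoid doing per byte.  The 2-bit encodings and the
   low-bits-first field order are assumptions for illustration; the
   authoritative values are defined earlier in this file. */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_VA2_NOACCESS    0x0
#define EX_VA2_UNDEFINED   0x1
#define EX_VA2_DEFINED     0x2
#define EX_VA2_PARTDEFINED 0x3

/* Expand one shadow byte (four 2-bit V+A fields, one per data byte) into
   register-form V bits: 0x00 for a defined byte, 0xFF otherwise.  This is
   pessimistic; the real slow path also consults the secondary V-bit table
   for partially defined bytes. */
static uint32_t ex_expand_vabits8 ( uint8_t vabits8 )
{
   uint32_t vbits32 = 0;
   int i;
   for (i = 0; i < 4; i++) {
      uint8_t va2 = (vabits8 >> (2*i)) & 0x3;
      uint8_t v8  = (va2 == EX_VA2_DEFINED) ? 0x00 : 0xFF;
      vbits32 |= (uint32_t)v8 << (8*i);
   }
   return vbits32;
}

int main(void)
{
   printf("%#010x\n", ex_expand_vabits8(0xAA)); /* all defined   -> 0x00000000 */
   printf("%#010x\n", ex_expand_vabits8(0x55)); /* all undefined -> 0xffffffff */
   printf("%#010x\n", ex_expand_vabits8(0xA9)); /* byte 0 undef  -> 0x000000ff */
   return 0;
}
#endif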
sewardjf9d81612005-04-23 23:25:49 +00003784
njn1d0825f2006-03-27 11:37:07 +00003785static INLINE
njn4cf530b2006-04-06 13:33:48 +00003786void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003787{
3788 UWord sm_off16, vabits16;
3789 SecMap* sm;
3790
3791 PROF_EVENT(210, "mc_STOREV64");
3792
3793#ifndef PERF_FAST_STOREV
3794 // XXX: this slow case seems to be marginally faster than the fast case!
3795 // Investigate further.
njn4cf530b2006-04-06 13:33:48 +00003796 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003797#else
bart5dd8e6a2008-03-22 08:04:29 +00003798 if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003799 PROF_EVENT(211, "mc_STOREV64-slow1");
njn4cf530b2006-04-06 13:33:48 +00003800 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003801 return;
sewardjf9d81612005-04-23 23:25:49 +00003802 }
3803
njna7c7ebd2006-03-28 12:51:02 +00003804 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003805 sm_off16 = SM_OFF_16(a);
3806 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3807
bart5dd8e6a2008-03-22 08:04:29 +00003808 if (LIKELY( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003809 (VA_BITS16_DEFINED == vabits16 ||
3810 VA_BITS16_UNDEFINED == vabits16) ))
njn1d0825f2006-03-27 11:37:07 +00003811 {
3812 /* Handle common case quickly: a is suitably aligned, */
3813 /* is mapped, and is addressable. */
3814 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003815 if (V_BITS64_DEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003816 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003817 } else if (V_BITS64_UNDEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003818 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003819 } else {
3820 /* Slow but general case -- writing partially defined bytes. */
3821 PROF_EVENT(212, "mc_STOREV64-slow2");
njn4cf530b2006-04-06 13:33:48 +00003822 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003823 }
3824 } else {
3825 /* Slow but general case. */
3826 PROF_EVENT(213, "mc_STOREV64-slow3");
njn4cf530b2006-04-06 13:33:48 +00003827 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003828 }
3829#endif
3830}
3831
njn4cf530b2006-04-06 13:33:48 +00003832VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003833{
njn4cf530b2006-04-06 13:33:48 +00003834 mc_STOREV64(a, vbits64, True);
njn1d0825f2006-03-27 11:37:07 +00003835}
njn4cf530b2006-04-06 13:33:48 +00003836VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003837{
njn4cf530b2006-04-06 13:33:48 +00003838 mc_STOREV64(a, vbits64, False);
njn1d0825f2006-03-27 11:37:07 +00003839}
sewardj95448072004-11-22 20:19:51 +00003840
sewardj95448072004-11-22 20:19:51 +00003841
3842/* ------------------------ Size = 4 ------------------------ */
3843
njn1d0825f2006-03-27 11:37:07 +00003844static INLINE
3845UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
3846{
3847 UWord sm_off, vabits8;
3848 SecMap* sm;
3849
3850 PROF_EVENT(220, "mc_LOADV32");
3851
3852#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003853 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003854#else
bart5dd8e6a2008-03-22 08:04:29 +00003855 if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003856 PROF_EVENT(221, "mc_LOADV32-slow1");
njn45e81252006-03-28 12:35:08 +00003857 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003858 }
3859
njna7c7ebd2006-03-28 12:51:02 +00003860 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003861 sm_off = SM_OFF(a);
3862 vabits8 = sm->vabits8[sm_off];
3863
3864 // Handle common case quickly: a is suitably aligned, is mapped, and the
3865 // entire word32 it lives in is addressable.
3866 // Convert V bits from compact memory form to expanded register form.
3867 // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
3868 // Almost certainly not necessary, but be paranoid.
bart5dd8e6a2008-03-22 08:04:29 +00003869 if (LIKELY(vabits8 == VA_BITS8_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003870 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
bart5dd8e6a2008-03-22 08:04:29 +00003871 } else if (LIKELY(vabits8 == VA_BITS8_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003872 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
3873 } else {
njndbf7ca72006-03-31 11:57:59 +00003874 /* Slow case: the 4 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003875 PROF_EVENT(222, "mc_LOADV32-slow2");
njn45e81252006-03-28 12:35:08 +00003876 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003877 }
3878#endif
3879}
3880
3881VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
3882{
3883 return mc_LOADV32(a, True);
3884}
3885VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
3886{
3887 return mc_LOADV32(a, False);
3888}
sewardjc1a2cda2005-04-21 17:34:00 +00003889
sewardjc1a2cda2005-04-21 17:34:00 +00003890
njn1d0825f2006-03-27 11:37:07 +00003891static INLINE
njn4cf530b2006-04-06 13:33:48 +00003892void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003893{
3894 UWord sm_off, vabits8;
3895 SecMap* sm;
3896
3897 PROF_EVENT(230, "mc_STOREV32");
3898
3899#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00003900 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003901#else
bart5dd8e6a2008-03-22 08:04:29 +00003902 if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003903 PROF_EVENT(231, "mc_STOREV32-slow1");
njn4cf530b2006-04-06 13:33:48 +00003904 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003905 return;
sewardjc1a2cda2005-04-21 17:34:00 +00003906 }
3907
njna7c7ebd2006-03-28 12:51:02 +00003908 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003909 sm_off = SM_OFF(a);
3910 vabits8 = sm->vabits8[sm_off];
3911
3912//---------------------------------------------------------------------------
3913#if 1
3914 // Cleverness: sometimes we don't have to write the shadow memory at
3915 // all, if we can tell that what we want to write is the same as what is
3916 // already there.
njn4cf530b2006-04-06 13:33:48 +00003917 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003918 if (vabits8 == (UInt)VA_BITS8_DEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003919 return;
njndbf7ca72006-03-31 11:57:59 +00003920 } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
3921 sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
njn1d0825f2006-03-27 11:37:07 +00003922 } else {
njndbf7ca72006-03-31 11:57:59 +00003923 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003924 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003925 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003926 }
njn4cf530b2006-04-06 13:33:48 +00003927 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003928 if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003929 return;
njndbf7ca72006-03-31 11:57:59 +00003930 } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
3931 sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003932 } else {
njndbf7ca72006-03-31 11:57:59 +00003933 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003934 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003935 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003936 }
3937 } else {
3938 // Partially defined word
3939 PROF_EVENT(234, "mc_STOREV32-slow4");
njn4cf530b2006-04-06 13:33:48 +00003940 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003941 }
3942//---------------------------------------------------------------------------
3943#else
bart5dd8e6a2008-03-22 08:04:29 +00003944 if (LIKELY( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003945 (VA_BITS8_DEFINED == vabits8 ||
3946 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00003947 {
3948 /* Handle common case quickly: a is suitably aligned, */
3949 /* is mapped, and is addressable. */
3950 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003951 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003952 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003953 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003954 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003955 } else {
3956 /* Slow but general case -- writing partially defined bytes. */
3957 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003958 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003959 }
3960 } else {
3961 /* Slow but general case. */
3962 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003963 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003964 }
3965#endif
3966//---------------------------------------------------------------------------
3967#endif
3968}
3969
njn4cf530b2006-04-06 13:33:48 +00003970VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003971{
njn4cf530b2006-04-06 13:33:48 +00003972 mc_STOREV32(a, vbits32, True);
njn1d0825f2006-03-27 11:37:07 +00003973}
njn4cf530b2006-04-06 13:33:48 +00003974VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003975{
njn4cf530b2006-04-06 13:33:48 +00003976 mc_STOREV32(a, vbits32, False);
njn1d0825f2006-03-27 11:37:07 +00003977}
njn25e49d8e72002-09-23 09:36:25 +00003978
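/* Standalone sketch (guarded out) of the "don't touch the shadow byte if it
   already says what we are about to say" trick used by mc_STOREV32 above.
   Names and encodings are illustrative, not the real Memcheck ones. */
#if 0
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_VA8_DEFINED   0xAA
#define EX_VA8_UNDEFINED 0x55

/* Returns true if the fast path could handle the store, false if the caller
   would have to fall back to the general (slow) store routine. */
static bool ex_store32_shadow ( uint8_t* vabits8, bool value_fully_defined,
                                bool sm_is_distinguished )
{
   uint8_t want = value_fully_defined ? EX_VA8_DEFINED : EX_VA8_UNDEFINED;
   if (*vabits8 == want)
      return true;                 /* nothing to write at all              */
   if (sm_is_distinguished)
      return false;                /* read-only secmap: copy-on-write path */
   if (*vabits8 == EX_VA8_DEFINED || *vabits8 == EX_VA8_UNDEFINED) {
      *vabits8 = want;             /* flip between the two common states   */
      return true;
   }
   return false;                   /* partially defined / noaccess         */
}

int main(void)
{
   uint8_t shadow = EX_VA8_DEFINED;
   printf("%d\n", ex_store32_shadow(&shadow, true,  false)); /* 1: no write */
   printf("%d\n", ex_store32_shadow(&shadow, false, false)); /* 1: flipped  */
   printf("%#04x\n", shadow);                                /* 0x55        */
   return 0;
}
#endif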
njn25e49d8e72002-09-23 09:36:25 +00003979
sewardj95448072004-11-22 20:19:51 +00003980/* ------------------------ Size = 2 ------------------------ */
3981
njn1d0825f2006-03-27 11:37:07 +00003982static INLINE
3983UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
3984{
3985 UWord sm_off, vabits8;
3986 SecMap* sm;
3987
3988 PROF_EVENT(240, "mc_LOADV16");
3989
3990#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003991 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003992#else
bart5dd8e6a2008-03-22 08:04:29 +00003993 if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00003994 PROF_EVENT(241, "mc_LOADV16-slow1");
njn45e81252006-03-28 12:35:08 +00003995 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003996 }
3997
njna7c7ebd2006-03-28 12:51:02 +00003998 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003999 sm_off = SM_OFF(a);
4000 vabits8 = sm->vabits8[sm_off];
4001 // Handle common case quickly: a is suitably aligned, is mapped, and is
4002 // addressable.
4003 // Convert V bits from compact memory form to expanded register form
njndbf7ca72006-03-31 11:57:59 +00004004 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS16_DEFINED; }
4005 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004006 else {
njndbf7ca72006-03-31 11:57:59 +00004007 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
njn1d0825f2006-03-27 11:37:07 +00004008 // the two sub-bytes.
4009 UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00004010 if (vabits4 == VA_BITS4_DEFINED ) { return V_BITS16_DEFINED; }
4011 else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004012 else {
njndbf7ca72006-03-31 11:57:59 +00004013 /* Slow case: the two bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00004014 PROF_EVENT(242, "mc_LOADV16-slow2");
njn45e81252006-03-28 12:35:08 +00004015 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004016 }
4017 }
4018#endif
4019}
4020
4021VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
4022{
4023 return mc_LOADV16(a, True);
4024}
4025VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
4026{
4027 return mc_LOADV16(a, False);
4028}
sewardjc1a2cda2005-04-21 17:34:00 +00004029
sewardjc1a2cda2005-04-21 17:34:00 +00004030
njn1d0825f2006-03-27 11:37:07 +00004031static INLINE
njn4cf530b2006-04-06 13:33:48 +00004032void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00004033{
4034 UWord sm_off, vabits8;
4035 SecMap* sm;
4036
4037 PROF_EVENT(250, "mc_STOREV16");
4038
4039#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00004040 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004041#else
bart5dd8e6a2008-03-22 08:04:29 +00004042 if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00004043 PROF_EVENT(251, "mc_STOREV16-slow1");
njn4cf530b2006-04-06 13:33:48 +00004044 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004045 return;
sewardjc1a2cda2005-04-21 17:34:00 +00004046 }
4047
njna7c7ebd2006-03-28 12:51:02 +00004048 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004049 sm_off = SM_OFF(a);
4050 vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00004051 if (LIKELY( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00004052 (VA_BITS8_DEFINED == vabits8 ||
4053 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00004054 {
4055 /* Handle common case quickly: a is suitably aligned, */
4056 /* is mapped, and is addressable. */
4057 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00004058 if (V_BITS16_DEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00004059 insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
njn1d0825f2006-03-27 11:37:07 +00004060 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00004061 } else if (V_BITS16_UNDEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00004062 insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00004063 &(sm->vabits8[sm_off]) );
4064 } else {
4065 /* Slow but general case -- writing partially defined bytes. */
4066 PROF_EVENT(252, "mc_STOREV16-slow2");
njn4cf530b2006-04-06 13:33:48 +00004067 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004068 }
4069 } else {
4070 /* Slow but general case. */
4071 PROF_EVENT(253, "mc_STOREV16-slow3");
njn4cf530b2006-04-06 13:33:48 +00004072 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00004073 }
4074#endif
4075}
njn25e49d8e72002-09-23 09:36:25 +00004076
njn4cf530b2006-04-06 13:33:48 +00004077VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00004078{
njn4cf530b2006-04-06 13:33:48 +00004079 mc_STOREV16(a, vbits16, True);
njn1d0825f2006-03-27 11:37:07 +00004080}
njn4cf530b2006-04-06 13:33:48 +00004081VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00004082{
njn4cf530b2006-04-06 13:33:48 +00004083 mc_STOREV16(a, vbits16, False);
njn1d0825f2006-03-27 11:37:07 +00004084}
sewardj5d28efc2005-04-21 22:16:29 +00004085
njn25e49d8e72002-09-23 09:36:25 +00004086
sewardj95448072004-11-22 20:19:51 +00004087/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00004088/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00004089
njnaf839f52005-06-23 03:27:57 +00004090VG_REGPARM(1)
njn1d0825f2006-03-27 11:37:07 +00004091UWord MC_(helperc_LOADV8) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00004092{
njn1d0825f2006-03-27 11:37:07 +00004093 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00004094 SecMap* sm;
4095
njn1d0825f2006-03-27 11:37:07 +00004096 PROF_EVENT(260, "mc_LOADV8");
sewardjc1a2cda2005-04-21 17:34:00 +00004097
njn1d0825f2006-03-27 11:37:07 +00004098#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00004099 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004100#else
bart5dd8e6a2008-03-22 08:04:29 +00004101 if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00004102 PROF_EVENT(261, "mc_LOADV8-slow1");
njn45e81252006-03-28 12:35:08 +00004103 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004104 }
4105
njna7c7ebd2006-03-28 12:51:02 +00004106 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004107 sm_off = SM_OFF(a);
4108 vabits8 = sm->vabits8[sm_off];
4109 // Convert V bits from compact memory form to expanded register form
4110 // Handle common case quickly: a is mapped, and the entire
4111 // word32 it lives in is addressable.
njndbf7ca72006-03-31 11:57:59 +00004112 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS8_DEFINED; }
4113 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004114 else {
njndbf7ca72006-03-31 11:57:59 +00004115 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
njn1d0825f2006-03-27 11:37:07 +00004116 // the single byte.
4117 UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00004118 if (vabits2 == VA_BITS2_DEFINED ) { return V_BITS8_DEFINED; }
4119 else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00004120 else {
njndbf7ca72006-03-31 11:57:59 +00004121 /* Slow case: the byte is not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00004122 PROF_EVENT(262, "mc_LOADV8-slow2");
njn45e81252006-03-28 12:35:08 +00004123 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004124 }
sewardjc1a2cda2005-04-21 17:34:00 +00004125 }
njn1d0825f2006-03-27 11:37:07 +00004126#endif
njn25e49d8e72002-09-23 09:36:25 +00004127}
4128
sewardjc1a2cda2005-04-21 17:34:00 +00004129
njnaf839f52005-06-23 03:27:57 +00004130VG_REGPARM(2)
njn4cf530b2006-04-06 13:33:48 +00004131void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
njn25e49d8e72002-09-23 09:36:25 +00004132{
njn1d0825f2006-03-27 11:37:07 +00004133 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00004134 SecMap* sm;
4135
njn1d0825f2006-03-27 11:37:07 +00004136 PROF_EVENT(270, "mc_STOREV8");
sewardjc1a2cda2005-04-21 17:34:00 +00004137
njn1d0825f2006-03-27 11:37:07 +00004138#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00004139 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004140#else
bart5dd8e6a2008-03-22 08:04:29 +00004141 if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00004142 PROF_EVENT(271, "mc_STOREV8-slow1");
njn4cf530b2006-04-06 13:33:48 +00004143 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004144 return;
4145 }
4146
njna7c7ebd2006-03-28 12:51:02 +00004147 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00004148 sm_off = SM_OFF(a);
4149 vabits8 = sm->vabits8[sm_off];
bart5dd8e6a2008-03-22 08:04:29 +00004150 if (LIKELY
njn1d0825f2006-03-27 11:37:07 +00004151 ( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00004152 ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
njn1d0825f2006-03-27 11:37:07 +00004153 || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
4154 )
4155 )
4156 )
4157 {
sewardjc1a2cda2005-04-21 17:34:00 +00004158 /* Handle common case quickly: a is mapped, the entire word32 it
4159 lives in is addressable. */
njn1d0825f2006-03-27 11:37:07 +00004160 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00004161 if (V_BITS8_DEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00004162 insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
njn1d0825f2006-03-27 11:37:07 +00004163 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00004164 } else if (V_BITS8_UNDEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00004165 insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00004166 &(sm->vabits8[sm_off]) );
4167 } else {
4168 /* Slow but general case -- writing partially defined bytes. */
4169 PROF_EVENT(272, "mc_STOREV8-slow2");
njn4cf530b2006-04-06 13:33:48 +00004170 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00004171 }
sewardjc1a2cda2005-04-21 17:34:00 +00004172 } else {
njn1d0825f2006-03-27 11:37:07 +00004173 /* Slow but general case. */
4174 PROF_EVENT(273, "mc_STOREV8-slow3");
njn4cf530b2006-04-06 13:33:48 +00004175 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00004176 }
njn1d0825f2006-03-27 11:37:07 +00004177#endif
njn25e49d8e72002-09-23 09:36:25 +00004178}
4179
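/* Standalone sketch (guarded out) of the sub-byte accessors used by the
   16-bit and 8-bit paths above: each shadow byte carries four 2-bit fields,
   selected by the low two bits of the data address.  The low-bits-first
   layout is assumed here for illustration. */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint8_t ex_extract_vabits2 ( uintptr_t a, uint8_t vabits8 )
{
   unsigned shift = (unsigned)(a & 3) << 1;      /* 0, 2, 4 or 6 */
   return (vabits8 >> shift) & 0x3;
}

static void ex_insert_vabits2 ( uintptr_t a, uint8_t vabits2, uint8_t* vabits8 )
{
   unsigned shift = (unsigned)(a & 3) << 1;
   *vabits8 = (uint8_t)((*vabits8 & ~(0x3 << shift)) | (vabits2 << shift));
}

int main(void)
{
   uint8_t shadow = 0xAA;                        /* assume: four "defined" fields   */
   ex_insert_vabits2(0x1003, 0x1, &shadow);      /* byte at offset 3 -> "undefined" */
   printf("shadow=%#04x field3=%u\n",
          (unsigned)shadow, (unsigned)ex_extract_vabits2(0x1003, shadow));
   return 0;
}
#endif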
4180
sewardjc859fbf2005-04-22 21:10:28 +00004181/*------------------------------------------------------------*/
4182/*--- Functions called directly from generated code: ---*/
4183/*--- Value-check failure handlers. ---*/
4184/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004185
njn5c004e42002-11-18 11:04:50 +00004186void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004187{
njn718d3b12006-12-16 00:54:12 +00004188 mc_record_cond_error ( VG_(get_running_tid)() );
njn25e49d8e72002-09-23 09:36:25 +00004189}
4190
njn5c004e42002-11-18 11:04:50 +00004191void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004192{
njn9e63cb62005-05-08 18:34:59 +00004193 mc_record_value_error ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00004194}
4195
njn5c004e42002-11-18 11:04:50 +00004196void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00004197{
njn9e63cb62005-05-08 18:34:59 +00004198 mc_record_value_error ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00004199}
4200
sewardj11bcc4e2005-04-23 22:38:38 +00004201void MC_(helperc_value_check8_fail) ( void )
4202{
njn9e63cb62005-05-08 18:34:59 +00004203 mc_record_value_error ( VG_(get_running_tid)(), 8 );
sewardj11bcc4e2005-04-23 22:38:38 +00004204}
4205
njnaf839f52005-06-23 03:27:57 +00004206VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00004207{
njn9e63cb62005-05-08 18:34:59 +00004208 mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00004209}
4210
njn25e49d8e72002-09-23 09:36:25 +00004211
sewardjc2c12c22006-03-08 13:20:09 +00004212/*------------------------------------------------------------*/
4213/*--- Metadata get/set functions, for client requests. ---*/
4214/*------------------------------------------------------------*/
4215
njn1d0825f2006-03-27 11:37:07 +00004216// Nb: this expands the V+A bits out into register-form V bits, even though
4217// they're in memory. This is for backward compatibility, and because it's
4218// probably what the user wants.
4219
4220/* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
sewardjc2c12c22006-03-08 13:20:09 +00004221 error [no longer used], 3 == addressing error. */
njn718d3b12006-12-16 00:54:12 +00004222/* Nb: We used to issue various definedness/addressability errors from here,
4223 but we took them out because they ranged from not-very-helpful to
4224 downright annoying, and they complicated the error data structures. */
sewardjc2c12c22006-03-08 13:20:09 +00004225static Int mc_get_or_set_vbits_for_client (
4226 ThreadId tid,
njn1d0825f2006-03-27 11:37:07 +00004227 Addr a,
4228 Addr vbits,
4229 SizeT szB,
sewardjc2c12c22006-03-08 13:20:09 +00004230 Bool setting /* True <=> set vbits, False <=> get vbits */
4231)
4232{
sewardjc2c12c22006-03-08 13:20:09 +00004233 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00004234 Bool ok;
4235 UChar vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00004236
njn1d0825f2006-03-27 11:37:07 +00004237 /* Check that arrays are addressable before doing any getting/setting. */
4238 for (i = 0; i < szB; i++) {
njn718d3b12006-12-16 00:54:12 +00004239 if (VA_BITS2_NOACCESS == get_vabits2(a + i) ||
4240 VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
njn1d0825f2006-03-27 11:37:07 +00004241 return 3;
sewardjc2c12c22006-03-08 13:20:09 +00004242 }
4243 }
njn1d0825f2006-03-27 11:37:07 +00004244
sewardjc2c12c22006-03-08 13:20:09 +00004245 /* Do the copy */
4246 if (setting) {
njn1d0825f2006-03-27 11:37:07 +00004247 /* setting */
4248 for (i = 0; i < szB; i++) {
4249 ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
4250 tl_assert(ok);
sewardjc2c12c22006-03-08 13:20:09 +00004251 }
4252 } else {
4253 /* getting */
njn1d0825f2006-03-27 11:37:07 +00004254 for (i = 0; i < szB; i++) {
4255 ok = get_vbits8(a + i, &vbits8);
4256 tl_assert(ok);
njn1d0825f2006-03-27 11:37:07 +00004257 ((UChar*)vbits)[i] = vbits8;
sewardjc2c12c22006-03-08 13:20:09 +00004258 }
4259 // The bytes in vbits[] have now been set, so mark them as such.
njndbf7ca72006-03-31 11:57:59 +00004260 MC_(make_mem_defined)(vbits, szB);
njn1d0825f2006-03-27 11:37:07 +00004261 }
sewardjc2c12c22006-03-08 13:20:09 +00004262
4263 return 1;
4264}
sewardj05fe85e2005-04-27 22:46:36 +00004265
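/* Hedged client-side sketch (guarded out): exercising this routine from the
   program under test via the VALGRIND_GET_VBITS / VALGRIND_SET_VBITS requests
   in memcheck.h.  The include path is installation-dependent; a return value
   of 3 corresponds to the "unaddressable byte involved" case handled above. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "memcheck.h"

int main(void)
{
   unsigned char* buf   = malloc(8);
   unsigned char* other = malloc(8);
   unsigned char  vbits[8];
   memset(vbits, 0, sizeof vbits);

   buf[0] = buf[1] = 1;                           /* bytes 2..7 stay undefined */
   int r1 = VALGRIND_GET_VBITS(buf, vbits, 8);    /* read definedness of buf   */
   int r2 = VALGRIND_SET_VBITS(other, vbits, 8);  /* copy it onto 'other'      */
   printf("get=%d set=%d vbits[0]=%02x vbits[7]=%02x\n",
          r1, r2, vbits[0], vbits[7]);
   free(buf); free(other);
   return 0;
}
#endif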
4266
4267/*------------------------------------------------------------*/
4268/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
4269/*------------------------------------------------------------*/
4270
4271/* For the memory leak detector, say whether an entire 64k chunk of
4272 address space is possibly in use, or not. If in doubt return
4273 True.
4274*/
4275static
4276Bool mc_is_within_valid_secondary ( Addr a )
4277{
4278 SecMap* sm = maybe_get_secmap_for ( a );
sewardj05a46732006-10-17 01:28:10 +00004279 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
4280 || in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004281 /* Definitely not in use. */
4282 return False;
4283 } else {
4284 return True;
4285 }
4286}
4287
4288
4289/* For the memory leak detector, say whether or not a given word
4290 address is to be regarded as valid. */
4291static
4292Bool mc_is_valid_aligned_word ( Addr a )
4293{
4294 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
4295 if (sizeof(UWord) == 4) {
4296 tl_assert(VG_IS_4_ALIGNED(a));
4297 } else {
4298 tl_assert(VG_IS_8_ALIGNED(a));
4299 }
sewardj05a46732006-10-17 01:28:10 +00004300 if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok
4301 && !in_ignored_range(a)) {
sewardj05fe85e2005-04-27 22:46:36 +00004302 return True;
4303 } else {
4304 return False;
4305 }
4306}
sewardja4495682002-10-21 07:29:59 +00004307
4308
nethercote996901a2004-08-03 13:29:09 +00004309/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00004310 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00004311 tool. */
njnb8dca862005-03-14 02:42:44 +00004312static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00004313{
njn1d0825f2006-03-27 11:37:07 +00004314 MC_(do_detect_memory_leaks) (
sewardj05fe85e2005-04-27 22:46:36 +00004315 tid,
4316 mode,
4317 mc_is_within_valid_secondary,
4318 mc_is_valid_aligned_word
4319 );
njn25e49d8e72002-09-23 09:36:25 +00004320}
4321
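/* Hedged client-side sketch (guarded out): triggering this leak scan from
   the program under test with the memcheck.h requests.  VALGRIND_DO_LEAK_CHECK
   reaches mc_detect_memory_leaks() via the client-request handler further
   down; VALGRIND_COUNT_LEAKS then reads back the totals it computed. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include "memcheck.h"

int main(void)
{
   void* p = malloc(100);
   p = NULL;                                 /* block is now unreachable    */

   VALGRIND_DO_LEAK_CHECK;                   /* full check, results to log  */

   unsigned long leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
   VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed);
   printf("leaked=%lu dubious=%lu reachable=%lu suppressed=%lu\n",
          leaked, dubious, reachable, suppressed);
   return 0;
}
#endif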
4322
sewardjc859fbf2005-04-22 21:10:28 +00004323/*------------------------------------------------------------*/
4324/*--- Initialisation ---*/
4325/*------------------------------------------------------------*/
4326
4327static void init_shadow_memory ( void )
4328{
4329 Int i;
4330 SecMap* sm;
4331
njn1d0825f2006-03-27 11:37:07 +00004332 tl_assert(V_BIT_UNDEFINED == 1);
4333 tl_assert(V_BIT_DEFINED == 0);
4334 tl_assert(V_BITS8_UNDEFINED == 0xFF);
4335 tl_assert(V_BITS8_DEFINED == 0);
4336
sewardjc859fbf2005-04-22 21:10:28 +00004337 /* Build the 3 distinguished secondaries */
sewardjc859fbf2005-04-22 21:10:28 +00004338 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004339 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
sewardjc859fbf2005-04-22 21:10:28 +00004340
njndbf7ca72006-03-31 11:57:59 +00004341 sm = &sm_distinguished[SM_DIST_UNDEFINED];
4342 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004343
njndbf7ca72006-03-31 11:57:59 +00004344 sm = &sm_distinguished[SM_DIST_DEFINED];
4345 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
sewardjc859fbf2005-04-22 21:10:28 +00004346
4347 /* Set up the primary map. */
4348 /* These entries gradually get overwritten as the used address
4349 space expands. */
4350 for (i = 0; i < N_PRIMARY_MAP; i++)
4351 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
4352
sewardj05a46732006-10-17 01:28:10 +00004353 /* Auxiliary primary maps */
4354 init_auxmap_L1_L2();
4355
sewardjc859fbf2005-04-22 21:10:28 +00004356 /* auxmap_size = auxmap_used = 0;
4357 no ... these are statically initialised */
njn1d0825f2006-03-27 11:37:07 +00004358
4359 /* Secondary V bit table */
4360 secVBitTable = createSecVBitTable();
sewardjc859fbf2005-04-22 21:10:28 +00004361}
4362
4363
4364/*------------------------------------------------------------*/
4365/*--- Sanity check machinery (permanently engaged) ---*/
4366/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00004367
njn51d827b2005-05-09 01:02:08 +00004368static Bool mc_cheap_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004369{
jseward9800fd32004-01-04 23:08:04 +00004370 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00004371 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00004372 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00004373 return True;
njn25e49d8e72002-09-23 09:36:25 +00004374}
4375
njn51d827b2005-05-09 01:02:08 +00004376static Bool mc_expensive_sanity_check ( void )
njn25e49d8e72002-09-23 09:36:25 +00004377{
sewardj05a46732006-10-17 01:28:10 +00004378 Int i;
4379 Word n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00004380 SecMap* sm;
sewardj05a46732006-10-17 01:28:10 +00004381 HChar* errmsg;
sewardj23eb2fd2005-04-22 16:29:19 +00004382 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00004383
sewardj05a46732006-10-17 01:28:10 +00004384 if (0) VG_(printf)("expensive sanity check\n");
4385 if (0) return True;
4386
sewardj23eb2fd2005-04-22 16:29:19 +00004387 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00004388 PROF_EVENT(491, "expensive_sanity_check");
4389
njn1d0825f2006-03-27 11:37:07 +00004390 /* Check that the 3 distinguished SMs are still as they should be. */
njn25e49d8e72002-09-23 09:36:25 +00004391
njndbf7ca72006-03-31 11:57:59 +00004392 /* Check noaccess DSM. */
sewardj45d94cc2005-04-20 14:44:11 +00004393 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn1d0825f2006-03-27 11:37:07 +00004394 for (i = 0; i < SM_CHUNKS; i++)
4395 if (sm->vabits8[i] != VA_BITS8_NOACCESS)
sewardj23eb2fd2005-04-22 16:29:19 +00004396 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00004397
njndbf7ca72006-03-31 11:57:59 +00004398 /* Check undefined DSM. */
4399 sm = &sm_distinguished[SM_DIST_UNDEFINED];
njn1d0825f2006-03-27 11:37:07 +00004400 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004401 if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004402 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004403
njndbf7ca72006-03-31 11:57:59 +00004404 /* Check defined DSM. */
4405 sm = &sm_distinguished[SM_DIST_DEFINED];
njn1d0825f2006-03-27 11:37:07 +00004406 for (i = 0; i < SM_CHUNKS; i++)
njndbf7ca72006-03-31 11:57:59 +00004407 if (sm->vabits8[i] != VA_BITS8_DEFINED)
sewardj23eb2fd2005-04-22 16:29:19 +00004408 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00004409
sewardj23eb2fd2005-04-22 16:29:19 +00004410 if (bad) {
4411 VG_(printf)("memcheck expensive sanity: "
4412 "distinguished_secondaries have changed\n");
4413 return False;
4414 }
4415
njn1d0825f2006-03-27 11:37:07 +00004416 /* If we're not checking for undefined value errors, the secondary V bit
4417 * table should be empty. */
4418 if (!MC_(clo_undef_value_errors)) {
njne2a9ad32007-09-17 05:30:48 +00004419 if (0 != VG_(OSetGen_Size)(secVBitTable))
njn1d0825f2006-03-27 11:37:07 +00004420 return False;
4421 }
4422
sewardj05a46732006-10-17 01:28:10 +00004423 /* check the auxiliary maps, very thoroughly */
4424 n_secmaps_found = 0;
4425 errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
4426 if (errmsg) {
4427 VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
sewardj23eb2fd2005-04-22 16:29:19 +00004428 return False;
4429 }
4430
sewardj05a46732006-10-17 01:28:10 +00004431 /* n_secmaps_found is now the number referred to by the auxiliary
4432 primary map. Now add on the ones referred to by the main
4433 primary map. */
sewardj23eb2fd2005-04-22 16:29:19 +00004434 for (i = 0; i < N_PRIMARY_MAP; i++) {
sewardj05a46732006-10-17 01:28:10 +00004435 if (primary_map[i] == NULL) {
sewardj23eb2fd2005-04-22 16:29:19 +00004436 bad = True;
4437 } else {
sewardj05a46732006-10-17 01:28:10 +00004438 if (!is_distinguished_sm(primary_map[i]))
sewardj23eb2fd2005-04-22 16:29:19 +00004439 n_secmaps_found++;
4440 }
4441 }
4442
sewardj05a46732006-10-17 01:28:10 +00004443 /* check that the number of secmaps issued matches the number that
4444 are reachable (iow, no secmap leaks) */
njn1d0825f2006-03-27 11:37:07 +00004445 if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
sewardj23eb2fd2005-04-22 16:29:19 +00004446 bad = True;
4447
4448 if (bad) {
4449 VG_(printf)("memcheck expensive sanity: "
4450 "apparent secmap leakage\n");
4451 return False;
4452 }
4453
sewardj23eb2fd2005-04-22 16:29:19 +00004454 if (bad) {
4455 VG_(printf)("memcheck expensive sanity: "
4456 "auxmap covers wrong address space\n");
4457 return False;
4458 }
4459
4460 /* there is only one pointer to each secmap (expensive) */
njn25e49d8e72002-09-23 09:36:25 +00004461
4462 return True;
4463}
sewardj45d94cc2005-04-20 14:44:11 +00004464
njn25e49d8e72002-09-23 09:36:25 +00004465/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00004466/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00004467/*------------------------------------------------------------*/
4468
njn1d0825f2006-03-27 11:37:07 +00004469Bool MC_(clo_partial_loads_ok) = False;
sewardjfa4ca3b2007-11-30 17:19:36 +00004470Long MC_(clo_freelist_vol) = 10*1000*1000LL;
njn1d0825f2006-03-27 11:37:07 +00004471LeakCheckMode MC_(clo_leak_check) = LC_Summary;
4472VgRes MC_(clo_leak_resolution) = Vg_LowRes;
4473Bool MC_(clo_show_reachable) = False;
4474Bool MC_(clo_workaround_gcc296_bugs) = False;
4475Bool MC_(clo_undef_value_errors) = True;
sewardjeb0fa932007-11-30 21:41:40 +00004476Int MC_(clo_malloc_fill) = -1;
4477Int MC_(clo_free_fill) = -1;
njn1d0825f2006-03-27 11:37:07 +00004478
4479static Bool mc_process_cmd_line_options(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00004480{
njn1d0825f2006-03-27 11:37:07 +00004481 VG_BOOL_CLO(arg, "--partial-loads-ok", MC_(clo_partial_loads_ok))
4482 else VG_BOOL_CLO(arg, "--show-reachable", MC_(clo_show_reachable))
4483 else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))
4484
4485 else VG_BOOL_CLO(arg, "--undef-value-errors", MC_(clo_undef_value_errors))
4486
sewardjfa4ca3b2007-11-30 17:19:36 +00004487 else VG_BNUM_CLO(arg, "--freelist-vol", MC_(clo_freelist_vol),
4488 0, 10*1000*1000*1000LL)
njn1d0825f2006-03-27 11:37:07 +00004489
4490 else if (VG_CLO_STREQ(arg, "--leak-check=no"))
4491 MC_(clo_leak_check) = LC_Off;
4492 else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
4493 MC_(clo_leak_check) = LC_Summary;
4494 else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
4495 VG_CLO_STREQ(arg, "--leak-check=full"))
4496 MC_(clo_leak_check) = LC_Full;
4497
4498 else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
4499 MC_(clo_leak_resolution) = Vg_LowRes;
4500 else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
4501 MC_(clo_leak_resolution) = Vg_MedRes;
4502 else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
4503 MC_(clo_leak_resolution) = Vg_HighRes;
4504
sewardj05a46732006-10-17 01:28:10 +00004505 else if (VG_CLO_STREQN(16,arg,"--ignore-ranges=")) {
4506 Int i;
4507 UChar* txt = (UChar*)(arg+16);
4508 Bool ok = parse_ignore_ranges(txt);
4509 if (!ok)
4510 return False;
4511 tl_assert(ignoreRanges.used >= 0);
4512 tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
4513 for (i = 0; i < ignoreRanges.used; i++) {
4514 Addr s = ignoreRanges.start[i];
4515 Addr e = ignoreRanges.end[i];
4516 Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
4517 if (e <= s) {
4518 VG_(message)(Vg_DebugMsg,
4519 "ERROR: --ignore-ranges: end <= start in range:");
4520 VG_(message)(Vg_DebugMsg,
4521 " 0x%lx-0x%lx", s, e);
4522 return False;
4523 }
4524 if (e - s > limit) {
4525 VG_(message)(Vg_DebugMsg,
4526 "ERROR: --ignore-ranges: suspiciously large range:");
4527 VG_(message)(Vg_DebugMsg,
4528 " 0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
4529 return False;
4530 }
4531 }
4532 }
4533
sewardjeb0fa932007-11-30 21:41:40 +00004534 else VG_BHEX_CLO(arg, "--malloc-fill", MC_(clo_malloc_fill), 0x00, 0xFF)
4535 else VG_BHEX_CLO(arg, "--free-fill", MC_(clo_free_fill), 0x00, 0xFF)
4536
njn1d0825f2006-03-27 11:37:07 +00004537 else
4538 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4539
4540 return True;
njn25e49d8e72002-09-23 09:36:25 +00004541}
4542
njn51d827b2005-05-09 01:02:08 +00004543static void mc_print_usage(void)
njn25e49d8e72002-09-23 09:36:25 +00004544{
njn1d0825f2006-03-27 11:37:07 +00004545 VG_(printf)(
4546" --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
4547" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
4548" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
4549" --undef-value-errors=no|yes check for undefined value errors [yes]\n"
4550" --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
sewardjfa4ca3b2007-11-30 17:19:36 +00004551" --freelist-vol=<number> volume of freed blocks queue [10000000]\n"
njn1d0825f2006-03-27 11:37:07 +00004552" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
sewardj05a46732006-10-17 01:28:10 +00004553" --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS] assume given addresses are OK\n"
sewardjeb0fa932007-11-30 21:41:40 +00004554" --malloc-fill=<hexnumber> fill malloc'd areas with given value\n"
4555" --free-fill=<hexnumber> fill free'd areas with given value\n"
njn1d0825f2006-03-27 11:37:07 +00004556 );
4557 VG_(replacement_malloc_print_usage)();
njn3e884182003-04-15 13:03:23 +00004558}
4559
njn51d827b2005-05-09 01:02:08 +00004560static void mc_print_debug_usage(void)
njn3e884182003-04-15 13:03:23 +00004561{
njn1d0825f2006-03-27 11:37:07 +00004562 VG_(replacement_malloc_print_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00004563}
4564
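/* Illustrative invocations exercising the options handled above (check the
   usage text printed by mc_print_usage for the authoritative list):

      valgrind --tool=memcheck --leak-check=full --show-reachable=yes ./prog
      valgrind --tool=memcheck --malloc-fill=0xAB --free-fill=0xCD ./prog
      valgrind --tool=memcheck --ignore-ranges=0x4000000-0x4100000 ./prog
*/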
sewardjf3418c02005-11-08 14:10:24 +00004565
nethercote8b76fe52004-11-08 19:20:09 +00004566/*------------------------------------------------------------*/
4567/*--- Client requests ---*/
4568/*------------------------------------------------------------*/
4569
4570/* Client block management:
4571
4572 This is managed as an expanding array of client block descriptors.
4573 Indices of live descriptors are issued to the client, so it can ask
4574 to free them later. Therefore we cannot slide live entries down
4575 over dead ones. Instead we must use free/inuse flags and scan for
4576 an empty slot at allocation time. This in turn means allocation is
4577 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00004578
sewardjedc75ab2005-03-15 23:30:32 +00004579 An unused block has start == size == 0
4580*/
nethercote8b76fe52004-11-08 19:20:09 +00004581
4582typedef
4583 struct {
4584 Addr start;
4585 SizeT size;
4586 ExeContext* where;
sewardj8cf88b72005-07-08 01:29:33 +00004587 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00004588 }
4589 CGenBlock;
4590
4591/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00004592static UInt cgb_size = 0;
4593static UInt cgb_used = 0;
4594static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00004595
4596/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00004597static UInt cgb_used_MAX = 0; /* Max in use. */
4598static UInt cgb_allocs = 0; /* Number of allocs. */
4599static UInt cgb_discards = 0; /* Number of discards. */
4600static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00004601
4602
4603static
njn695c16e2005-03-27 03:40:28 +00004604Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00004605{
4606 UInt i, sz_new;
4607 CGenBlock* cgbs_new;
4608
njn695c16e2005-03-27 03:40:28 +00004609 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00004610
njn695c16e2005-03-27 03:40:28 +00004611 for (i = 0; i < cgb_used; i++) {
4612 cgb_search++;
4613 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004614 return i;
4615 }
4616
4617 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00004618 if (cgb_used < cgb_size) {
4619 cgb_used++;
4620 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004621 }
4622
4623 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00004624 tl_assert(cgb_used == cgb_size);
4625 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00004626
4627 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00004628 for (i = 0; i < cgb_used; i++)
4629 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00004630
njn695c16e2005-03-27 03:40:28 +00004631 if (cgbs != NULL)
4632 VG_(free)( cgbs );
4633 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00004634
njn695c16e2005-03-27 03:40:28 +00004635 cgb_size = sz_new;
4636 cgb_used++;
4637 if (cgb_used > cgb_used_MAX)
4638 cgb_used_MAX = cgb_used;
4639 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00004640}
4641
4642
4643static void show_client_block_stats ( void )
4644{
4645 VG_(message)(Vg_DebugMsg,
4646 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00004647 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00004648 );
4649}
4650
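/* Hedged client-side sketch (guarded out): how the CGenBlock table above
   gets populated from the program under test, using the memcheck.h
   VALGRIND_CREATE_BLOCK / VALGRIND_DISCARD requests. */
#if 0
#include <stdlib.h>
#include "memcheck.h"

int main(void)
{
   char* arena = malloc(256);
   /* Describe the region; errors inside it are then reported against
      "my arena" (see client_perm_maybe_describe below). */
   int hdl = VALGRIND_CREATE_BLOCK(arena, 256, "my arena");
   /* ... use arena ... */
   VALGRIND_DISCARD(hdl);        /* releases the descriptor slot (cgb_discards++) */
   free(arena);
   return 0;
}
#endif
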
nethercote8b76fe52004-11-08 19:20:09 +00004651static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
4652{
4653 UInt i;
nethercote8b76fe52004-11-08 19:20:09 +00004654
4655 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00004656 for (i = 0; i < cgb_used; i++) {
4657 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00004658 continue;
njn717cde52005-05-10 02:47:21 +00004659 // Use zero as the redzone for client blocks.
4660 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
nethercote8b76fe52004-11-08 19:20:09 +00004661 /* OK - maybe it's a mempool, too? */
njn1d0825f2006-03-27 11:37:07 +00004662 MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
njn12627272005-08-14 18:32:16 +00004663 (UWord)cgbs[i].start);
njn1d0cb0d2005-08-15 01:52:02 +00004664 if (mp != NULL) {
4665 if (mp->chunks != NULL) {
njn1d0825f2006-03-27 11:37:07 +00004666 MC_Chunk* mc;
njnf66dbfc2005-08-15 01:54:05 +00004667 VG_(HT_ResetIter)(mp->chunks);
4668 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
njn1d0825f2006-03-27 11:37:07 +00004669 if (addr_is_in_MC_Chunk(mc, a)) {
njn718d3b12006-12-16 00:54:12 +00004670 ai->tag = Addr_Block;
4671 ai->Addr.Block.block_kind = Block_MempoolChunk;
4672 ai->Addr.Block.block_desc = "block";
4673 ai->Addr.Block.block_szB = mc->szB;
4674 ai->Addr.Block.rwoffset = (Int)a - (Int)mc->data;
4675 ai->Addr.Block.lastchange = mc->where;
njn1d0cb0d2005-08-15 01:52:02 +00004676 return True;
4677 }
nethercote8b76fe52004-11-08 19:20:09 +00004678 }
4679 }
njn718d3b12006-12-16 00:54:12 +00004680 ai->tag = Addr_Block;
4681 ai->Addr.Block.block_kind = Block_Mempool;
4682 ai->Addr.Block.block_desc = "mempool";
4683 ai->Addr.Block.block_szB = cgbs[i].size;
4684 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
4685 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00004686 return True;
4687 }
njn718d3b12006-12-16 00:54:12 +00004688 ai->tag = Addr_Block;
4689 ai->Addr.Block.block_kind = Block_UserG;
4690 ai->Addr.Block.block_desc = cgbs[i].desc;
4691 ai->Addr.Block.block_szB = cgbs[i].size;
4692 ai->Addr.Block.rwoffset = (Int)(a) - (Int)(cgbs[i].start);
4693 ai->Addr.Block.lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00004694 return True;
4695 }
4696 }
4697 return False;
4698}
4699
njn51d827b2005-05-09 01:02:08 +00004700static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00004701{
4702 Int i;
4703 Bool ok;
4704 Addr bad_addr;
4705
njnfc26ff92004-11-22 19:12:49 +00004706 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00004707 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
4708 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
4709 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
4710 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
4711 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
sewardj2c1c9df2006-07-28 00:06:37 +00004712 && VG_USERREQ__MEMPOOL_FREE != arg[0]
sewardjc740d762006-10-05 17:59:23 +00004713 && VG_USERREQ__MEMPOOL_TRIM != arg[0]
4714 && VG_USERREQ__MOVE_MEMPOOL != arg[0]
4715 && VG_USERREQ__MEMPOOL_CHANGE != arg[0]
4716 && VG_USERREQ__MEMPOOL_EXISTS != arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00004717 return False;
4718
4719 switch (arg[0]) {
njndbf7ca72006-03-31 11:57:59 +00004720 case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
4721 ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00004722 if (!ok)
njn718d3b12006-12-16 00:54:12 +00004723 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00004724 *ret = ok ? (UWord)NULL : bad_addr;
sewardj8cf88b72005-07-08 01:29:33 +00004725 break;
nethercote8b76fe52004-11-08 19:20:09 +00004726
njndbf7ca72006-03-31 11:57:59 +00004727 case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
nethercote8b76fe52004-11-08 19:20:09 +00004728 MC_ReadResult res;
njndbf7ca72006-03-31 11:57:59 +00004729 res = is_mem_defined ( arg[1], arg[2], &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00004730 if (MC_AddrErr == res)
njn718d3b12006-12-16 00:54:12 +00004731 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
nethercote8b76fe52004-11-08 19:20:09 +00004732 else if (MC_ValueErr == res)
njn718d3b12006-12-16 00:54:12 +00004733 mc_record_user_error ( tid, bad_addr, /*isAddrErr*/False );
nethercote8b76fe52004-11-08 19:20:09 +00004734 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
sewardj8cf88b72005-07-08 01:29:33 +00004735 break;
nethercote8b76fe52004-11-08 19:20:09 +00004736 }
4737
4738 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00004739 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
sewardj8cf88b72005-07-08 01:29:33 +00004740 *ret = 0; /* return value is meaningless */
4741 break;
nethercote8b76fe52004-11-08 19:20:09 +00004742
njndbf7ca72006-03-31 11:57:59 +00004743 case VG_USERREQ__MAKE_MEM_NOACCESS:
4744 MC_(make_mem_noaccess) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00004745 *ret = -1;
4746 break;
nethercote8b76fe52004-11-08 19:20:09 +00004747
njndbf7ca72006-03-31 11:57:59 +00004748 case VG_USERREQ__MAKE_MEM_UNDEFINED:
4749 MC_(make_mem_undefined) ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00004750 *ret = -1;
sewardj8cf88b72005-07-08 01:29:33 +00004751 break;
nethercote8b76fe52004-11-08 19:20:09 +00004752
njndbf7ca72006-03-31 11:57:59 +00004753 case VG_USERREQ__MAKE_MEM_DEFINED:
4754 MC_(make_mem_defined) ( arg[1], arg[2] );
sewardj8cf88b72005-07-08 01:29:33 +00004755 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00004756 break;
4757
njndbf7ca72006-03-31 11:57:59 +00004758 case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
4759 make_mem_defined_if_addressable ( arg[1], arg[2] );
sewardjfb1e9ad2006-03-10 13:41:58 +00004760 *ret = -1;
4761 break;
4762
sewardjedc75ab2005-03-15 23:30:32 +00004763 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
sewardj8cf88b72005-07-08 01:29:33 +00004764 if (arg[1] != 0 && arg[2] != 0) {
4765 i = alloc_client_block();
4766 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
4767 cgbs[i].start = arg[1];
4768 cgbs[i].size = arg[2];
4769 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
sewardj39f34232007-11-09 23:02:28 +00004770 cgbs[i].where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
sewardjedc75ab2005-03-15 23:30:32 +00004771
sewardj8cf88b72005-07-08 01:29:33 +00004772 *ret = i;
4773 } else
4774 *ret = -1;
4775 break;
sewardjedc75ab2005-03-15 23:30:32 +00004776
nethercote8b76fe52004-11-08 19:20:09 +00004777 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00004778 if (cgbs == NULL
4779 || arg[2] >= cgb_used ||
sewardj8cf88b72005-07-08 01:29:33 +00004780 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00004781 *ret = 1;
sewardj8cf88b72005-07-08 01:29:33 +00004782 } else {
4783 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
4784 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
4785 VG_(free)(cgbs[arg[2]].desc);
4786 cgb_discards++;
4787 *ret = 0;
4788 }
4789 break;
nethercote8b76fe52004-11-08 19:20:09 +00004790
sewardjc2c12c22006-03-08 13:20:09 +00004791 case VG_USERREQ__GET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00004792 *ret = mc_get_or_set_vbits_for_client
4793 ( tid, arg[1], arg[2], arg[3], False /* get them */ );
4794 break;
4795
4796 case VG_USERREQ__SET_VBITS:
sewardjc2c12c22006-03-08 13:20:09 +00004797 *ret = mc_get_or_set_vbits_for_client
4798 ( tid, arg[1], arg[2], arg[3], True /* set them */ );
4799 break;
nethercote8b76fe52004-11-08 19:20:09 +00004800
njn1d0825f2006-03-27 11:37:07 +00004801 case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
4802 UWord** argp = (UWord**)arg;
4803 // MC_(bytes_leaked) et al were set by the last leak check (or zero
4804 // if no prior leak checks performed).
4805 *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
4806 *argp[2] = MC_(bytes_dubious);
4807 *argp[3] = MC_(bytes_reachable);
4808 *argp[4] = MC_(bytes_suppressed);
4809 // there is no argp[5]
4810 //*argp[5] = MC_(bytes_indirect);
njndbf7ca72006-03-31 11:57:59 +00004811 // XXX need to make *argp[1-4] defined
njn1d0825f2006-03-27 11:37:07 +00004812 *ret = 0;
4813 return True;
4814 }
4815 case VG_USERREQ__MALLOCLIKE_BLOCK: {
4816 Addr p = (Addr)arg[1];
4817 SizeT sizeB = arg[2];
4818 UInt rzB = arg[3];
4819 Bool is_zeroed = (Bool)arg[4];
4820
4821 MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed,
4822 MC_AllocCustom, MC_(malloc_list) );
4823 return True;
4824 }
4825 case VG_USERREQ__FREELIKE_BLOCK: {
4826 Addr p = (Addr)arg[1];
4827 UInt rzB = arg[2];
4828
4829 MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
4830 return True;
4831 }
4832
4833 case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
njn718d3b12006-12-16 00:54:12 +00004834 Char* s = (Char*)arg[1];
4835 Addr dst = (Addr) arg[2];
4836 Addr src = (Addr) arg[3];
4837 SizeT len = (SizeT)arg[4];
4838 mc_record_overlap_error(tid, s, src, dst, len);
njn1d0825f2006-03-27 11:37:07 +00004839 return True;
4840 }
4841
4842 case VG_USERREQ__CREATE_MEMPOOL: {
4843 Addr pool = (Addr)arg[1];
4844 UInt rzB = arg[2];
4845 Bool is_zeroed = (Bool)arg[3];
4846
4847 MC_(create_mempool) ( pool, rzB, is_zeroed );
4848 return True;
4849 }
4850
4851 case VG_USERREQ__DESTROY_MEMPOOL: {
4852 Addr pool = (Addr)arg[1];
4853
4854 MC_(destroy_mempool) ( pool );
4855 return True;
4856 }
4857
4858 case VG_USERREQ__MEMPOOL_ALLOC: {
4859 Addr pool = (Addr)arg[1];
4860 Addr addr = (Addr)arg[2];
4861 UInt size = arg[3];
4862
4863 MC_(mempool_alloc) ( tid, pool, addr, size );
4864 return True;
4865 }
4866
4867 case VG_USERREQ__MEMPOOL_FREE: {
4868 Addr pool = (Addr)arg[1];
4869 Addr addr = (Addr)arg[2];
4870
4871 MC_(mempool_free) ( pool, addr );
4872 return True;
4873 }
4874
sewardj2c1c9df2006-07-28 00:06:37 +00004875 case VG_USERREQ__MEMPOOL_TRIM: {
4876 Addr pool = (Addr)arg[1];
4877 Addr addr = (Addr)arg[2];
4878 UInt size = arg[3];
4879
4880 MC_(mempool_trim) ( pool, addr, size );
4881 return True;
4882 }
4883
sewardjc740d762006-10-05 17:59:23 +00004884 case VG_USERREQ__MOVE_MEMPOOL: {
4885 Addr poolA = (Addr)arg[1];
4886 Addr poolB = (Addr)arg[2];
4887
4888 MC_(move_mempool) ( poolA, poolB );
4889 return True;
4890 }
4891
4892 case VG_USERREQ__MEMPOOL_CHANGE: {
4893 Addr pool = (Addr)arg[1];
4894 Addr addrA = (Addr)arg[2];
4895 Addr addrB = (Addr)arg[3];
4896 UInt size = arg[4];
4897
4898 MC_(mempool_change) ( pool, addrA, addrB, size );
4899 return True;
4900 }
4901
4902 case VG_USERREQ__MEMPOOL_EXISTS: {
4903 Addr pool = (Addr)arg[1];
4904
4905 *ret = (UWord) MC_(mempool_exists) ( pool );
4906 return True;
4907 }
4908
4909
nethercote8b76fe52004-11-08 19:20:09 +00004910 default:
njn1d0825f2006-03-27 11:37:07 +00004911 VG_(message)(Vg_UserMsg,
4912 "Warning: unknown memcheck client request code %llx",
4913 (ULong)arg[0]);
4914 return False;
nethercote8b76fe52004-11-08 19:20:09 +00004915 }
4916 return True;
4917}
njn25e49d8e72002-09-23 09:36:25 +00004918
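/* Hedged client-side sketch (guarded out): a toy pool allocator describing
   its hand-carved blocks to Memcheck with the valgrind.h MALLOCLIKE/FREELIKE
   requests, which arrive in the handler above and end up in MC_(new_block) /
   MC_(handle_free).  Alignment and redzone choices are illustrative. */
#if 0
#include <stdlib.h>
#include "valgrind.h"

static char   pool[4096];
static size_t pool_used = 0;

static void* pool_alloc ( size_t n )
{
   void* p = pool + pool_used;
   pool_used += (n + 7) & ~(size_t)7;                /* 8-byte align      */
   VALGRIND_MALLOCLIKE_BLOCK(p, n, /*rzB*/0, /*is_zeroed*/0);
   return p;
}

static void pool_free ( void* p )
{
   VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);             /* marks it noaccess */
}

int main(void)
{
   int* x = pool_alloc(sizeof *x);
   *x = 42;
   pool_free(x);
   return 0;
}
#endif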
4919/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004920/*--- Crude profiling machinery. ---*/
4921/*------------------------------------------------------------*/
4922
4923// We track a number of interesting events (using PROF_EVENT)
4924// if MC_PROFILE_MEMORY is defined.
4925
4926#ifdef MC_PROFILE_MEMORY
4927
4928UInt MC_(event_ctr)[N_PROF_EVENTS];
4929HChar* MC_(event_ctr_name)[N_PROF_EVENTS];
4930
4931static void init_prof_mem ( void )
4932{
4933 Int i;
4934 for (i = 0; i < N_PROF_EVENTS; i++) {
4935 MC_(event_ctr)[i] = 0;
4936 MC_(event_ctr_name)[i] = NULL;
4937 }
4938}
4939
4940static void done_prof_mem ( void )
4941{
4942 Int i;
4943 Bool spaced = False;
4944 for (i = 0; i < N_PROF_EVENTS; i++) {
4945 if (!spaced && (i % 10) == 0) {
4946 VG_(printf)("\n");
4947 spaced = True;
4948 }
4949 if (MC_(event_ctr)[i] > 0) {
4950 spaced = False;
4951 VG_(printf)( "prof mem event %3d: %9d %s\n",
4952 i, MC_(event_ctr)[i],
4953 MC_(event_ctr_name)[i]
4954 ? MC_(event_ctr_name)[i] : "unnamed");
4955 }
4956 }
4957}
4958
4959#else
4960
4961static void init_prof_mem ( void ) { }
4962static void done_prof_mem ( void ) { }
4963
4964#endif
4965
4966/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00004967/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00004968/*------------------------------------------------------------*/
4969
njn51d827b2005-05-09 01:02:08 +00004970static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00004971{
sewardj71bc3cb2005-05-19 00:25:45 +00004972 /* If we've been asked to emit XML, mash around various other
4973 options so as to constrain the output somewhat. */
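   /* In practice this means that, for example, running with --xml=yes
      behaves as if --leak-check=full had also been given. */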
4974 if (VG_(clo_xml)) {
4975 /* Extract as much info as possible from the leak checker. */
njn1d0825f2006-03-27 11:37:07 +00004976 /* MC_(clo_show_reachable) = True; */
4977 MC_(clo_leak_check) = LC_Full;
sewardj71bc3cb2005-05-19 00:25:45 +00004978 }
njn5c004e42002-11-18 11:04:50 +00004979}
4980
njn1d0825f2006-03-27 11:37:07 +00004981static void print_SM_info(char* type, int n_SMs)
4982{
4983 VG_(message)(Vg_DebugMsg,
4984 " memcheck: SMs: %s = %d (%dk, %dM)",
4985 type,
4986 n_SMs,
4987 n_SMs * sizeof(SecMap) / 1024,
4988 n_SMs * sizeof(SecMap) / (1024 * 1024) );
4989}
4990
njn51d827b2005-05-09 01:02:08 +00004991static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00004992{
njn1d0825f2006-03-27 11:37:07 +00004993 MC_(print_malloc_stats)();
sewardj23eb2fd2005-04-22 16:29:19 +00004994
njn1d0825f2006-03-27 11:37:07 +00004995 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4996 if (MC_(clo_leak_check) == LC_Off)
4997 VG_(message)(Vg_UserMsg,
4998 "For a detailed leak analysis, rerun with: --leak-check=yes");
4999
5000 VG_(message)(Vg_UserMsg,
5001 "For counts of detected errors, rerun with: -v");
5002 }
5003 if (MC_(clo_leak_check) != LC_Off)
5004 mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));
5005
5006 done_prof_mem();
sewardjae986ca2005-10-12 12:53:20 +00005007
sewardj45d94cc2005-04-20 14:44:11 +00005008 if (VG_(clo_verbosity) > 1) {
njn1d0825f2006-03-27 11:37:07 +00005009 SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
5010
sewardj45d94cc2005-04-20 14:44:11 +00005011 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00005012 " memcheck: sanity checks: %d cheap, %d expensive",
5013 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00005014 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00005015 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
sewardj05a46732006-10-17 01:28:10 +00005016 n_auxmap_L2_nodes,
5017 n_auxmap_L2_nodes * 64,
5018 n_auxmap_L2_nodes / 16 );
sewardj23eb2fd2005-04-22 16:29:19 +00005019 VG_(message)(Vg_DebugMsg,
sewardj05a46732006-10-17 01:28:10 +00005020 " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
5021 n_auxmap_L1_searches, n_auxmap_L1_cmps,
5022 (10ULL * n_auxmap_L1_cmps)
5023 / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
5024 );
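      // Note: the ratio is scaled by 10, so "ratio 23:10" would mean an
      // average of 2.3 L1 comparisons per L1 search.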
5025 VG_(message)(Vg_DebugMsg,
5026 " memcheck: auxmaps_L2: %lld searches, %lld nodes",
5027 n_auxmap_L2_searches, n_auxmap_L2_nodes
5028 );
sewardj23eb2fd2005-04-22 16:29:19 +00005029
njndbf7ca72006-03-31 11:57:59 +00005030 print_SM_info("n_issued ", n_issued_SMs);
5031 print_SM_info("n_deissued ", n_deissued_SMs);
5032 print_SM_info("max_noaccess ", max_noaccess_SMs);
5033 print_SM_info("max_undefined", max_undefined_SMs);
5034 print_SM_info("max_defined ", max_defined_SMs);
5035 print_SM_info("max_non_DSM ", max_non_DSM_SMs);
njn1d0825f2006-03-27 11:37:07 +00005036
5037 // Three DSMs, plus the non-DSM ones
5038 max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
5039      // The 3*sizeof(Word) bytes account for the AVL node metadata.
5040      // The 4*sizeof(Word) bytes account for the malloc metadata.
5041      // Hardwiring these sizes here is ugly, but there is no obvious alternative.
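      // For example, on a 64-bit host, sizeof(Word) == 8, so that is
      // 24 + 32 = 56 bytes of metadata per node on top of
      // sizeof(SecVBitNode) itself (assuming the usual pointer-sized Word).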
5042 max_secVBit_szB = max_secVBit_nodes *
5043 (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
5044 max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
sewardj23eb2fd2005-04-22 16:29:19 +00005045
5046 VG_(message)(Vg_DebugMsg,
njn1d0825f2006-03-27 11:37:07 +00005047 " memcheck: max sec V bit nodes: %d (%dk, %dM)",
5048 max_secVBit_nodes, max_secVBit_szB / 1024,
5049 max_secVBit_szB / (1024 * 1024));
5050 VG_(message)(Vg_DebugMsg,
5051 " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
5052 sec_vbits_new_nodes + sec_vbits_updates,
5053 sec_vbits_new_nodes, sec_vbits_updates );
5054 VG_(message)(Vg_DebugMsg,
5055 " memcheck: max shadow mem size: %dk, %dM",
5056 max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
sewardj45d94cc2005-04-20 14:44:11 +00005057 }
5058
njn5c004e42002-11-18 11:04:50 +00005059 if (0) {
5060 VG_(message)(Vg_DebugMsg,
5061 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00005062 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00005063 }
njn25e49d8e72002-09-23 09:36:25 +00005064}
5065
njn51d827b2005-05-09 01:02:08 +00005066static void mc_pre_clo_init(void)
5067{
5068 VG_(details_name) ("Memcheck");
5069 VG_(details_version) (NULL);
5070 VG_(details_description) ("a memory error detector");
5071 VG_(details_copyright_author)(
sewardj4d474d02008-02-11 11:34:59 +00005072 "Copyright (C) 2002-2008, and GNU GPL'd, by Julian Seward et al.");
njn51d827b2005-05-09 01:02:08 +00005073 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj05a46732006-10-17 01:28:10 +00005074 VG_(details_avg_translation_sizeB) ( 556 );
njn51d827b2005-05-09 01:02:08 +00005075
5076 VG_(basic_tool_funcs) (mc_post_clo_init,
5077 MC_(instrument),
5078 mc_fini);
5079
sewardj81651dc2007-08-28 06:05:20 +00005080 VG_(needs_final_IR_tidy_pass) ( MC_(final_tidy) );
5081
5082
njn51d827b2005-05-09 01:02:08 +00005083 VG_(needs_core_errors) ();
njn1d0825f2006-03-27 11:37:07 +00005084 VG_(needs_tool_errors) (mc_eq_Error,
njn51d827b2005-05-09 01:02:08 +00005085 mc_pp_Error,
sewardj39f34232007-11-09 23:02:28 +00005086 True,/*show TIDs for errors*/
njn1d0825f2006-03-27 11:37:07 +00005087 mc_update_extra,
njn51d827b2005-05-09 01:02:08 +00005088 mc_recognised_suppression,
njn1d0825f2006-03-27 11:37:07 +00005089 mc_read_extra_suppression_info,
5090 mc_error_matches_suppression,
5091 mc_get_error_name,
5092 mc_print_extra_suppression_info);
njn51d827b2005-05-09 01:02:08 +00005093 VG_(needs_libc_freeres) ();
njn1d0825f2006-03-27 11:37:07 +00005094 VG_(needs_command_line_options)(mc_process_cmd_line_options,
njn51d827b2005-05-09 01:02:08 +00005095 mc_print_usage,
5096 mc_print_debug_usage);
5097 VG_(needs_client_requests) (mc_handle_client_request);
5098 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
5099 mc_expensive_sanity_check);
njn1d0825f2006-03-27 11:37:07 +00005100 VG_(needs_malloc_replacement) (MC_(malloc),
5101 MC_(__builtin_new),
5102 MC_(__builtin_vec_new),
5103 MC_(memalign),
5104 MC_(calloc),
5105 MC_(free),
5106 MC_(__builtin_delete),
5107 MC_(__builtin_vec_delete),
5108 MC_(realloc),
5109 MC_MALLOC_REDZONE_SZB );
njnca54af32006-04-16 10:25:43 +00005110 VG_(needs_xml_output) ();
njn51d827b2005-05-09 01:02:08 +00005111
njn1d0825f2006-03-27 11:37:07 +00005112 VG_(track_new_mem_startup) ( mc_new_mem_startup );
njndbf7ca72006-03-31 11:57:59 +00005113 VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
5114 VG_(track_new_mem_brk) ( MC_(make_mem_undefined) );
njn1d0825f2006-03-27 11:37:07 +00005115 VG_(track_new_mem_mmap) ( mc_new_mem_mmap );
njn51d827b2005-05-09 01:02:08 +00005116
njn1d0825f2006-03-27 11:37:07 +00005117 VG_(track_copy_mem_remap) ( MC_(copy_address_range_state) );
njn81623712005-10-07 04:48:37 +00005118
5119 // Nb: we don't do anything with mprotect. This means that V bits are
5120 // preserved if a program, for example, marks some memory as inaccessible
5121 // and then later marks it as accessible again.
5122 //
5123 // If an access violation occurs (eg. writing to read-only memory) we let
5124 // it fault and print an informative termination message. This doesn't
5125 // happen if the program catches the signal, though, which is bad. If we
5126 // had two A bits (for readability and writability) that were completely
5127 // distinct from V bits, then we could handle all this properly.
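   // Sketch of the consequence ('p' and 'len' here are hypothetical):
   //    mprotect(p, len, PROT_NONE);
   //    ... later ...
   //    x = *(int*)p;
   // Memcheck leaves p's A/V bits untouched at the mprotect, so the read is
   // reported via the resulting SIGSEGV (if uncaught) rather than as an
   // addressability error, and any V bits p carried beforehand survive.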
5128 VG_(track_change_mem_mprotect) ( NULL );
njn51d827b2005-05-09 01:02:08 +00005129
njndbf7ca72006-03-31 11:57:59 +00005130 VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
5131 VG_(track_die_mem_brk) ( MC_(make_mem_noaccess) );
5132 VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00005133
njn1d0825f2006-03-27 11:37:07 +00005134#ifdef PERF_FAST_STACK
5135 VG_(track_new_mem_stack_4) ( mc_new_mem_stack_4 );
5136 VG_(track_new_mem_stack_8) ( mc_new_mem_stack_8 );
5137 VG_(track_new_mem_stack_12) ( mc_new_mem_stack_12 );
5138 VG_(track_new_mem_stack_16) ( mc_new_mem_stack_16 );
5139 VG_(track_new_mem_stack_32) ( mc_new_mem_stack_32 );
5140 VG_(track_new_mem_stack_112) ( mc_new_mem_stack_112 );
5141 VG_(track_new_mem_stack_128) ( mc_new_mem_stack_128 );
5142 VG_(track_new_mem_stack_144) ( mc_new_mem_stack_144 );
5143 VG_(track_new_mem_stack_160) ( mc_new_mem_stack_160 );
5144#endif
5145 VG_(track_new_mem_stack) ( mc_new_mem_stack );
njn51d827b2005-05-09 01:02:08 +00005146
njn1d0825f2006-03-27 11:37:07 +00005147#ifdef PERF_FAST_STACK
5148 VG_(track_die_mem_stack_4) ( mc_die_mem_stack_4 );
5149 VG_(track_die_mem_stack_8) ( mc_die_mem_stack_8 );
5150 VG_(track_die_mem_stack_12) ( mc_die_mem_stack_12 );
5151 VG_(track_die_mem_stack_16) ( mc_die_mem_stack_16 );
5152 VG_(track_die_mem_stack_32) ( mc_die_mem_stack_32 );
5153 VG_(track_die_mem_stack_112) ( mc_die_mem_stack_112 );
5154 VG_(track_die_mem_stack_128) ( mc_die_mem_stack_128 );
5155 VG_(track_die_mem_stack_144) ( mc_die_mem_stack_144 );
5156 VG_(track_die_mem_stack_160) ( mc_die_mem_stack_160 );
5157#endif
5158 VG_(track_die_mem_stack) ( mc_die_mem_stack );
njn51d827b2005-05-09 01:02:08 +00005159
njndbf7ca72006-03-31 11:57:59 +00005160 VG_(track_ban_mem_stack) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00005161
njndbf7ca72006-03-31 11:57:59 +00005162 VG_(track_pre_mem_read) ( check_mem_is_defined );
5163 VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
5164 VG_(track_pre_mem_write) ( check_mem_is_addressable );
njn1d0825f2006-03-27 11:37:07 +00005165 VG_(track_post_mem_write) ( mc_post_mem_write );
njn51d827b2005-05-09 01:02:08 +00005166
njn1d0825f2006-03-27 11:37:07 +00005167 if (MC_(clo_undef_value_errors))
5168 VG_(track_pre_reg_read) ( mc_pre_reg_read );
njn51d827b2005-05-09 01:02:08 +00005169
njn1d0825f2006-03-27 11:37:07 +00005170 VG_(track_post_reg_write) ( mc_post_reg_write );
5171 VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );
njn51d827b2005-05-09 01:02:08 +00005172
5173 init_shadow_memory();
sewardj3f94a7d2007-08-25 07:19:08 +00005174 MC_(malloc_list) = VG_(HT_construct)( "MC_(malloc_list)" );
5175 MC_(mempool_list) = VG_(HT_construct)( "MC_(mempool_list)" );
njn1d0825f2006-03-27 11:37:07 +00005176 init_prof_mem();
njn51d827b2005-05-09 01:02:08 +00005177
5178 tl_assert( mc_expensive_sanity_check() );
njn1d0825f2006-03-27 11:37:07 +00005179
5180 // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
5181 tl_assert(sizeof(UWord) == sizeof(Addr));
sewardj05a46732006-10-17 01:28:10 +00005182 // Call me paranoid. I don't care.
5183 tl_assert(sizeof(void*) == sizeof(Addr));
njn1d0825f2006-03-27 11:37:07 +00005184
5185 // BYTES_PER_SEC_VBIT_NODE must be a power of two.
5186 tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
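   // (VG_(log2) returns -1 for a value that is not a power of two, which
   // is what the assertion above relies on.)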
njn51d827b2005-05-09 01:02:08 +00005187}
5188
sewardj45f4e7c2005-09-27 19:20:21 +00005189VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00005190
njn25e49d8e72002-09-23 09:36:25 +00005191/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00005192/*--- end ---*/
njn25e49d8e72002-09-23 09:36:25 +00005193/*--------------------------------------------------------------------*/