blob: 026356b670bf52b445c980f5bc3a89950d0459bf [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- MemCheck: Maintain bitmaps of memory, tracking the ---*/
4/*--- accessibility (A) and validity (V) status of each byte. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of MemCheck, a heavyweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
sewardje4b0bf02006-06-05 23:21:15 +000012 Copyright (C) 2000-2006 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njnc7561b92005-06-19 01:24:32 +000033#include "pub_tool_basics.h"
njn4802b382005-06-11 04:58:29 +000034#include "pub_tool_aspacemgr.h"
njn1d0825f2006-03-27 11:37:07 +000035#include "pub_tool_hashtable.h" // For mc_include.h
njn97405b22005-06-02 03:39:33 +000036#include "pub_tool_libcbase.h"
njn132bfcc2005-06-04 19:16:06 +000037#include "pub_tool_libcassert.h"
njn36a20fa2005-06-03 03:08:39 +000038#include "pub_tool_libcprint.h"
njnf536bbb2005-06-13 04:21:38 +000039#include "pub_tool_machine.h"
njnc7561b92005-06-19 01:24:32 +000040#include "pub_tool_mallocfree.h"
41#include "pub_tool_options.h"
njn1d0825f2006-03-27 11:37:07 +000042#include "pub_tool_oset.h"
njnc7561b92005-06-19 01:24:32 +000043#include "pub_tool_replacemalloc.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_threadstate.h"
46
47#include "mc_include.h"
48#include "memcheck.h" /* for client requests */
njn25e49d8e72002-09-23 09:36:25 +000049
tomd55121e2005-12-19 12:40:13 +000050#ifdef HAVE_BUILTIN_EXPECT
sewardjc1a2cda2005-04-21 17:34:00 +000051#define EXPECTED_TAKEN(cond) __builtin_expect((cond),1)
52#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
tomd55121e2005-12-19 12:40:13 +000053#else
54#define EXPECTED_TAKEN(cond) (cond)
55#define EXPECTED_NOT_TAKEN(cond) (cond)
56#endif
sewardjc1a2cda2005-04-21 17:34:00 +000057
njn1d0825f2006-03-27 11:37:07 +000058/* Set to 1 to do a little more sanity checking */
sewardj23eb2fd2005-04-22 16:29:19 +000059#define VG_DEBUG_MEMORY 0
sewardjc1a2cda2005-04-21 17:34:00 +000060
njn25e49d8e72002-09-23 09:36:25 +000061#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
62
njn25e49d8e72002-09-23 09:36:25 +000063
njn25e49d8e72002-09-23 09:36:25 +000064/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +000065/*--- Fast-case knobs ---*/
66/*------------------------------------------------------------*/
67
68// Comment these out to disable the fast cases (don't just set them to zero).
69
70#define PERF_FAST_LOADV 1
71#define PERF_FAST_STOREV 1
72
73#define PERF_FAST_SARP 1
74
75#define PERF_FAST_STACK 1
76#define PERF_FAST_STACK2 1
77
78/*------------------------------------------------------------*/
79/*--- V bits and A bits ---*/
80/*------------------------------------------------------------*/
81
82/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
83 thinks the corresponding value bit is defined. And every memory byte
84 has an A bit, which tracks whether Memcheck thinks the program can access
85 it safely. So every N-bit register is shadowed with N V bits, and every
86 memory byte is shadowed with 8 V bits and one A bit.
87
88 In the implementation, we use two forms of compression (compressed V bits
89 and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
90 for memory.
91
92 Memcheck also tracks extra information about each heap block that is
93 allocated, for detecting memory leaks and other purposes.
94*/
95
96/*------------------------------------------------------------*/
sewardj45d94cc2005-04-20 14:44:11 +000097/*--- Basic A/V bitmap representation. ---*/
njn25e49d8e72002-09-23 09:36:25 +000098/*------------------------------------------------------------*/
99
njn1d0825f2006-03-27 11:37:07 +0000100/* All reads and writes are checked against a memory map (a.k.a. shadow
101 memory), which records the state of all memory in the process.
102
103 On 32-bit machines the memory map is organised as follows.
104 The top 16 bits of an address are used to index into a top-level
105 map table, containing 65536 entries. Each entry is a pointer to a
 106 second-level map, which records the accessibility and validity
107 permissions for the 65536 bytes indexed by the lower 16 bits of the
108 address. Each byte is represented by two bits (details are below). So
109 each second-level map contains 16384 bytes. This two-level arrangement
110 conveniently divides the 4G address space into 64k lumps, each size 64k
111 bytes.
112
113 All entries in the primary (top-level) map must point to a valid
114 secondary (second-level) map. Since many of the 64kB chunks will
njndbf7ca72006-03-31 11:57:59 +0000115 have the same status for every bit -- ie. noaccess (for unused
116 address space) or entirely addressable and defined (for code segments) --
117 there are three distinguished secondary maps, which indicate 'noaccess',
118 'undefined' and 'defined'. For these uniform 64kB chunks, the primary
119 map entry points to the relevant distinguished map. In practice,
120 typically more than half of the addressable memory is represented with
121 the 'undefined' or 'defined' distinguished secondary map, so it gives a
122 good saving. It also lets us set the V+A bits of large address regions
123 quickly in set_address_range_perms().
njn1d0825f2006-03-27 11:37:07 +0000124
125 On 64-bit machines it's more complicated. If we followed the same basic
126 scheme we'd have a four-level table which would require too many memory
127 accesses. So instead the top-level map table has 2^19 entries (indexed
128 using bits 16..34 of the address); this covers the bottom 32GB. Any
129 accesses above 32GB are handled with a slow, sparse auxiliary table.
130 Valgrind's address space manager tries very hard to keep things below
131 this 32GB barrier so that performance doesn't suffer too much.
132
133 Note that this file has a lot of different functions for reading and
134 writing shadow memory. Only a couple are strictly necessary (eg.
135 get_vabits2 and set_vabits2), most are just specialised for specific
136 common cases to improve performance.
137
138 Aside: the V+A bits are less precise than they could be -- we have no way
139 of marking memory as read-only. It would be great if we could add an
140 extra state VA_BITSn_READONLY. But then we'd have 5 different states,
141 which requires 2.3 bits to hold, and there's no way to do that elegantly
142 -- we'd have to double up to 4 bits of metadata per byte, which doesn't
143 seem worth it.
144*/
sewardjc859fbf2005-04-22 21:10:28 +0000145
sewardj45d94cc2005-04-20 14:44:11 +0000146/* --------------- Basic configuration --------------- */
sewardj95448072004-11-22 20:19:51 +0000147
sewardj23eb2fd2005-04-22 16:29:19 +0000148/* Only change this. N_PRIMARY_MAP *must* be a power of 2. */
sewardj21f7ff42005-04-28 10:32:02 +0000149
sewardje4ccc012005-05-02 12:53:38 +0000150#if VG_WORDSIZE == 4
sewardj21f7ff42005-04-28 10:32:02 +0000151
152/* cover the entire address space */
153# define N_PRIMARY_BITS 16
154
155#else
156
sewardj34483bc2005-09-28 11:50:20 +0000157/* Just handle the first 32G fast and the rest via auxiliary
sewardj21f7ff42005-04-28 10:32:02 +0000158 primaries. */
sewardj34483bc2005-09-28 11:50:20 +0000159# define N_PRIMARY_BITS 19
sewardj21f7ff42005-04-28 10:32:02 +0000160
161#endif
162
sewardj45d94cc2005-04-20 14:44:11 +0000163
sewardjc1a2cda2005-04-21 17:34:00 +0000164/* Do not change this. */
sewardje4ccc012005-05-02 12:53:38 +0000165#define N_PRIMARY_MAP ( ((UWord)1) << N_PRIMARY_BITS)
sewardjc1a2cda2005-04-21 17:34:00 +0000166
167/* Do not change this. */
sewardj23eb2fd2005-04-22 16:29:19 +0000168#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
169
170
sewardj45d94cc2005-04-20 14:44:11 +0000171/* --------------- Secondary maps --------------- */
njn25e49d8e72002-09-23 09:36:25 +0000172
njn1d0825f2006-03-27 11:37:07 +0000173// Each byte of memory conceptually has an A bit, which indicates its
174// addressability, and 8 V bits, which indicates its definedness.
175//
176// But because very few bytes are partially defined, we can use a nice
177// compression scheme to reduce the size of shadow memory. Each byte of
178// memory has 2 bits which indicates its state (ie. V+A bits):
179//
njndbf7ca72006-03-31 11:57:59 +0000180// 00: noaccess (unaddressable but treated as fully defined)
181// 01: undefined (addressable and fully undefined)
182// 10: defined (addressable and fully defined)
183// 11: partdefined (addressable and partially defined)
njn1d0825f2006-03-27 11:37:07 +0000184//
njndbf7ca72006-03-31 11:57:59 +0000185// In the "partdefined" case, we use a secondary table to store the V bits.
186// Each entry in the secondary-V-bits table maps a byte address to its 8 V
187// bits.
njn1d0825f2006-03-27 11:37:07 +0000188//
189// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
190// four bytes (32 bits) of memory are in each chunk. Hence the name
191// "vabits8". This lets us get the V+A bits for four bytes at a time
192// easily (without having to do any shifting and/or masking), and that is a
193// very common operation. (Note that although each vabits8 chunk
194// is 8 bits in size, it represents 32 bits of memory.)
195//
196// The representation is "inverse" little-endian... each 4 bytes of
197// memory is represented by a 1 byte value, where:
198//
199// - the status of byte (a+0) is held in bits [1..0]
200// - the status of byte (a+1) is held in bits [3..2]
201// - the status of byte (a+2) is held in bits [5..4]
202// - the status of byte (a+3) is held in bits [7..6]
203//
204// It's "inverse" because endianness normally describes a mapping from
205// value bits to memory addresses; in this case the mapping is inverted.
206// Ie. instead of particular value bits being held in certain addresses, in
207// this case certain addresses are represented by particular value bits.
208// See insert_vabits2_into_vabits8() for an example.
209//
210// But note that we don't compress the V bits stored in registers; they
211// need to be explicit to made the shadow operations possible. Therefore
212// when moving values between registers and memory we need to convert
213// between the expanded in-register format and the compressed in-memory
214// format. This isn't so difficult, it just requires careful attention in a
215// few places.
216
217// These represent eight bits of memory.
218#define VA_BITS2_NOACCESS 0x0 // 00b
njndbf7ca72006-03-31 11:57:59 +0000219#define VA_BITS2_UNDEFINED 0x1 // 01b
220#define VA_BITS2_DEFINED 0x2 // 10b
221#define VA_BITS2_PARTDEFINED 0x3 // 11b
njn1d0825f2006-03-27 11:37:07 +0000222
223// These represent 16 bits of memory.
224#define VA_BITS4_NOACCESS 0x0 // 00_00b
njndbf7ca72006-03-31 11:57:59 +0000225#define VA_BITS4_UNDEFINED 0x5 // 01_01b
226#define VA_BITS4_DEFINED 0xa // 10_10b
njn1d0825f2006-03-27 11:37:07 +0000227
228// These represent 32 bits of memory.
229#define VA_BITS8_NOACCESS 0x00 // 00_00_00_00b
njndbf7ca72006-03-31 11:57:59 +0000230#define VA_BITS8_UNDEFINED 0x55 // 01_01_01_01b
231#define VA_BITS8_DEFINED 0xaa // 10_10_10_10b
njn1d0825f2006-03-27 11:37:07 +0000232
233// These represent 64 bits of memory.
234#define VA_BITS16_NOACCESS 0x0000 // 00_00_00_00b x 2
njndbf7ca72006-03-31 11:57:59 +0000235#define VA_BITS16_UNDEFINED 0x5555 // 01_01_01_01b x 2
236#define VA_BITS16_DEFINED 0xaaaa // 10_10_10_10b x 2
njn1d0825f2006-03-27 11:37:07 +0000237
238
239#define SM_CHUNKS 16384
240#define SM_OFF(aaa) (((aaa) & 0xffff) >> 2)
241#define SM_OFF_16(aaa) (((aaa) & 0xffff) >> 3)
242
243// Paranoia: it's critical for performance that the requested inlining
244// occurs. So try extra hard.
245#define INLINE inline __attribute__((always_inline))
246
247static INLINE Addr start_of_this_sm ( Addr a ) {
248 return (a & (~SM_MASK));
249}
250static INLINE Bool is_start_of_sm ( Addr a ) {
251 return (start_of_this_sm(a) == a);
252}
253
njn25e49d8e72002-09-23 09:36:25 +0000254typedef
255 struct {
njn1d0825f2006-03-27 11:37:07 +0000256 UChar vabits8[SM_CHUNKS];
njn25e49d8e72002-09-23 09:36:25 +0000257 }
258 SecMap;
259
njn1d0825f2006-03-27 11:37:07 +0000260// 3 distinguished secondary maps, one for no-access, one for
261// accessible but undefined, and one for accessible and defined.
262// Distinguished secondaries may never be modified.
263#define SM_DIST_NOACCESS 0
njndbf7ca72006-03-31 11:57:59 +0000264#define SM_DIST_UNDEFINED 1
265#define SM_DIST_DEFINED 2
njnb8dca862005-03-14 02:42:44 +0000266
sewardj45d94cc2005-04-20 14:44:11 +0000267static SecMap sm_distinguished[3];
njnb8dca862005-03-14 02:42:44 +0000268
njn1d0825f2006-03-27 11:37:07 +0000269static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
sewardj45d94cc2005-04-20 14:44:11 +0000270 return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
271}
njnb8dca862005-03-14 02:42:44 +0000272
njn1d0825f2006-03-27 11:37:07 +0000273// Forward declaration
274static void update_SM_counts(SecMap* oldSM, SecMap* newSM);
275
sewardj45d94cc2005-04-20 14:44:11 +0000276/* dist_sm points to one of our three distinguished secondaries. Make
277 a copy of it so that we can write to it.
278*/
279static SecMap* copy_for_writing ( SecMap* dist_sm )
280{
281 SecMap* new_sm;
282 tl_assert(dist_sm == &sm_distinguished[0]
njn1d0825f2006-03-27 11:37:07 +0000283 || dist_sm == &sm_distinguished[1]
284 || dist_sm == &sm_distinguished[2]);
njnb8dca862005-03-14 02:42:44 +0000285
sewardj45f4e7c2005-09-27 19:20:21 +0000286 new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
287 if (new_sm == NULL)
288 VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
289 sizeof(SecMap) );
sewardj45d94cc2005-04-20 14:44:11 +0000290 VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
njn1d0825f2006-03-27 11:37:07 +0000291 update_SM_counts(dist_sm, new_sm);
sewardj45d94cc2005-04-20 14:44:11 +0000292 return new_sm;
293}
njnb8dca862005-03-14 02:42:44 +0000294
njn1d0825f2006-03-27 11:37:07 +0000295/* --------------- Stats --------------- */
296
njndbf7ca72006-03-31 11:57:59 +0000297static Int n_issued_SMs = 0;
298static Int n_deissued_SMs = 0;
299static Int n_noaccess_SMs = N_PRIMARY_MAP; // start with many noaccess DSMs
300static Int n_undefined_SMs = 0;
301static Int n_defined_SMs = 0;
302static Int n_non_DSM_SMs = 0;
303static Int max_noaccess_SMs = 0;
304static Int max_undefined_SMs = 0;
305static Int max_defined_SMs = 0;
306static Int max_non_DSM_SMs = 0;
njn1d0825f2006-03-27 11:37:07 +0000307
308static ULong n_auxmap_searches = 0;
309static ULong n_auxmap_cmps = 0;
310static Int n_sanity_cheap = 0;
311static Int n_sanity_expensive = 0;
312
313static Int n_secVBit_nodes = 0;
314static Int max_secVBit_nodes = 0;
315
316static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
317{
njndbf7ca72006-03-31 11:57:59 +0000318 if (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
319 else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
320 else if (oldSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs --;
321 else { n_non_DSM_SMs --;
322 n_deissued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000323
njndbf7ca72006-03-31 11:57:59 +0000324 if (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
325 else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
326 else if (newSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs ++;
327 else { n_non_DSM_SMs ++;
328 n_issued_SMs ++; }
njn1d0825f2006-03-27 11:37:07 +0000329
njndbf7ca72006-03-31 11:57:59 +0000330 if (n_noaccess_SMs > max_noaccess_SMs ) max_noaccess_SMs = n_noaccess_SMs;
331 if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
332 if (n_defined_SMs > max_defined_SMs ) max_defined_SMs = n_defined_SMs;
333 if (n_non_DSM_SMs > max_non_DSM_SMs ) max_non_DSM_SMs = n_non_DSM_SMs;
njn1d0825f2006-03-27 11:37:07 +0000334}
sewardj45d94cc2005-04-20 14:44:11 +0000335
336/* --------------- Primary maps --------------- */
337
338/* The main primary map. This covers some initial part of the address
sewardj23eb2fd2005-04-22 16:29:19 +0000339 space, addresses 0 .. (N_PRIMARY_MAP << 16)-1. The rest of it is
sewardj45d94cc2005-04-20 14:44:11 +0000340 handled using the auxiliary primary map.
341*/
sewardj23eb2fd2005-04-22 16:29:19 +0000342static SecMap* primary_map[N_PRIMARY_MAP];
sewardj45d94cc2005-04-20 14:44:11 +0000343
344
345/* An entry in the auxiliary primary map. base must be a 64k-aligned
346 value, and sm points at the relevant secondary map. As with the
347 main primary map, the secondary may be either a real secondary, or
348 one of the three distinguished secondaries.
349*/
350typedef
351 struct {
sewardj23eb2fd2005-04-22 16:29:19 +0000352 Addr base;
sewardj45d94cc2005-04-20 14:44:11 +0000353 SecMap* sm;
354 }
355 AuxMapEnt;
356
357/* An expanding array of AuxMapEnts. */
sewardjaba741d2005-06-09 13:56:07 +0000358#define N_AUXMAPS 20000 /* HACK */
sewardj45d94cc2005-04-20 14:44:11 +0000359static AuxMapEnt hacky_auxmaps[N_AUXMAPS];
360static Int auxmap_size = N_AUXMAPS;
361static Int auxmap_used = 0;
362static AuxMapEnt* auxmap = &hacky_auxmaps[0];
363
sewardj45d94cc2005-04-20 14:44:11 +0000364
365/* Find an entry in the auxiliary map. If an entry is found, move it
366 one step closer to the front of the array, then return its address.
sewardj05fe85e2005-04-27 22:46:36 +0000367 If an entry is not found, return NULL. Note carefully that
sewardj45d94cc2005-04-20 14:44:11 +0000368 because a each call potentially rearranges the entries, each call
369 to this function invalidates ALL AuxMapEnt*s previously obtained by
370 calling this fn.
371*/
sewardj05fe85e2005-04-27 22:46:36 +0000372static AuxMapEnt* maybe_find_in_auxmap ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000373{
374 UWord i;
375 tl_assert(a > MAX_PRIMARY_ADDRESS);
376
377 a &= ~(Addr)0xFFFF;
378
379 /* Search .. */
380 n_auxmap_searches++;
381 for (i = 0; i < auxmap_used; i++) {
382 if (auxmap[i].base == a)
383 break;
384 }
385 n_auxmap_cmps += (ULong)(i+1);
386
387 if (i < auxmap_used) {
388 /* Found it. Nudge it a bit closer to the front. */
389 if (i > 0) {
390 AuxMapEnt tmp = auxmap[i-1];
391 auxmap[i-1] = auxmap[i];
392 auxmap[i] = tmp;
393 i--;
394 }
395 return &auxmap[i];
396 }
397
sewardj05fe85e2005-04-27 22:46:36 +0000398 return NULL;
399}
400
401
402/* Find an entry in the auxiliary map. If an entry is found, move it
403 one step closer to the front of the array, then return its address.
404 If an entry is not found, allocate one. Note carefully that
405 because a each call potentially rearranges the entries, each call
406 to this function invalidates ALL AuxMapEnt*s previously obtained by
407 calling this fn.
408*/
409static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
410{
411 AuxMapEnt* am = maybe_find_in_auxmap(a);
412 if (am)
413 return am;
414
sewardj45d94cc2005-04-20 14:44:11 +0000415 /* We didn't find it. Hmm. This is a new piece of address space.
416 We'll need to allocate a new AuxMap entry for it. */
417 if (auxmap_used >= auxmap_size) {
418 tl_assert(auxmap_used == auxmap_size);
419 /* Out of auxmap entries. */
420 tl_assert2(0, "failed to expand the auxmap table");
421 }
422
423 tl_assert(auxmap_used < auxmap_size);
424
425 auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
426 auxmap[auxmap_used].sm = &sm_distinguished[SM_DIST_NOACCESS];
427
428 if (0)
429 VG_(printf)("new auxmap, base = 0x%llx\n",
430 (ULong)auxmap[auxmap_used].base );
431
432 auxmap_used++;
433 return &auxmap[auxmap_used-1];
434}
435
sewardj45d94cc2005-04-20 14:44:11 +0000436/* --------------- SecMap fundamentals --------------- */
437
njn1d0825f2006-03-27 11:37:07 +0000438// In all these, 'low' means it's definitely in the main primary map,
439// 'high' means it's definitely in the auxiliary table.
440
441static INLINE SecMap** get_secmap_low_ptr ( Addr a )
442{
443 UWord pm_off = a >> 16;
444# if VG_DEBUG_MEMORY >= 1
445 tl_assert(pm_off < N_PRIMARY_MAP);
446# endif
447 return &primary_map[ pm_off ];
448}
449
450static INLINE SecMap** get_secmap_high_ptr ( Addr a )
451{
452 AuxMapEnt* am = find_or_alloc_in_auxmap(a);
453 return &am->sm;
454}
455
456static SecMap** get_secmap_ptr ( Addr a )
457{
458 return ( a <= MAX_PRIMARY_ADDRESS
459 ? get_secmap_low_ptr(a)
460 : get_secmap_high_ptr(a));
461}
462
njna7c7ebd2006-03-28 12:51:02 +0000463static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000464{
465 return *get_secmap_low_ptr(a);
466}
467
njna7c7ebd2006-03-28 12:51:02 +0000468static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000469{
470 return *get_secmap_high_ptr(a);
471}
472
njna7c7ebd2006-03-28 12:51:02 +0000473static INLINE SecMap* get_secmap_for_writing_low(Addr a)
njn1d0825f2006-03-27 11:37:07 +0000474{
475 SecMap** p = get_secmap_low_ptr(a);
476 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
477 *p = copy_for_writing(*p);
478 return *p;
479}
480
njna7c7ebd2006-03-28 12:51:02 +0000481static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
njn1d0825f2006-03-27 11:37:07 +0000482{
483 SecMap** p = get_secmap_high_ptr(a);
484 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*p)))
485 *p = copy_for_writing(*p);
486 return *p;
487}
488
sewardj45d94cc2005-04-20 14:44:11 +0000489/* Produce the secmap for 'a', either from the primary map or by
490 ensuring there is an entry for it in the aux primary map. The
491 secmap may be a distinguished one as the caller will only want to
492 be able to read it.
493*/
njna7c7ebd2006-03-28 12:51:02 +0000494static SecMap* get_secmap_for_reading ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000495{
njn1d0825f2006-03-27 11:37:07 +0000496 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000497 ? get_secmap_for_reading_low (a)
498 : get_secmap_for_reading_high(a) );
sewardj45d94cc2005-04-20 14:44:11 +0000499}
500
501/* Produce the secmap for 'a', either from the primary map or by
502 ensuring there is an entry for it in the aux primary map. The
503 secmap may not be a distinguished one, since the caller will want
504 to be able to write it. If it is a distinguished secondary, make a
505 writable copy of it, install it, and return the copy instead. (COW
506 semantics).
507*/
njna7c7ebd2006-03-28 12:51:02 +0000508static SecMap* get_secmap_for_writing ( Addr a )
sewardj45d94cc2005-04-20 14:44:11 +0000509{
njn1d0825f2006-03-27 11:37:07 +0000510 return ( a <= MAX_PRIMARY_ADDRESS
njna7c7ebd2006-03-28 12:51:02 +0000511 ? get_secmap_for_writing_low (a)
512 : get_secmap_for_writing_high(a) );
njn1d0825f2006-03-27 11:37:07 +0000513}
514
515/* If 'a' has a SecMap, produce it. Else produce NULL. But don't
516 allocate one if one doesn't already exist. This is used by the
517 leak checker.
518*/
519static SecMap* maybe_get_secmap_for ( Addr a )
520{
sewardj45d94cc2005-04-20 14:44:11 +0000521 if (a <= MAX_PRIMARY_ADDRESS) {
njna7c7ebd2006-03-28 12:51:02 +0000522 return get_secmap_for_reading_low(a);
sewardj45d94cc2005-04-20 14:44:11 +0000523 } else {
njn1d0825f2006-03-27 11:37:07 +0000524 AuxMapEnt* am = maybe_find_in_auxmap(a);
525 return am ? am->sm : NULL;
sewardj45d94cc2005-04-20 14:44:11 +0000526 }
527}
528
njn1d0825f2006-03-27 11:37:07 +0000529/* --------------- Fundamental functions --------------- */
530
531static INLINE
532void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
533{
534 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
535 *vabits8 &= ~(0x3 << shift); // mask out the two old bits
536 *vabits8 |= (vabits2 << shift); // mask in the two new bits
537}
538
539static INLINE
540void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
541{
542 UInt shift;
543 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
544 shift = (a & 2) << 1; // shift by 0 or 4
545 *vabits8 &= ~(0xf << shift); // mask out the four old bits
546 *vabits8 |= (vabits4 << shift); // mask in the four new bits
547}
548
549static INLINE
550UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
551{
552 UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
553 vabits8 >>= shift; // shift the two bits to the bottom
554 return 0x3 & vabits8; // mask out the rest
555}
556
557static INLINE
558UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
559{
560 UInt shift;
561 tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
562 shift = (a & 2) << 1; // shift by 0 or 4
563 vabits8 >>= shift; // shift the four bits to the bottom
564 return 0xf & vabits8; // mask out the rest
565}
566
567// Note that these four are only used in slow cases. The fast cases do
568// clever things like combine the auxmap check (in
569// get_secmap_{read,writ}able) with alignment checks.
570
571// *** WARNING! ***
572// Any time this function is called, if it is possible that vabits2
njndbf7ca72006-03-31 11:57:59 +0000573// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
njn1d0825f2006-03-27 11:37:07 +0000574// sec-V-bits table must also be set!
575static INLINE
576void set_vabits2 ( Addr a, UChar vabits2 )
577{
njna7c7ebd2006-03-28 12:51:02 +0000578 SecMap* sm = get_secmap_for_writing(a);
njn1d0825f2006-03-27 11:37:07 +0000579 UWord sm_off = SM_OFF(a);
580 insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
581}
582
583static INLINE
584UChar get_vabits2 ( Addr a )
585{
njna7c7ebd2006-03-28 12:51:02 +0000586 SecMap* sm = get_secmap_for_reading(a);
njn1d0825f2006-03-27 11:37:07 +0000587 UWord sm_off = SM_OFF(a);
588 UChar vabits8 = sm->vabits8[sm_off];
589 return extract_vabits2_from_vabits8(a, vabits8);
590}
591
sewardjf2184912006-05-03 22:13:57 +0000592// *** WARNING! ***
593// Any time this function is called, if it is possible that any of the
594// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
595// corresponding entry(s) in the sec-V-bits table must also be set!
596static INLINE
597UChar get_vabits8_for_aligned_word32 ( Addr a )
598{
599 SecMap* sm = get_secmap_for_reading(a);
600 UWord sm_off = SM_OFF(a);
601 UChar vabits8 = sm->vabits8[sm_off];
602 return vabits8;
603}
604
605static INLINE
606void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
607{
608 SecMap* sm = get_secmap_for_writing(a);
609 UWord sm_off = SM_OFF(a);
610 sm->vabits8[sm_off] = vabits8;
611}
612
613
njn1d0825f2006-03-27 11:37:07 +0000614// Forward declarations
615static UWord get_sec_vbits8(Addr a);
616static void set_sec_vbits8(Addr a, UWord vbits8);
617
618// Returns False if there was an addressability error.
619static INLINE
620Bool set_vbits8 ( Addr a, UChar vbits8 )
621{
622 Bool ok = True;
623 UChar vabits2 = get_vabits2(a);
624 if ( VA_BITS2_NOACCESS != vabits2 ) {
625 // Addressable. Convert in-register format to in-memory format.
626 // Also remove any existing sec V bit entry for the byte if no
627 // longer necessary.
njndbf7ca72006-03-31 11:57:59 +0000628 if ( V_BITS8_DEFINED == vbits8 ) { vabits2 = VA_BITS2_DEFINED; }
629 else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
630 else { vabits2 = VA_BITS2_PARTDEFINED;
njn1d0825f2006-03-27 11:37:07 +0000631 set_sec_vbits8(a, vbits8); }
632 set_vabits2(a, vabits2);
633
634 } else {
635 // Unaddressable! Do nothing -- when writing to unaddressable
636 // memory it acts as a black hole, and the V bits can never be seen
637 // again. So we don't have to write them at all.
638 ok = False;
639 }
640 return ok;
641}
642
643// Returns False if there was an addressability error. In that case, we put
644// all defined bits into vbits8.
645static INLINE
646Bool get_vbits8 ( Addr a, UChar* vbits8 )
647{
648 Bool ok = True;
649 UChar vabits2 = get_vabits2(a);
650
651 // Convert the in-memory format to in-register format.
njndbf7ca72006-03-31 11:57:59 +0000652 if ( VA_BITS2_DEFINED == vabits2 ) { *vbits8 = V_BITS8_DEFINED; }
653 else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
654 else if ( VA_BITS2_NOACCESS == vabits2 ) {
njn1d0825f2006-03-27 11:37:07 +0000655 *vbits8 = V_BITS8_DEFINED; // Make V bits defined!
656 ok = False;
657 } else {
njndbf7ca72006-03-31 11:57:59 +0000658 tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
njn1d0825f2006-03-27 11:37:07 +0000659 *vbits8 = get_sec_vbits8(a);
660 }
661 return ok;
662}
663
664
665/* --------------- Secondary V bit table ------------ */
666
667// This table holds the full V bit pattern for partially-defined bytes
njndbf7ca72006-03-31 11:57:59 +0000668// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
669// memory.
njn1d0825f2006-03-27 11:37:07 +0000670//
671// Note: the nodes in this table can become stale. Eg. if you write a PDB,
672// then overwrite the same address with a fully defined byte, the sec-V-bit
673// node will not necessarily be removed. This is because checking for
674// whether removal is necessary would slow down the fast paths.
675//
676// To avoid the stale nodes building up too much, we periodically (once the
677// table reaches a certain size) garbage collect (GC) the table by
678// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
679// are stale and haven't been touched for a certain number of collections.
680// If more than a certain proportion of nodes survived, we increase the
681// table size so that GCs occur less often.
682//
683// (So this a bit different to a traditional GC, where you definitely want
684// to remove any dead nodes. It's more like we have a resizable cache and
685// we're trying to find the right balance how many elements to evict and how
686// big to make the cache.)
687//
688// This policy is designed to avoid bad table bloat in the worst case where
689// a program creates huge numbers of stale PDBs -- we would get this bloat
690// if we had no GC -- while handling well the case where a node becomes
691// stale but shortly afterwards is rewritten with a PDB and so becomes
692// non-stale again (which happens quite often, eg. in perf/bz2). If we just
693// remove all stale nodes as soon as possible, we just end up re-adding a
694// lot of them in later again. The "sufficiently stale" approach avoids
695// this. (If a program has many live PDBs, performance will just suck,
696// there's no way around that.)
697
// The table of partially-defined-byte (PDB) nodes.  Keyed on each node's
// BYTES_PER_SEC_VBIT_NODE-aligned base address (see VG_ROUNDDN in
// get/set_sec_vbits8 below).  Created by createSecVBitTable() and replaced
// wholesale by gcSecVBitTable().
static OSet* secVBitTable;

// Stats: total nodes ever created, and in-place updates of existing nodes.
static ULong sec_vbits_new_nodes = 0;
static ULong sec_vbits_updates = 0;

// This must be a power of two; this is checked in mc_pre_clo_init().
// The size chosen here is a trade-off: if the nodes are bigger (ie. cover
// a larger address range) they take more space but we can get multiple
// partially-defined bytes in one if they are close to each other, reducing
// the number of total nodes. In practice sometimes they are clustered (eg.
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
// row), but often not. So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE 16

// We make the table bigger if more than this many nodes survive a GC.
#define MAX_SURVIVOR_PROPORTION 0.5

// Each time we make the table bigger, we increase it by this much.
#define TABLE_GROWTH_FACTOR 2

// This defines "sufficiently stale" -- any node that hasn't been touched in
// this many GCs will be removed.
#define MAX_STALE_AGE 2

// We GC the table when it gets this many nodes in it, ie. it's effectively
// the table size. It can change.
static Int secVBitLimit = 1024;

// The number of GCs done, used to age sec-V-bit nodes for eviction.
// Because it's unsigned, wrapping doesn't matter -- the right answer will
// come out anyway.
static UInt GCs_done = 0;

typedef
   struct {
      Addr a;                                 // node's base address
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];  // one V-bits byte per address
      UInt last_touched;                      // GCs_done value at last write
   }
   SecVBitNode;
739
740static OSet* createSecVBitTable(void)
741{
742 return VG_(OSet_Create)( offsetof(SecVBitNode, a),
743 NULL, // use fast comparisons
744 VG_(malloc), VG_(free) );
745}
746
747static void gcSecVBitTable(void)
748{
749 OSet* secVBitTable2;
750 SecVBitNode* n;
751 Int i, n_nodes = 0, n_survivors = 0;
752
753 GCs_done++;
754
755 // Create the new table.
756 secVBitTable2 = createSecVBitTable();
757
758 // Traverse the table, moving fresh nodes into the new table.
759 VG_(OSet_ResetIter)(secVBitTable);
760 while ( (n = VG_(OSet_Next)(secVBitTable)) ) {
761 Bool keep = False;
762 if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
763 // Keep node if it's been touched recently enough (regardless of
764 // freshness/staleness).
765 keep = True;
766 } else {
767 // Keep node if any of its bytes are non-stale. Using
768 // get_vabits2() for the lookup is not very efficient, but I don't
769 // think it matters.
770 for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
njndbf7ca72006-03-31 11:57:59 +0000771 if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
njn1d0825f2006-03-27 11:37:07 +0000772 keep = True; // Found a non-stale byte, so keep
773 break;
774 }
775 }
776 }
777
778 if ( keep ) {
779 // Insert a copy of the node into the new table.
780 SecVBitNode* n2 =
781 VG_(OSet_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
782 *n2 = *n;
783 VG_(OSet_Insert)(secVBitTable2, n2);
784 }
785 }
786
787 // Get the before and after sizes.
788 n_nodes = VG_(OSet_Size)(secVBitTable);
789 n_survivors = VG_(OSet_Size)(secVBitTable2);
790
791 // Destroy the old table, and put the new one in its place.
792 VG_(OSet_Destroy)(secVBitTable, NULL);
793 secVBitTable = secVBitTable2;
794
795 if (VG_(clo_verbosity) > 1) {
796 Char percbuf[6];
797 VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
798 VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
799 n_nodes, n_survivors, percbuf);
800 }
801
802 // Increase table size if necessary.
803 if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
804 secVBitLimit *= TABLE_GROWTH_FACTOR;
805 if (VG_(clo_verbosity) > 1)
806 VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
807 secVBitLimit);
808 }
809}
810
811static UWord get_sec_vbits8(Addr a)
812{
813 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
814 Int amod = a % BYTES_PER_SEC_VBIT_NODE;
815 SecVBitNode* n = VG_(OSet_Lookup)(secVBitTable, &aAligned);
816 UChar vbits8;
817 tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
818 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
819 // make it to the secondary V bits table.
820 vbits8 = n->vbits8[amod];
821 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
822 return vbits8;
823}
824
825static void set_sec_vbits8(Addr a, UWord vbits8)
826{
827 Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
828 Int i, amod = a % BYTES_PER_SEC_VBIT_NODE;
829 SecVBitNode* n = VG_(OSet_Lookup)(secVBitTable, &aAligned);
830 // Shouldn't be fully defined or fully undefined -- those cases shouldn't
831 // make it to the secondary V bits table.
832 tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
833 if (n) {
834 n->vbits8[amod] = vbits8; // update
835 n->last_touched = GCs_done;
836 sec_vbits_updates++;
837 } else {
838 // New node: assign the specific byte, make the rest invalid (they
839 // should never be read as-is, but be cautious).
840 n = VG_(OSet_AllocNode)(secVBitTable, sizeof(SecVBitNode));
841 n->a = aAligned;
842 for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
843 n->vbits8[i] = V_BITS8_UNDEFINED;
844 }
845 n->vbits8[amod] = vbits8;
846 n->last_touched = GCs_done;
847
848 // Do a table GC if necessary. Nb: do this before inserting the new
849 // node, to avoid erroneously GC'ing the new node.
850 if (secVBitLimit == VG_(OSet_Size)(secVBitTable)) {
851 gcSecVBitTable();
852 }
853
854 // Insert the new node.
855 VG_(OSet_Insert)(secVBitTable, n);
856 sec_vbits_new_nodes++;
857
858 n_secVBit_nodes = VG_(OSet_Size)(secVBitTable);
859 if (n_secVBit_nodes > max_secVBit_nodes)
860 max_secVBit_nodes = n_secVBit_nodes;
861 }
862}
sewardj45d94cc2005-04-20 14:44:11 +0000863
864/* --------------- Endianness helpers --------------- */
865
866/* Returns the offset in memory of the byteno-th most significant byte
867 in a wordszB-sized word, given the specified endianness. */
njn1d0825f2006-03-27 11:37:07 +0000868static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
sewardj45d94cc2005-04-20 14:44:11 +0000869 UWord byteno ) {
870 return bigendian ? (wordszB-1-byteno) : byteno;
871}
872
sewardj45d94cc2005-04-20 14:44:11 +0000873/* --------------- Load/store slow cases. --------------- */
874
// Forward declarations of the error-recording routines (static, so defined
// later in this file) that the slow-path load/store code below reports
// through.
static void mc_record_address_error ( ThreadId tid, Addr a,
                                      Int size, Bool isWrite );
static void mc_record_core_mem_error ( ThreadId tid, Bool isUnaddr, Char* s );
static void mc_record_param_error ( ThreadId tid, Addr a, Bool isReg,
                                    Bool isUnaddr, Char* msg );
static void mc_record_jump_error ( ThreadId tid, Addr a );
882
sewardj45d94cc2005-04-20 14:44:11 +0000883static
njn1d0825f2006-03-27 11:37:07 +0000884#ifndef PERF_FAST_LOADV
885INLINE
886#endif
njn45e81252006-03-28 12:35:08 +0000887ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +0000888{
njn1d0825f2006-03-27 11:37:07 +0000889 /* Make up a 64-bit result V word, which contains the loaded data for
sewardjf3d57dd2005-04-22 20:23:27 +0000890 valid addresses and Defined for invalid addresses. Iterate over
891 the bytes in the word, from the most significant down to the
892 least. */
njn1d0825f2006-03-27 11:37:07 +0000893 ULong vbits64 = V_BITS64_UNDEFINED;
njn45e81252006-03-28 12:35:08 +0000894 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +0000895 SSizeT i = szB-1; // Must be signed
sewardj45d94cc2005-04-20 14:44:11 +0000896 SizeT n_addrs_bad = 0;
897 Addr ai;
njn1d0825f2006-03-27 11:37:07 +0000898 Bool partial_load_exemption_applies;
899 UChar vbits8;
900 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +0000901
sewardjc1a2cda2005-04-21 17:34:00 +0000902 PROF_EVENT(30, "mc_LOADVn_slow");
njn45e81252006-03-28 12:35:08 +0000903 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +0000904
njn1d0825f2006-03-27 11:37:07 +0000905 for (i = szB-1; i >= 0; i--) {
sewardjc1a2cda2005-04-21 17:34:00 +0000906 PROF_EVENT(31, "mc_LOADVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +0000907 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +0000908 ok = get_vbits8(ai, &vbits8);
909 if (!ok) n_addrs_bad++;
910 vbits64 <<= 8;
911 vbits64 |= vbits8;
sewardj45d94cc2005-04-20 14:44:11 +0000912 }
913
sewardj0ded7a42005-11-08 02:25:37 +0000914 /* This is a hack which avoids producing errors for code which
915 insists in stepping along byte strings in aligned word-sized
916 chunks, and there is a partially defined word at the end. (eg,
917 optimised strlen). Such code is basically broken at least WRT
918 semantics of ANSI C, but sometimes users don't have the option
919 to fix it, and so this option is provided. Note it is now
920 defaulted to not-engaged.
921
922 A load from a partially-addressible place is allowed if:
923 - the command-line flag is set
924 - it's a word-sized, word-aligned load
925 - at least one of the addresses in the word *is* valid
926 */
927 partial_load_exemption_applies
njn1d0825f2006-03-27 11:37:07 +0000928 = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
sewardj0ded7a42005-11-08 02:25:37 +0000929 && VG_IS_WORD_ALIGNED(a)
930 && n_addrs_bad < VG_WORDSIZE;
931
932 if (n_addrs_bad > 0 && !partial_load_exemption_applies)
njn1d0825f2006-03-27 11:37:07 +0000933 mc_record_address_error( VG_(get_running_tid)(), a, szB, False );
sewardj45d94cc2005-04-20 14:44:11 +0000934
njn1d0825f2006-03-27 11:37:07 +0000935 return vbits64;
sewardj45d94cc2005-04-20 14:44:11 +0000936}
937
938
njn1d0825f2006-03-27 11:37:07 +0000939static
940#ifndef PERF_FAST_STOREV
941INLINE
942#endif
njn45e81252006-03-28 12:35:08 +0000943void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
sewardj45d94cc2005-04-20 14:44:11 +0000944{
njn45e81252006-03-28 12:35:08 +0000945 SizeT szB = nBits / 8;
njn1d0825f2006-03-27 11:37:07 +0000946 SizeT i, n_addrs_bad = 0;
947 UChar vbits8;
sewardj45d94cc2005-04-20 14:44:11 +0000948 Addr ai;
njn1d0825f2006-03-27 11:37:07 +0000949 Bool ok;
sewardj45d94cc2005-04-20 14:44:11 +0000950
sewardjc1a2cda2005-04-21 17:34:00 +0000951 PROF_EVENT(35, "mc_STOREVn_slow");
njn45e81252006-03-28 12:35:08 +0000952 tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);
sewardj45d94cc2005-04-20 14:44:11 +0000953
954 /* Dump vbytes in memory, iterating from least to most significant
955 byte. At the same time establish addressibility of the
956 location. */
957 for (i = 0; i < szB; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000958 PROF_EVENT(36, "mc_STOREVn_slow(loop)");
njn45e81252006-03-28 12:35:08 +0000959 ai = a + byte_offset_w(szB, bigendian, i);
njn1d0825f2006-03-27 11:37:07 +0000960 vbits8 = vbytes & 0xff;
961 ok = set_vbits8(ai, vbits8);
962 if (!ok) n_addrs_bad++;
sewardj45d94cc2005-04-20 14:44:11 +0000963 vbytes >>= 8;
964 }
965
966 /* If an address error has happened, report it. */
967 if (n_addrs_bad > 0)
njn1d0825f2006-03-27 11:37:07 +0000968 mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
sewardj45d94cc2005-04-20 14:44:11 +0000969}
970
971
njn25e49d8e72002-09-23 09:36:25 +0000972/*------------------------------------------------------------*/
973/*--- Setting permissions over address ranges. ---*/
974/*------------------------------------------------------------*/
975
/* The workhorse behind MC_(make_mem_noaccess/undefined/defined): set the
   V+A bits of every byte in [a, a+lenT) to the pattern 'vabits16' (one of
   the VA_BITS16_* constants, ie. the same 2-bit code for 8 consecutive
   bytes).  'dsm_num' names the distinguished secondary map holding that
   same pattern, so that whole sec-maps inside the range can be replaced by
   a pointer to the shared copy instead of being rewritten byte by byte. */
static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
                                      UWord dsm_num )
{
   UWord    sm_off, sm_off16;
   UWord    vabits2 = vabits16 & 0x3;
   SizeT    lenA, lenB, len_to_next_secmap;
   Addr     aNext;
   SecMap*  sm;
   SecMap** sm_ptr;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the V+A bits make sense. */
   tl_assert(VA_BITS16_NOACCESS == vabits16 ||
             VA_BITS16_UNDEFINED == vabits16 ||
             VA_BITS16_DEFINED == vabits16);

   // This code should never write PDBs; ensure this. (See comment above
   // set_vabits2().)
   tl_assert(VA_BITS2_PARTDEFINED != vabits2);

   if (lenT == 0)
      return;

   // Warn about suspiciously large ranges -- often a sign of a bogus
   // length reaching us.
   if (lenT > 100 * 1000 * 1000) {
      if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
         Char* s = "unknown???";
         if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
         if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
         if (vabits16 == VA_BITS16_DEFINED ) s = "defined";
         VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
                                  "large range %lu (%s)", lenT, s);
      }
   }

#ifndef PERF_FAST_SARP
   /*------------------ debug-only case ------------------ */
   {
      // Endianness doesn't matter here because all bytes are being set to
      // the same value.
      // Nb: We don't have to worry about updating the sec-V-bits table
      // after these set_vabits2() calls because this code never writes
      // VA_BITS2_PARTDEFINED values.
      SizeT i;
      for (i = 0; i < lenT; i++) {
         set_vabits2(a + i, vabits2);
      }
      return;
   }
#endif

   /*------------------ standard handling ------------------ */

   /* Get the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   example_dsm = &sm_distinguished[dsm_num];

   // We have to handle ranges covering various combinations of partial and
   // whole sec-maps. Here is how parts 1, 2 and 3 are used in each case.
   // Cases marked with a '*' are common.
   //
   // TYPE PARTS USED
   // ---- ----------
   // * one partial sec-map (p) 1
   // - one whole sec-map (P) 2
   //
   // * two partial sec-maps (pp) 1,3
   // - one partial, one whole sec-map (pP) 1,2
   // - one whole, one partial sec-map (Pp) 2,3
   // - two whole sec-maps (PP) 2,2
   //
   // * one partial, one whole, one partial (pPp) 1,2,3
   // - one partial, two whole (pPP) 1,2,2
   // - two whole, one partial (PPp) 2,2,3
   // - three whole (PPP) 2,2,2
   //
   // * one partial, N-2 whole, one partial (pP...Pp) 1,2...2,3
   // - one partial, N-1 whole (pP...PP) 1,2...2,2
   // - N-1 whole, one partial (PP...Pp) 2,2...2,3
   // - N whole (PP...PP) 2,2...2,2

   // Break up total length (lenT) into two parts: length in the first
   // sec-map (lenA), and the rest (lenB); lenT == lenA + lenB.
   aNext = start_of_this_sm(a) + SM_SIZE;
   len_to_next_secmap = aNext - a;
   if ( lenT <= len_to_next_secmap ) {
      // Range entirely within one sec-map. Covers almost all cases.
      PROF_EVENT(151, "set_address_range_perms-single-secmap");
      lenA = lenT;
      lenB = 0;
   } else if (is_start_of_sm(a)) {
      // Range spans at least one whole sec-map, and starts at the beginning
      // of a sec-map; skip to Part 2.
      PROF_EVENT(152, "set_address_range_perms-startof-secmap");
      lenA = 0;
      lenB = lenT;
      goto part2;
   } else {
      // Range spans two or more sec-maps, first one is partial.
      PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
      lenA = len_to_next_secmap;
      lenB = lenT - lenA;
   }

   //------------------------------------------------------------------------
   // Part 1: Deal with the first sec_map. Most of the time the range will be
   // entirely within a sec_map and this part alone will suffice. Also,
   // doing it this way lets us avoid repeatedly testing for the crossing of
   // a sec-map boundary within these loops.
   //------------------------------------------------------------------------

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so skip.
         PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
         a = aNext;
         lenA = 0;
      } else {
         PROF_EVENT(155, "set_address_range_perms-dist-sm1");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 1 byte steps, until 8-aligned
   while (True) {
      if (VG_IS_8_ALIGNED(a)) break;
      if (lenA < 1) break;
      PROF_EVENT(156, "set_address_range_perms-loop1a");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a += 1;
      lenA -= 1;
   }
   // 8-aligned, 8 byte steps
   while (True) {
      if (lenA < 8) break;
      PROF_EVENT(157, "set_address_range_perms-loop8a");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a += 8;
      lenA -= 8;
   }
   // 1 byte steps, trailing bytes
   while (True) {
      if (lenA < 1) break;
      PROF_EVENT(158, "set_address_range_perms-loop1b");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a += 1;
      lenA -= 1;
   }

   // We've finished the first sec-map. Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 2: Fast-set entire sec-maps at a time.
   //------------------------------------------------------------------------
  part2:
   // 64KB-aligned, 64KB steps.
   // Nb: we can reach here with lenB < SM_SIZE
   while (True) {
      if (lenB < SM_SIZE) break;
      tl_assert(is_start_of_sm(a));
      PROF_EVENT(159, "set_address_range_perms-loop64K");
      sm_ptr = get_secmap_ptr(a);
      if (!is_distinguished_sm(*sm_ptr)) {
         PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
         // Free the non-distinguished sec-map that we're replacing. This
         // case happens moderately often, enough to be worthwhile.
         VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
      }
      update_SM_counts(*sm_ptr, example_dsm);
      // Make the sec-map entry point to the example DSM
      *sm_ptr = example_dsm;
      lenB -= SM_SIZE;
      a += SM_SIZE;
   }

   // We've finished the whole sec-maps. Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 3: Finish off the final partial sec-map, if necessary.
   //------------------------------------------------------------------------

   tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so stop.
         PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
         return;
      } else {
         PROF_EVENT(162, "set_address_range_perms-dist-sm2");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 8-aligned, 8 byte steps
   while (True) {
      if (lenB < 8) break;
      PROF_EVENT(163, "set_address_range_perms-loop8b");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a += 8;
      lenB -= 8;
   }
   // 1 byte steps
   while (True) {
      if (lenB < 1) return;
      PROF_EVENT(164, "set_address_range_perms-loop1c");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a += 1;
      lenB -= 1;
   }
}
sewardj45d94cc2005-04-20 14:44:11 +00001203
sewardjc859fbf2005-04-22 21:10:28 +00001204
1205/* --- Set permissions for arbitrary address ranges --- */
njn25e49d8e72002-09-23 09:36:25 +00001206
/* Make every byte in [a, a+len) inaccessible (neither readable nor
   writable).  Thin wrapper around set_address_range_perms. */
void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
   PROF_EVENT(40, "MC_(make_mem_noaccess)");
   DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
}
1213
/* Make every byte in [a, a+len) addressable but with undefined contents.
   Thin wrapper around set_address_range_perms. */
void MC_(make_mem_undefined) ( Addr a, SizeT len )
{
   PROF_EVENT(41, "MC_(make_mem_undefined)");
   DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}
1220
/* Make every byte in [a, a+len) addressable and fully defined.
   Thin wrapper around set_address_range_perms. */
void MC_(make_mem_defined) ( Addr a, SizeT len )
{
   PROF_EVENT(42, "MC_(make_mem_defined)");
   DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
}
1227
sewardjfb1e9ad2006-03-10 13:41:58 +00001228/* For each byte in [a,a+len), if the byte is addressable, make it be
1229 defined, but if it isn't addressible, leave it alone. In other
njndbf7ca72006-03-31 11:57:59 +00001230 words a version of MC_(make_mem_defined) that doesn't mess with
sewardjfb1e9ad2006-03-10 13:41:58 +00001231 addressibility. Low-performance implementation. */
njndbf7ca72006-03-31 11:57:59 +00001232static void make_mem_defined_if_addressable ( Addr a, SizeT len )
sewardjfb1e9ad2006-03-10 13:41:58 +00001233{
1234 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001235 UChar vabits2;
njndbf7ca72006-03-31 11:57:59 +00001236 DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
sewardjfb1e9ad2006-03-10 13:41:58 +00001237 for (i = 0; i < len; i++) {
njn1d0825f2006-03-27 11:37:07 +00001238 vabits2 = get_vabits2( a+i );
1239 if (EXPECTED_TAKEN(VA_BITS2_NOACCESS != vabits2)) {
njndbf7ca72006-03-31 11:57:59 +00001240 set_vabits2(a+i, VA_BITS2_DEFINED);
njn1d0825f2006-03-27 11:37:07 +00001241 }
sewardjfb1e9ad2006-03-10 13:41:58 +00001242 }
1243}
1244
njn9b007f62003-04-07 14:40:25 +00001245
sewardj45f4e7c2005-09-27 19:20:21 +00001246/* --- Block-copy permissions (needed for implementing realloc() and
1247 sys_mremap). --- */
sewardjc859fbf2005-04-22 21:10:28 +00001248
/* Copy the V+A-bit state of [src, src+len) onto [dst, dst+len), handling
   overlapping ranges correctly (needed for realloc() and sys_mremap).
   A vectorised fast path handles the common non-overlapping, 4-aligned
   case; otherwise the copy is done byte by byte in whichever direction
   is safe for the overlap. */
void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
{
   SizeT i, j;
   UChar vabits2, vabits8;
   Bool aligned, nooverlap;

   DEBUG("MC_(copy_address_range_state)\n");
   PROF_EVENT(50, "MC_(copy_address_range_state)");

   // Nothing to do for empty or self-targeted copies.
   if (len == 0 || src == dst)
      return;

   aligned = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
   nooverlap = src+len <= dst || dst+len <= src;

   if (nooverlap && aligned) {

      /* Vectorised fast case, when no overlap and suitably aligned */
      /* vector loop: copy 4 bytes' worth of V+A bits at a time */
      i = 0;
      while (len >= 4) {
         vabits8 = get_vabits8_for_aligned_word32( src+i );
         set_vabits8_for_aligned_word32( dst+i, vabits8 );
         if (EXPECTED_TAKEN(VA_BITS8_DEFINED == vabits8
                            || VA_BITS8_UNDEFINED == vabits8
                            || VA_BITS8_NOACCESS == vabits8)) {
            /* do nothing: no partially-defined bytes in this word */
         } else {
            /* have to copy secondary map info for each PDB byte */
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
               set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
               set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
               set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
               set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
         }
         i += 4;
         len -= 4;
      }
      /* fixup loop: remaining 1-3 bytes */
      while (len >= 1) {
         vabits2 = get_vabits2( src+i );
         set_vabits2( dst+i, vabits2 );
         if (VA_BITS2_PARTDEFINED == vabits2) {
            set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
         }
         i++;
         len--;
      }

   } else {

      /* We have to do things the slow way */
      if (src < dst) {
         // Copy backwards so an overlapping destination doesn't clobber
         // source bytes before they are read.
         for (i = 0, j = len-1; i < len; i++, j--) {
            PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+j );
            set_vabits2( dst+j, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
            }
         }
      }

      if (src > dst) {
         // Copy forwards; safe for this overlap direction.
         for (i = 0; i < len; i++) {
            PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+i );
            set_vabits2( dst+i, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
            }
         }
      }
   }

}
1328
1329
1330/* --- Fast case permission setters, for dealing with stacks. --- */
1331
/* Mark the 4-aligned word at 'a' as addressable-but-undefined.  Fast path
   writes the sec-map's V+A byte directly; addresses above
   MAX_PRIMARY_ADDRESS (or builds without PERF_FAST_STACK2) fall back to
   the generic range routine. */
static INLINE
void make_aligned_word32_undefined ( Addr a )
{
   UWord sm_off;
   SecMap* sm;

   PROF_EVENT(300, "make_aligned_word32_undefined");

#ifndef PERF_FAST_STACK2
   MC_(make_mem_undefined)(a, 4);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      // Outside the primary map's range: take the slow path.
      PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
      MC_(make_mem_undefined)(a, 4);
      return;
   }

   sm = get_secmap_for_writing_low(a);
   sm_off = SM_OFF(a);
   // One vabits8 entry covers the whole 4-byte word.
   sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
#endif
}
1354
sewardj5d28efc2005-04-21 22:16:29 +00001355
/* Mark the 4-aligned word at 'a' as entirely inaccessible.  Same fast/slow
   path structure as make_aligned_word32_undefined above. */
static INLINE
void make_aligned_word32_noaccess ( Addr a )
{
   UWord sm_off;
   SecMap* sm;

   PROF_EVENT(310, "make_aligned_word32_noaccess");

#ifndef PERF_FAST_STACK2
   MC_(make_mem_noaccess)(a, 4);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      // Outside the primary map's range: take the slow path.
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      MC_(make_mem_noaccess)(a, 4);
      return;
   }

   sm = get_secmap_for_writing_low(a);
   sm_off = SM_OFF(a);
   // One vabits8 entry covers the whole 4-byte word.
   sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
#endif
}
1378
1379
/* Nb: by "aligned" here we mean 8-byte aligned */
/* Mark the 8-aligned word at 'a' as addressable-but-undefined, writing a
   16-bit V+A pattern into the sec-map on the fast path. */
static INLINE
void make_aligned_word64_undefined ( Addr a )
{
   UWord sm_off16;
   SecMap* sm;

   PROF_EVENT(320, "make_aligned_word64_undefined");

#ifndef PERF_FAST_STACK2
   MC_(make_mem_undefined)(a, 8);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      // Outside the primary map's range: take the slow path.
      PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
      MC_(make_mem_undefined)(a, 8);
      return;
   }

   sm = get_secmap_for_writing_low(a);
   sm_off16 = SM_OFF_16(a);
   // One 16-bit entry covers the whole 8-byte word.
   ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
#endif
}
1403
sewardj23eb2fd2005-04-22 16:29:19 +00001404
/* Mark the 8-aligned word at 'a' as entirely inaccessible.  Same fast/slow
   path structure as make_aligned_word64_undefined above. */
static INLINE
void make_aligned_word64_noaccess ( Addr a )
{
   UWord sm_off16;
   SecMap* sm;

   PROF_EVENT(330, "make_aligned_word64_noaccess");

#ifndef PERF_FAST_STACK2
   MC_(make_mem_noaccess)(a, 8);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      // Outside the primary map's range: take the slow path.
      PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
      MC_(make_mem_noaccess)(a, 8);
      return;
   }

   sm = get_secmap_for_writing_low(a);
   sm_off16 = SM_OFF_16(a);
   // One 16-bit entry covers the whole 8-byte word.
   ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
#endif
}
1427
sewardj23eb2fd2005-04-22 16:29:19 +00001428
njn1d0825f2006-03-27 11:37:07 +00001429/*------------------------------------------------------------*/
1430/*--- Stack pointer adjustment ---*/
1431/*------------------------------------------------------------*/
1432
1433static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
1434{
1435 PROF_EVENT(110, "new_mem_stack_4");
1436 if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001437 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00001438 } else {
njndbf7ca72006-03-31 11:57:59 +00001439 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
njn1d0825f2006-03-27 11:37:07 +00001440 }
1441}
1442
1443static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
1444{
1445 PROF_EVENT(120, "die_mem_stack_4");
1446 if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001447 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001448 } else {
njndbf7ca72006-03-31 11:57:59 +00001449 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
njn1d0825f2006-03-27 11:37:07 +00001450 }
1451}
1452
1453static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
1454{
1455 PROF_EVENT(111, "new_mem_stack_8");
1456 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001457 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
njn1d0825f2006-03-27 11:37:07 +00001458 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001459 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1460 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001461 } else {
njndbf7ca72006-03-31 11:57:59 +00001462 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
njn1d0825f2006-03-27 11:37:07 +00001463 }
1464}
1465
1466static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
1467{
1468 PROF_EVENT(121, "die_mem_stack_8");
1469 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001470 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00001471 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001472 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
1473 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001474 } else {
njndbf7ca72006-03-31 11:57:59 +00001475 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
njn1d0825f2006-03-27 11:37:07 +00001476 }
1477}
1478
1479static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
1480{
1481 PROF_EVENT(112, "new_mem_stack_12");
1482 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001483 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1484 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
njn1d0825f2006-03-27 11:37:07 +00001485 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001486 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1487 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
njn1d0825f2006-03-27 11:37:07 +00001488 } else {
njndbf7ca72006-03-31 11:57:59 +00001489 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
njn1d0825f2006-03-27 11:37:07 +00001490 }
1491}
1492
1493static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
1494{
1495 PROF_EVENT(122, "die_mem_stack_12");
1496 /* Note the -12 in the test */
1497 if (VG_IS_8_ALIGNED(new_SP-12)) {
njndbf7ca72006-03-31 11:57:59 +00001498 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1499 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001500 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001501 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1502 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00001503 } else {
njndbf7ca72006-03-31 11:57:59 +00001504 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
njn1d0825f2006-03-27 11:37:07 +00001505 }
1506}
1507
1508static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
1509{
1510 PROF_EVENT(113, "new_mem_stack_16");
1511 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001512 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1513 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
njn1d0825f2006-03-27 11:37:07 +00001514 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001515 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1516 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1517 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
njn1d0825f2006-03-27 11:37:07 +00001518 } else {
njndbf7ca72006-03-31 11:57:59 +00001519 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
njn1d0825f2006-03-27 11:37:07 +00001520 }
1521}
1522
1523static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
1524{
1525 PROF_EVENT(123, "die_mem_stack_16");
1526 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001527 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1528 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
njn1d0825f2006-03-27 11:37:07 +00001529 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001530 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1531 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1532 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001533 } else {
njndbf7ca72006-03-31 11:57:59 +00001534 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
njn1d0825f2006-03-27 11:37:07 +00001535 }
1536}
1537
1538static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
1539{
1540 PROF_EVENT(114, "new_mem_stack_32");
1541 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001542 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1543 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1544 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1545 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
njn1d0825f2006-03-27 11:37:07 +00001546 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001547 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1548 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
1549 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
1550 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
1551 make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
njn1d0825f2006-03-27 11:37:07 +00001552 } else {
njndbf7ca72006-03-31 11:57:59 +00001553 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
njn1d0825f2006-03-27 11:37:07 +00001554 }
1555}
1556
1557static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
1558{
1559 PROF_EVENT(124, "die_mem_stack_32");
1560 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001561 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1562 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1563 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1564 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001565 } else if (VG_IS_4_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001566 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1567 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
1568 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
1569 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
1570 make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
njn1d0825f2006-03-27 11:37:07 +00001571 } else {
njndbf7ca72006-03-31 11:57:59 +00001572 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
njn1d0825f2006-03-27 11:37:07 +00001573 }
1574}
1575
1576static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
1577{
1578 PROF_EVENT(115, "new_mem_stack_112");
1579 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001580 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1581 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1582 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1583 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1584 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1585 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1586 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1587 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1588 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1589 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1590 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1591 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1592 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1593 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
njn1d0825f2006-03-27 11:37:07 +00001594 } else {
njndbf7ca72006-03-31 11:57:59 +00001595 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
njn1d0825f2006-03-27 11:37:07 +00001596 }
1597}
1598
1599static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
1600{
1601 PROF_EVENT(125, "die_mem_stack_112");
1602 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001603 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1604 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1605 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1606 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1607 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1608 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1609 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1610 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1611 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1612 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1613 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1614 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1615 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1616 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001617 } else {
njndbf7ca72006-03-31 11:57:59 +00001618 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
njn1d0825f2006-03-27 11:37:07 +00001619 }
1620}
1621
1622static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
1623{
1624 PROF_EVENT(116, "new_mem_stack_128");
1625 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001626 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1627 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1628 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1629 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1630 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1631 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1632 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1633 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1634 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1635 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1636 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1637 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1638 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1639 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
1640 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
1641 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
njn1d0825f2006-03-27 11:37:07 +00001642 } else {
njndbf7ca72006-03-31 11:57:59 +00001643 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
njn1d0825f2006-03-27 11:37:07 +00001644 }
1645}
1646
1647static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
1648{
1649 PROF_EVENT(126, "die_mem_stack_128");
1650 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001651 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
1652 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
1653 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1654 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1655 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1656 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1657 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1658 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1659 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1660 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1661 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1662 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1663 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1664 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1665 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1666 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001667 } else {
njndbf7ca72006-03-31 11:57:59 +00001668 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
njn1d0825f2006-03-27 11:37:07 +00001669 }
1670}
1671
1672static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
1673{
1674 PROF_EVENT(117, "new_mem_stack_144");
1675 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001676 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1677 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1678 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1679 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1680 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1681 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1682 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1683 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1684 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1685 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1686 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1687 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1688 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1689 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
1690 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
1691 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
1692 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
1693 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
njn1d0825f2006-03-27 11:37:07 +00001694 } else {
njndbf7ca72006-03-31 11:57:59 +00001695 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
njn1d0825f2006-03-27 11:37:07 +00001696 }
1697}
1698
1699static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
1700{
1701 PROF_EVENT(127, "die_mem_stack_144");
1702 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001703 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
1704 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
1705 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
1706 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
1707 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1708 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1709 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1710 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1711 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1712 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1713 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1714 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1715 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1716 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1717 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1718 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1719 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1720 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001721 } else {
njndbf7ca72006-03-31 11:57:59 +00001722 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
njn1d0825f2006-03-27 11:37:07 +00001723 }
1724}
1725
1726static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
1727{
1728 PROF_EVENT(118, "new_mem_stack_160");
1729 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001730 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
1731 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
1732 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
1733 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
1734 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
1735 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
1736 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
1737 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
1738 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
1739 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
1740 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
1741 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
1742 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
1743 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104);
1744 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112);
1745 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120);
1746 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128);
1747 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136);
1748 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144);
1749 make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152);
njn1d0825f2006-03-27 11:37:07 +00001750 } else {
njndbf7ca72006-03-31 11:57:59 +00001751 MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
njn1d0825f2006-03-27 11:37:07 +00001752 }
1753}
1754
1755static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
1756{
1757 PROF_EVENT(128, "die_mem_stack_160");
1758 if (VG_IS_8_ALIGNED(new_SP)) {
njndbf7ca72006-03-31 11:57:59 +00001759 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
1760 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
1761 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
1762 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
1763 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
1764 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
1765 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
1766 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
1767 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
1768 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
1769 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
1770 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
1771 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
1772 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
1773 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
1774 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
1775 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
1776 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
1777 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
1778 make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
njn1d0825f2006-03-27 11:37:07 +00001779 } else {
njndbf7ca72006-03-31 11:57:59 +00001780 MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
njn1d0825f2006-03-27 11:37:07 +00001781 }
1782}
1783
/* Generic fallback for SP decreases of a size with no specialised
   handler above: mark the newly-exposed area (shifted down by the
   stack redzone) as undefined.
   NOTE(review): PROF_EVENT id 115 is also used by
   mc_new_mem_stack_112 -- the two share a profiling slot; confirm
   whether that collision is intentional. */
static void mc_new_mem_stack ( Addr a, SizeT len )
{
   PROF_EVENT(115, "new_mem_stack");
   MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
}
1789
/* Generic fallback for SP increases of a size with no specialised
   handler above: mark the vacated area (shifted down by the stack
   redzone) as noaccess.
   NOTE(review): PROF_EVENT id 125 is also used by
   mc_die_mem_stack_112 -- the two share a profiling slot; confirm
   whether that collision is intentional. */
static void mc_die_mem_stack ( Addr a, SizeT len )
{
   PROF_EVENT(125, "die_mem_stack");
   MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
}
njn9b007f62003-04-07 14:40:25 +00001795
sewardj45d94cc2005-04-20 14:44:11 +00001796
njn1d0825f2006-03-27 11:37:07 +00001797/* The AMD64 ABI says:
1798
1799 "The 128-byte area beyond the location pointed to by %rsp is considered
1800 to be reserved and shall not be modified by signal or interrupt
1801 handlers. Therefore, functions may use this area for temporary data
1802 that is not needed across function calls. In particular, leaf functions
1803 may use this area for their entire stack frame, rather than adjusting
1804 the stack pointer in the prologue and epilogue. This area is known as
1805 red zone [sic]."
1806
1807 So after any call or return we need to mark this redzone as containing
1808 undefined values.
1809
1810 Consider this: we're in function f. f calls g. g moves rsp down
1811 modestly (say 16 bytes) and writes stuff all over the red zone, making it
1812 defined. g returns. f is buggy and reads from parts of the red zone
1813 that it didn't write on. But because g filled that area in, f is going
1814 to be picking up defined V bits and so any errors from reading bits of
1815 the red zone it didn't write, will be missed. The only solution I could
1816 think of was to make the red zone undefined when g returns to f.
1817
1818 This is in accordance with the ABI, which makes it clear the redzone
1819 is volatile across function calls.
1820
1821 The problem occurs the other way round too: f could fill the RZ up
1822 with defined values and g could mistakenly read them. So the RZ
1823 also needs to be nuked on function calls.
1824*/
/* Mark [base, base+len) as undefined.  Called after calls/returns to
   nuke the stack redzone (see the ABI discussion above): len is 128
   for AMD64 and 288 for ELF ppc64, and those two cases get unrolled
   fast paths that write VA bits straight into the secondary map.
   Anything else falls through to the general slow path. */
void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
{
   tl_assert(sizeof(UWord) == sizeof(SizeT));
   /* Debug-only trace, compiled out via if(0).
      NOTE(review): "%d" for a UWord is questionable on 64-bit targets;
      harmless while disabled, but fix if ever enabled. */
   if (0)
      VG_(printf)("helperc_MAKE_STACK_UNINIT %p %d\n", base, len );

# if 0
   /* Really slow version */
   MC_(make_mem_undefined)(base, len);
# endif

# if 0
   /* Slow(ish) version, which is fairly easily seen to be correct.
   */
   if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
      make_aligned_word64_undefined(base +   0);
      make_aligned_word64_undefined(base +   8);
      make_aligned_word64_undefined(base +  16);
      make_aligned_word64_undefined(base +  24);

      make_aligned_word64_undefined(base +  32);
      make_aligned_word64_undefined(base +  40);
      make_aligned_word64_undefined(base +  48);
      make_aligned_word64_undefined(base +  56);

      make_aligned_word64_undefined(base +  64);
      make_aligned_word64_undefined(base +  72);
      make_aligned_word64_undefined(base +  80);
      make_aligned_word64_undefined(base +  88);

      make_aligned_word64_undefined(base +  96);
      make_aligned_word64_undefined(base + 104);
      make_aligned_word64_undefined(base + 112);
      make_aligned_word64_undefined(base + 120);
   } else {
      MC_(make_mem_undefined)(base, len);
   }
# endif

   /* Idea is: go fast when
         * 8-aligned and length is 128
         * the sm is available in the main primary map
         * the address range falls entirely with a single secondary map
      If all those conditions hold, just update the V+A bits by writing
      directly into the vabits array.  (If the sm was distinguished, this
      will make a copy and then write to it.)
   */
   /* --- Fast path for the AMD64 redzone (len == 128). --- */
   if (EXPECTED_TAKEN( len == 128 && VG_IS_8_ALIGNED(base) )) {
      /* Now we know the address range is suitably sized and aligned. */
      UWord a_lo = (UWord)(base);
      UWord a_hi = (UWord)(base + 128 - 1);
      tl_assert(a_lo < a_hi);             // paranoia: detect overflow
      if (a_hi < MAX_PRIMARY_ADDRESS) {
         // Now we know the entire range is within the main primary map.
         SecMap* sm    = get_secmap_for_writing_low(a_lo);
         SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
         /* Now we know that the entire address range falls within a
            single secondary map, and that that secondary 'lives' in
            the main primary map. */
         if (EXPECTED_TAKEN(sm == sm_hi)) {
            // Finally, we know that the range is entirely within one secmap.
            // Each UShort covers 8 bytes' worth of VA bits; 16 stores = 128 bytes.
            UWord   v_off = SM_OFF(a_lo);
            UShort* p     = (UShort*)(&sm->vabits8[v_off]);
            p[ 0] = VA_BITS16_UNDEFINED;
            p[ 1] = VA_BITS16_UNDEFINED;
            p[ 2] = VA_BITS16_UNDEFINED;
            p[ 3] = VA_BITS16_UNDEFINED;
            p[ 4] = VA_BITS16_UNDEFINED;
            p[ 5] = VA_BITS16_UNDEFINED;
            p[ 6] = VA_BITS16_UNDEFINED;
            p[ 7] = VA_BITS16_UNDEFINED;
            p[ 8] = VA_BITS16_UNDEFINED;
            p[ 9] = VA_BITS16_UNDEFINED;
            p[10] = VA_BITS16_UNDEFINED;
            p[11] = VA_BITS16_UNDEFINED;
            p[12] = VA_BITS16_UNDEFINED;
            p[13] = VA_BITS16_UNDEFINED;
            p[14] = VA_BITS16_UNDEFINED;
            p[15] = VA_BITS16_UNDEFINED;
            return;
         }
      }
   }

   /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
   if (EXPECTED_TAKEN( len == 288 && VG_IS_8_ALIGNED(base) )) {
      /* Now we know the address range is suitably sized and aligned. */
      UWord a_lo = (UWord)(base);
      UWord a_hi = (UWord)(base + 288 - 1);
      tl_assert(a_lo < a_hi);             // paranoia: detect overflow
      if (a_hi < MAX_PRIMARY_ADDRESS) {
         // Now we know the entire range is within the main primary map.
         SecMap* sm    = get_secmap_for_writing_low(a_lo);
         SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
         /* Now we know that the entire address range falls within a
            single secondary map, and that that secondary 'lives' in
            the main primary map. */
         if (EXPECTED_TAKEN(sm == sm_hi)) {
            // Finally, we know that the range is entirely within one secmap.
            // 36 UShort stores = 288 bytes' worth of VA bits.
            UWord   v_off = SM_OFF(a_lo);
            UShort* p     = (UShort*)(&sm->vabits8[v_off]);
            p[ 0] = VA_BITS16_UNDEFINED;
            p[ 1] = VA_BITS16_UNDEFINED;
            p[ 2] = VA_BITS16_UNDEFINED;
            p[ 3] = VA_BITS16_UNDEFINED;
            p[ 4] = VA_BITS16_UNDEFINED;
            p[ 5] = VA_BITS16_UNDEFINED;
            p[ 6] = VA_BITS16_UNDEFINED;
            p[ 7] = VA_BITS16_UNDEFINED;
            p[ 8] = VA_BITS16_UNDEFINED;
            p[ 9] = VA_BITS16_UNDEFINED;
            p[10] = VA_BITS16_UNDEFINED;
            p[11] = VA_BITS16_UNDEFINED;
            p[12] = VA_BITS16_UNDEFINED;
            p[13] = VA_BITS16_UNDEFINED;
            p[14] = VA_BITS16_UNDEFINED;
            p[15] = VA_BITS16_UNDEFINED;
            p[16] = VA_BITS16_UNDEFINED;
            p[17] = VA_BITS16_UNDEFINED;
            p[18] = VA_BITS16_UNDEFINED;
            p[19] = VA_BITS16_UNDEFINED;
            p[20] = VA_BITS16_UNDEFINED;
            p[21] = VA_BITS16_UNDEFINED;
            p[22] = VA_BITS16_UNDEFINED;
            p[23] = VA_BITS16_UNDEFINED;
            p[24] = VA_BITS16_UNDEFINED;
            p[25] = VA_BITS16_UNDEFINED;
            p[26] = VA_BITS16_UNDEFINED;
            p[27] = VA_BITS16_UNDEFINED;
            p[28] = VA_BITS16_UNDEFINED;
            p[29] = VA_BITS16_UNDEFINED;
            p[30] = VA_BITS16_UNDEFINED;
            p[31] = VA_BITS16_UNDEFINED;
            p[32] = VA_BITS16_UNDEFINED;
            p[33] = VA_BITS16_UNDEFINED;
            p[34] = VA_BITS16_UNDEFINED;
            p[35] = VA_BITS16_UNDEFINED;
            return;
         }
      }
   }

   /* else fall into slow case */
   MC_(make_mem_undefined)(base, len);
}
1970
1971
nethercote8b76fe52004-11-08 19:20:09 +00001972/*------------------------------------------------------------*/
1973/*--- Checking memory ---*/
1974/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001975
/* Result of a definedness check on a memory range: OK, addressability
   error, or validity (definedness) error.  Presumably the values are
   deliberately not 0/1 so accidental boolean use is caught -- confirm. */
typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;
1983
1984
njn25e49d8e72002-09-23 09:36:25 +00001985/* Check permissions for address range. If inadequate permissions
1986 exist, *bad_addr is set to the offending address, so the caller can
1987 know what it is. */
1988
sewardjecf8e102003-07-12 12:11:39 +00001989/* Returns True if [a .. a+len) is not addressible. Otherwise,
1990 returns False, and if bad_addr is non-NULL, sets *bad_addr to
1991 indicate the lowest failing address. Functions below are
1992 similar. */
njndbf7ca72006-03-31 11:57:59 +00001993Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +00001994{
nethercote451eae92004-11-02 13:06:32 +00001995 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00001996 UWord vabits2;
1997
njndbf7ca72006-03-31 11:57:59 +00001998 PROF_EVENT(60, "check_mem_is_noaccess");
sewardjecf8e102003-07-12 12:11:39 +00001999 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002000 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
njn1d0825f2006-03-27 11:37:07 +00002001 vabits2 = get_vabits2(a);
2002 if (VA_BITS2_NOACCESS != vabits2) {
2003 if (bad_addr != NULL) *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +00002004 return False;
2005 }
2006 a++;
2007 }
2008 return True;
2009}
2010
njndbf7ca72006-03-31 11:57:59 +00002011static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002012{
nethercote451eae92004-11-02 13:06:32 +00002013 SizeT i;
njn1d0825f2006-03-27 11:37:07 +00002014 UWord vabits2;
2015
njndbf7ca72006-03-31 11:57:59 +00002016 PROF_EVENT(62, "is_mem_addressable");
njn25e49d8e72002-09-23 09:36:25 +00002017 for (i = 0; i < len; i++) {
njndbf7ca72006-03-31 11:57:59 +00002018 PROF_EVENT(63, "is_mem_addressable(loop)");
njn1d0825f2006-03-27 11:37:07 +00002019 vabits2 = get_vabits2(a);
2020 if (VA_BITS2_NOACCESS == vabits2) {
njn25e49d8e72002-09-23 09:36:25 +00002021 if (bad_addr != NULL) *bad_addr = a;
2022 return False;
2023 }
2024 a++;
2025 }
2026 return True;
2027}
2028
/* Scan [a, a+len).  Returns MC_Ok if every byte is addressable and
   fully defined; MC_AddrErr on the first unaddressable byte; and
   MC_ValueErr on the first not-fully-defined byte, but only when
   --undef-value-errors=yes.  Addressability errors take precedence
   over definedness errors.
   Subtlety: when a byte is merely not-fully-defined and undef-value
   errors are disabled, *bad_addr is still written but the scan
   CONTINUES (no return) -- so *bad_addr may be clobbered even when
   MC_Ok is ultimately returned. */
static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord vabits2;

   PROF_EVENT(64, "is_mem_defined");
   DEBUG("is_mem_defined\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(65, "is_mem_defined(loop)");
      vabits2 = get_vabits2(a);
      if (VA_BITS2_DEFINED != vabits2) {
         // Error!  Nb: Report addressability errors in preference to
         // definedness errors.  And don't report definedness errors unless
         // --undef-value-errors=yes.
         if (bad_addr != NULL) *bad_addr = a;
         if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
         else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}
2051
2052
2053/* Check a zero-terminated ascii string. Tricky -- don't want to
2054 examine the actual bytes, to find the end, until we're sure it is
2055 safe to do so. */
2056
njndbf7ca72006-03-31 11:57:59 +00002057static Bool mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +00002058{
njn1d0825f2006-03-27 11:37:07 +00002059 UWord vabits2;
2060
njndbf7ca72006-03-31 11:57:59 +00002061 PROF_EVENT(66, "mc_is_defined_asciiz");
2062 DEBUG("mc_is_defined_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +00002063 while (True) {
njndbf7ca72006-03-31 11:57:59 +00002064 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
njn1d0825f2006-03-27 11:37:07 +00002065 vabits2 = get_vabits2(a);
njndbf7ca72006-03-31 11:57:59 +00002066 if (VA_BITS2_DEFINED != vabits2) {
njn1d0825f2006-03-27 11:37:07 +00002067 // Error! Nb: Report addressability errors in preference to
2068 // definedness errors. And don't report definedeness errors unless
2069 // --undef-value-errors=yes.
2070 if (bad_addr != NULL) *bad_addr = a;
2071 if ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr;
2072 else if ( MC_(clo_undef_value_errors) ) return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +00002073 }
2074 /* Ok, a is safe to read. */
njn1d0825f2006-03-27 11:37:07 +00002075 if (* ((UChar*)a) == 0) {
sewardj45d94cc2005-04-20 14:44:11 +00002076 return MC_Ok;
njn1d0825f2006-03-27 11:37:07 +00002077 }
njn25e49d8e72002-09-23 09:36:25 +00002078 a++;
2079 }
2080}
2081
2082
2083/*------------------------------------------------------------*/
2084/*--- Memory event handlers ---*/
2085/*------------------------------------------------------------*/
2086
njn25e49d8e72002-09-23 09:36:25 +00002087static
njndbf7ca72006-03-31 11:57:59 +00002088void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
2089 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002090{
njn25e49d8e72002-09-23 09:36:25 +00002091 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002092 Bool ok = is_mem_addressable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002093
njn25e49d8e72002-09-23 09:36:25 +00002094 if (!ok) {
2095 switch (part) {
2096 case Vg_CoreSysCall:
njn1d0825f2006-03-27 11:37:07 +00002097 mc_record_param_error ( tid, bad_addr, /*isReg*/False,
nethercote8b76fe52004-11-08 19:20:09 +00002098 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002099 break;
2100
2101 case Vg_CorePThread:
2102 case Vg_CoreSignal:
njn1d0825f2006-03-27 11:37:07 +00002103 mc_record_core_mem_error( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00002104 break;
2105
2106 default:
njndbf7ca72006-03-31 11:57:59 +00002107 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002108 }
2109 }
njn25e49d8e72002-09-23 09:36:25 +00002110}
2111
2112static
njndbf7ca72006-03-31 11:57:59 +00002113void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00002114 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00002115{
njn25e49d8e72002-09-23 09:36:25 +00002116 Addr bad_addr;
njndbf7ca72006-03-31 11:57:59 +00002117 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
sewardj45f4e7c2005-09-27 19:20:21 +00002118
nethercote8b76fe52004-11-08 19:20:09 +00002119 if (MC_Ok != res) {
2120 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
sewardj45f4e7c2005-09-27 19:20:21 +00002121
njn25e49d8e72002-09-23 09:36:25 +00002122 switch (part) {
2123 case Vg_CoreSysCall:
njn1d0825f2006-03-27 11:37:07 +00002124 mc_record_param_error ( tid, bad_addr, /*isReg*/False,
njndbf7ca72006-03-31 11:57:59 +00002125 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002126 break;
2127
njn1d0825f2006-03-27 11:37:07 +00002128 case Vg_CoreClientReq: // Kludge: make this a CoreMemErr
njn25e49d8e72002-09-23 09:36:25 +00002129 case Vg_CorePThread:
njn1d0825f2006-03-27 11:37:07 +00002130 mc_record_core_mem_error( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002131 break;
2132
2133 /* If we're being asked to jump to a silly address, record an error
2134 message before potentially crashing the entire system. */
2135 case Vg_CoreTranslate:
njn1d0825f2006-03-27 11:37:07 +00002136 mc_record_jump_error( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00002137 break;
2138
2139 default:
njndbf7ca72006-03-31 11:57:59 +00002140 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00002141 }
2142 }
njn25e49d8e72002-09-23 09:36:25 +00002143}
2144
2145static
njndbf7ca72006-03-31 11:57:59 +00002146void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00002147 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00002148{
nethercote8b76fe52004-11-08 19:20:09 +00002149 MC_ReadResult res;
njn5ab96ac2005-05-08 02:59:50 +00002150 Addr bad_addr = 0; // shut GCC up
njn25e49d8e72002-09-23 09:36:25 +00002151
njnca82cc02004-11-22 17:18:48 +00002152 tl_assert(part == Vg_CoreSysCall);
njndbf7ca72006-03-31 11:57:59 +00002153 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
nethercote8b76fe52004-11-08 19:20:09 +00002154 if (MC_Ok != res) {
2155 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
njn1d0825f2006-03-27 11:37:07 +00002156 mc_record_param_error ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00002157 }
njn25e49d8e72002-09-23 09:36:25 +00002158}
2159
/* Handle memory present at process startup: mark the whole range as
   defined.  The rr/ww/xx permission flags are deliberately ignored. */
static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it defined.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a, (ULong)len, rr, ww, xx);
   MC_(make_mem_defined)(a, len);
}
2168
/* Handle newly mmap'd memory: mark it defined.  As with
   mc_new_mem_startup, the rr/ww/xx permission flags are ignored. */
static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   MC_(make_mem_defined)(a, len);
}
2174
/* After the core has written [a, a+len), the range is known-defined.
   'part' and 'tid' are unused. */
static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   MC_(make_mem_defined)(a, len);
}
njn25e49d8e72002-09-23 09:36:25 +00002180
sewardj45d94cc2005-04-20 14:44:11 +00002181
njn25e49d8e72002-09-23 09:36:25 +00002182/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002183/*--- Register event handlers ---*/
2184/*------------------------------------------------------------*/
2185
sewardj45d94cc2005-04-20 14:44:11 +00002186/* When some chunk of guest state is written, mark the corresponding
2187 shadow area as valid. This is used to initialise arbitrarily large
sewardj62eae5f2006-01-17 01:58:24 +00002188 chunks of guest state, hence the _SIZE value, which has to be as
2189 big as the biggest guest state.
sewardj45d94cc2005-04-20 14:44:11 +00002190*/
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   /* Mark [offset, offset+size) of tid's shadow register area as fully
      defined.  MAX_REG_WRITE_SIZE bounds the scratch buffer; it must be
      at least as big as the biggest guest state write (see the comment
      above this function). */
#  define MAX_REG_WRITE_SIZE 1392
   UChar area[MAX_REG_WRITE_SIZE];
   tl_assert(size <= MAX_REG_WRITE_SIZE);
   VG_(memset)(area, V_BITS8_DEFINED, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
#  undef MAX_REG_WRITE_SIZE
}
2201
/* Same as mc_post_reg_write, for writes done on behalf of a client
   call; the called-function address 'f' is unused here.  The first
   argument of mc_post_reg_write (CorePart) is a dummy. */
static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}
2209
sewardj45d94cc2005-04-20 14:44:11 +00002210/* Look at the definedness of the guest's shadow state for
2211 [offset, offset+len). If any part of that is undefined, record
2212 a parameter error.
2213*/
2214static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
2215 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00002216{
sewardj45d94cc2005-04-20 14:44:11 +00002217 Int i;
2218 Bool bad;
2219
2220 UChar area[16];
2221 tl_assert(size <= 16);
2222
2223 VG_(get_shadow_regs_area)( tid, offset, size, area );
2224
2225 bad = False;
2226 for (i = 0; i < size; i++) {
njn1d0825f2006-03-27 11:37:07 +00002227 if (area[i] != V_BITS8_DEFINED) {
sewardj2c27f702005-05-03 18:19:05 +00002228 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00002229 break;
2230 }
nethercote8b76fe52004-11-08 19:20:09 +00002231 }
2232
sewardj45d94cc2005-04-20 14:44:11 +00002233 if (bad)
njn1d0825f2006-03-27 11:37:07 +00002234 mc_record_param_error ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
nethercote8b76fe52004-11-08 19:20:09 +00002235}
njnd3040452003-05-19 15:04:06 +00002236
njn25e49d8e72002-09-23 09:36:25 +00002237
sewardj6cf40ff2005-04-20 22:31:26 +00002238/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00002239/*--- Error and suppression types ---*/
2240/*------------------------------------------------------------*/
2241
/* The classification of a faulting address. */
typedef
   enum {
      Undescribed,   // as-yet unclassified
      Stack,         // on some thread's stack
      Unknown,       // classification yielded nothing useful
      Freed, Mallocd,// in a recently-freed / currently-live heap block
      UserG,         // in a user-defined block
      Mempool,       // in a mempool
      Register,      // in a register; for Param errors only
   }
   AddrKind;

/* Records info about a faulting address.  Which fields are meaningful
   depends on 'akind', as noted per-field. */
typedef
   struct {                   // Used by:
      AddrKind akind;         //   ALL
      SizeT blksize;          //   Freed, Mallocd
      OffT rwoffset;          //   Freed, Mallocd (offset of access in block)
      ExeContext* lastchange; //   Freed, Mallocd (where alloc'd/freed)
      ThreadId stack_tid;     //   Stack
      const Char *desc;       //   UserG
      Bool maybe_gcc;         // True if just below %esp -- could be a gcc bug.
   }
   AddrInfo;

/* Suppression kinds understood by Memcheck's suppression machinery. */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Use of invalid values of given size (MemCheck only)
      Value0Supp, Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Invalid read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;

/* What kind of error it is. */
typedef
   enum { ValueErr,
          CoreMemErr,   // Error in core op (pthread, signals) or client req
          AddrErr,
          ParamErr, UserErr,  /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr,
          OverlapErr,
          LeakErr,
          IllegalMempoolErr,
   }
   MC_ErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors; hung off the core's Error record.
   Which fields are meaningful depends on the MC_ErrorKind. */
typedef
   struct {                // Used by:
      AxsKind axskind;     //   AddrErr
      Int size;            //   AddrErr, ValueErr
      AddrInfo addrinfo;   //   {Addr,Free,FreeMismatch,Param,User}Err
      Bool isUnaddr;       //   {CoreMem,Param,User}Err
   }
   MC_Error;
2313
2314/*------------------------------------------------------------*/
njn9e63cb62005-05-08 18:34:59 +00002315/*--- Printing errors ---*/
2316/*------------------------------------------------------------*/
2317
njn1d0825f2006-03-27 11:37:07 +00002318static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai )
2319{
2320 HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
2321 HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";
2322
2323 switch (ai->akind) {
2324 case Stack:
2325 VG_(message)(Vg_UserMsg,
2326 "%sAddress 0x%llx is on thread %d's stack%s",
2327 xpre, (ULong)a, ai->stack_tid, xpost);
2328 break;
2329 case Unknown:
2330 if (ai->maybe_gcc) {
2331 VG_(message)(Vg_UserMsg,
2332 "%sAddress 0x%llx is just below the stack ptr. "
2333 "To suppress, use: --workaround-gcc296-bugs=yes%s",
2334 xpre, (ULong)a, xpost
2335 );
2336 } else {
2337 VG_(message)(Vg_UserMsg,
2338 "%sAddress 0x%llx "
2339 "is not stack'd, malloc'd or (recently) free'd%s",
2340 xpre, (ULong)a, xpost);
2341 }
2342 break;
2343 case Freed: case Mallocd: case UserG: case Mempool: {
2344 SizeT delta;
2345 const Char* relative;
2346 const Char* kind;
2347 if (ai->akind == Mempool) {
2348 kind = "mempool";
2349 } else {
2350 kind = "block";
2351 }
2352 if (ai->desc != NULL)
2353 kind = ai->desc;
2354
2355 if (ai->rwoffset < 0) {
2356 delta = (SizeT)(- ai->rwoffset);
2357 relative = "before";
2358 } else if (ai->rwoffset >= ai->blksize) {
2359 delta = ai->rwoffset - ai->blksize;
2360 relative = "after";
2361 } else {
2362 delta = ai->rwoffset;
2363 relative = "inside";
2364 }
2365 VG_(message)(Vg_UserMsg,
2366 "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
2367 xpre,
2368 a, delta, relative, kind,
2369 ai->blksize,
2370 ai->akind==Mallocd ? "alloc'd"
2371 : ai->akind==Freed ? "free'd"
2372 : "client-defined",
2373 xpost);
2374 VG_(pp_ExeContext)(ai->lastchange);
2375 break;
2376 }
2377 case Register:
2378 // print nothing
2379 tl_assert(0 == a);
2380 break;
2381 default:
2382 VG_(tool_panic)("mc_pp_AddrInfo");
2383 }
2384}
2385
/* Print one Memcheck error in full: the main message (wrapped in
   <what>/<kind> tags when --xml=yes), the stack trace where it
   occurred, and, for errors with an associated address, a description
   of that address via mc_pp_AddrInfo. */
static void mc_pp_Error ( Error* err )
{
   MC_Error* err_extra = VG_(get_error_extra)(err);

   HChar* xpre  = VG_(clo_xml) ? "  <what>" : "";
   HChar* xpost = VG_(clo_xml) ? "</what>"  : "";

   switch (VG_(get_error_kind)(err)) {
      case CoreMemErr: {
         Char* s = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, "  <kind>CoreMemError</kind>");
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         VG_(message)(Vg_UserMsg, "%s%s contains %s byte(s)%s",
                      xpre, VG_(get_error_string)(err), s, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      }

      case ValueErr:
         /* size == 0 means a conditional jump/move on undefined data;
            size > 0 means an undefined value of that many bytes. */
         if (err_extra->size == 0) {
            if (VG_(clo_xml))
               VG_(message)(Vg_UserMsg, "  <kind>UninitCondition</kind>");
            VG_(message)(Vg_UserMsg, "%sConditional jump or move depends"
                                     " on uninitialised value(s)%s",
                                     xpre, xpost);
         } else {
            if (VG_(clo_xml))
               VG_(message)(Vg_UserMsg, "  <kind>UninitValue</kind>");
            VG_(message)(Vg_UserMsg,
                         "%sUse of uninitialised value of size %d%s",
                         xpre, err_extra->size, xpost);
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      case ParamErr: {
         /* A register param "contains" bad bytes; a memory param
            "points to" them. */
         Bool isReg = ( Register == err_extra->addrinfo.akind );
         Char* s1 = ( isReg ? "contains" : "points to" );
         Char* s2 = ( err_extra->isUnaddr ? "unaddressable" : "uninitialised" );
         if (isReg) tl_assert(!err_extra->isUnaddr);

         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, "  <kind>SyscallParam</kind>");
         VG_(message)(Vg_UserMsg, "%sSyscall param %s %s %s byte(s)%s",
                      xpre, VG_(get_error_string)(err), s1, s2, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      case UserErr: {
         Char* s = ( err_extra->isUnaddr ? "Unaddressable" : "Uninitialised" );

         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, "  <kind>ClientCheck</kind>");
         VG_(message)(Vg_UserMsg,
            "%s%s byte(s) found during client check request%s",
            xpre, s, xpost);

         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;
      }
      case FreeErr:
         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, "  <kind>InvalidFree</kind>");
         VG_(message)(Vg_UserMsg,
                      "%sInvalid free() / delete / delete[]%s",
                      xpre, xpost);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      case FreeMismatchErr:
         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, "  <kind>MismatchedFree</kind>");
         VG_(message)(Vg_UserMsg,
                      "%sMismatched free() / delete / delete []%s",
                      xpre, xpost);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      case AddrErr:
         /* Inner switch selects the message by access kind. */
         switch (err_extra->axskind) {
            case ReadAxs:
               if (VG_(clo_xml))
                  VG_(message)(Vg_UserMsg, "  <kind>InvalidRead</kind>");
               VG_(message)(Vg_UserMsg,
                            "%sInvalid read of size %d%s",
                            xpre, err_extra->size, xpost );
               break;
            case WriteAxs:
               if (VG_(clo_xml))
                  VG_(message)(Vg_UserMsg, "  <kind>InvalidWrite</kind>");
               VG_(message)(Vg_UserMsg,
                            "%sInvalid write of size %d%s",
                            xpre, err_extra->size, xpost );
               break;
            case ExecAxs:
               if (VG_(clo_xml))
                  VG_(message)(Vg_UserMsg, "  <kind>InvalidJump</kind>");
               VG_(message)(Vg_UserMsg,
                            "%sJump to the invalid address "
                            "stated on the next line%s",
                            xpre, xpost);
               break;
            default:
               VG_(tool_panic)("mc_pp_Error(axskind)");
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      case OverlapErr: {
         /* Nb: OverlapErr carries an OverlapExtra, not an MC_Error.
            len == -1 means no length was recorded (2-arg form). */
         OverlapExtra* ov_extra = (OverlapExtra*)VG_(get_error_extra)(err);
         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, "  <kind>Overlap</kind>");
         if (ov_extra->len == -1)
            VG_(message)(Vg_UserMsg,
                         "%sSource and destination overlap in %s(%p, %p)%s",
                         xpre,
                         VG_(get_error_string)(err),
                         ov_extra->dst, ov_extra->src,
                         xpost);
         else
            VG_(message)(Vg_UserMsg,
                         "%sSource and destination overlap in %s(%p, %p, %d)%s",
                         xpre,
                         VG_(get_error_string)(err),
                         ov_extra->dst, ov_extra->src, ov_extra->len,
                         xpost);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;
      }
      case LeakErr: {
         MC_(pp_LeakError)(err_extra);
         break;
      }

      case IllegalMempoolErr:
         if (VG_(clo_xml))
            VG_(message)(Vg_UserMsg, "  <kind>InvalidMemPool</kind>");
         VG_(message)(Vg_UserMsg, "%sIllegal memory pool address%s",
                                  xpre, xpost);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         mc_pp_AddrInfo(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      default:
         VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in mc_pp_Error)");
   }
}
2544
2545/*------------------------------------------------------------*/
2546/*--- Recording errors ---*/
2547/*------------------------------------------------------------*/
2548
njn1d0825f2006-03-27 11:37:07 +00002549/* These many bytes below %ESP are considered addressible if we're
2550 doing the --workaround-gcc296-bugs hack. */
2551#define VG_GCC296_BUG_STACK_SLOP 1024
2552
2553/* Is this address within some small distance below %ESP? Used only
2554 for the --workaround-gcc296-bugs kludge. */
2555static Bool is_just_below_ESP( Addr esp, Addr aa )
2556{
2557 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
2558 return True;
2559 else
2560 return False;
2561}
2562
2563static void mc_clear_MC_Error ( MC_Error* err_extra )
2564{
2565 err_extra->axskind = ReadAxs;
2566 err_extra->size = 0;
2567 err_extra->isUnaddr = True;
2568 err_extra->addrinfo.akind = Unknown;
2569 err_extra->addrinfo.blksize = 0;
2570 err_extra->addrinfo.rwoffset = 0;
2571 err_extra->addrinfo.lastchange = NULL;
2572 err_extra->addrinfo.stack_tid = VG_INVALID_THREADID;
2573 err_extra->addrinfo.maybe_gcc = False;
2574 err_extra->addrinfo.desc = NULL;
2575}
2576
2577/* This one called from generated code and non-generated code. */
2578static void mc_record_address_error ( ThreadId tid, Addr a, Int size,
2579 Bool isWrite )
2580{
2581 MC_Error err_extra;
2582 Bool just_below_esp;
2583
2584 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
2585
2586 /* If this is caused by an access immediately below %ESP, and the
2587 user asks nicely, we just ignore it. */
2588 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
2589 return;
2590
2591 mc_clear_MC_Error( &err_extra );
2592 err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
2593 err_extra.size = size;
2594 err_extra.addrinfo.akind = Undescribed;
2595 err_extra.addrinfo.maybe_gcc = just_below_esp;
2596 VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
2597}
2598
2599/* These ones are called from non-generated code */
2600
2601/* This is for memory errors in pthread functions, as opposed to pthread API
2602 errors which are found by the core. */
2603static void mc_record_core_mem_error ( ThreadId tid, Bool isUnaddr, Char* msg )
2604{
2605 MC_Error err_extra;
2606
2607 mc_clear_MC_Error( &err_extra );
2608 err_extra.isUnaddr = isUnaddr;
2609 VG_(maybe_record_error)( tid, CoreMemErr, /*addr*/0, msg, &err_extra );
2610}
2611
2612// Three kinds of param errors:
2613// - register arg contains undefined bytes
2614// - memory arg is unaddressable
2615// - memory arg contains undefined bytes
2616// 'isReg' and 'isUnaddr' dictate which of these it is.
2617static void mc_record_param_error ( ThreadId tid, Addr a, Bool isReg,
2618 Bool isUnaddr, Char* msg )
2619{
2620 MC_Error err_extra;
2621
sewardj1cf56cf2006-05-22 13:59:42 +00002622 if (!isUnaddr) tl_assert(MC_(clo_undef_value_errors));
njn1d0825f2006-03-27 11:37:07 +00002623 tl_assert(VG_INVALID_THREADID != tid);
2624 if (isUnaddr) tl_assert(!isReg); // unaddressable register is impossible
2625 mc_clear_MC_Error( &err_extra );
2626 err_extra.addrinfo.akind = ( isReg ? Register : Undescribed );
2627 err_extra.isUnaddr = isUnaddr;
2628 VG_(maybe_record_error)( tid, ParamErr, a, msg, &err_extra );
2629}
2630
2631static void mc_record_jump_error ( ThreadId tid, Addr a )
2632{
2633 MC_Error err_extra;
2634
2635 tl_assert(VG_INVALID_THREADID != tid);
2636 mc_clear_MC_Error( &err_extra );
2637 err_extra.axskind = ExecAxs;
2638 err_extra.size = 1; // size only used for suppressions
2639 err_extra.addrinfo.akind = Undescribed;
2640 VG_(maybe_record_error)( tid, AddrErr, a, /*s*/NULL, &err_extra );
2641}
2642
2643void MC_(record_free_error) ( ThreadId tid, Addr a )
2644{
2645 MC_Error err_extra;
2646
2647 tl_assert(VG_INVALID_THREADID != tid);
2648 mc_clear_MC_Error( &err_extra );
2649 err_extra.addrinfo.akind = Undescribed;
2650 VG_(maybe_record_error)( tid, FreeErr, a, /*s*/NULL, &err_extra );
2651}
2652
2653void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
2654{
2655 MC_Error err_extra;
2656
2657 tl_assert(VG_INVALID_THREADID != tid);
2658 mc_clear_MC_Error( &err_extra );
2659 err_extra.addrinfo.akind = Undescribed;
2660 VG_(maybe_record_error)( tid, IllegalMempoolErr, a, /*s*/NULL, &err_extra );
2661}
2662
2663void MC_(record_freemismatch_error) ( ThreadId tid, Addr a, MC_Chunk* mc )
2664{
2665 MC_Error err_extra;
2666 AddrInfo* ai;
2667
2668 tl_assert(VG_INVALID_THREADID != tid);
2669 mc_clear_MC_Error( &err_extra );
2670 ai = &err_extra.addrinfo;
2671 ai->akind = Mallocd; // Nb: not 'Freed'
2672 ai->blksize = mc->size;
2673 ai->rwoffset = (Int)a - (Int)mc->data;
2674 ai->lastchange = mc->where;
2675 VG_(maybe_record_error)( tid, FreeMismatchErr, a, /*s*/NULL, &err_extra );
2676}
2677
/* Record an overlap between the source and destination arguments of a
   memcpy/strcpy-style routine.  'function' names the routine; ov_extra
   carries the src/dst/len details and is passed through as the error's
   'extra' (Nb: not an MC_Error). */
static void mc_record_overlap_error ( ThreadId tid,
                                      Char* function, OverlapExtra* ov_extra )
{
   VG_(maybe_record_error)(
      tid, OverlapErr, /*addr*/0, /*s*/function, ov_extra );
}
2684
/* Record a leak error via VG_(unique_error), returning its result.
   'leak_extra' is the leak-checker's own extra record; 'print_record'
   controls whether the error is printed.  GDB attach is disallowed and
   the error is not counted against the error limit. */
Bool MC_(record_leak_error) ( ThreadId tid, /*LeakExtra*/void* leak_extra,
                              ExeContext* where, Bool print_record )
{
   return
   VG_(unique_error) ( tid, LeakErr, /*Addr*/0, /*s*/NULL,
                       /*extra*/leak_extra, where, print_record,
                       /*allow_GDB_attach*/False, /*count_error*/False );
}
2693
2694
/* Record a use of an uninitialised value of the given size.  (Nb: the
   copying/updating of the 'extra' part happens in mc_update_extra(), below.) */
2697/* This one called from generated code and non-generated code. */
njn96364822005-05-08 19:04:53 +00002698static void mc_record_value_error ( ThreadId tid, Int size )
njn9e63cb62005-05-08 18:34:59 +00002699{
njn1d0825f2006-03-27 11:37:07 +00002700 MC_Error err_extra;
njn9e63cb62005-05-08 18:34:59 +00002701
njn1d0825f2006-03-27 11:37:07 +00002702 tl_assert(MC_(clo_undef_value_errors));
2703 mc_clear_MC_Error( &err_extra );
njn9e63cb62005-05-08 18:34:59 +00002704 err_extra.size = size;
2705 err_extra.isUnaddr = False;
2706 VG_(maybe_record_error)( tid, ValueErr, /*addr*/0, /*s*/NULL, &err_extra );
2707}
2708
2709/* This called from non-generated code */
2710
njn96364822005-05-08 19:04:53 +00002711static void mc_record_user_error ( ThreadId tid, Addr a, Bool isWrite,
2712 Bool isUnaddr )
njn9e63cb62005-05-08 18:34:59 +00002713{
njn1d0825f2006-03-27 11:37:07 +00002714 MC_Error err_extra;
njn9e63cb62005-05-08 18:34:59 +00002715
2716 tl_assert(VG_INVALID_THREADID != tid);
njn1d0825f2006-03-27 11:37:07 +00002717 mc_clear_MC_Error( &err_extra );
njn9e63cb62005-05-08 18:34:59 +00002718 err_extra.addrinfo.akind = Undescribed;
2719 err_extra.isUnaddr = isUnaddr;
2720 VG_(maybe_record_error)( tid, UserErr, a, /*s*/NULL, &err_extra );
2721}
2722
njn1d0825f2006-03-27 11:37:07 +00002723__attribute__ ((unused))
2724static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
2725{
2726 if (ai1->akind != Undescribed
2727 && ai2->akind != Undescribed
2728 && ai1->akind != ai2->akind)
2729 return False;
2730 if (ai1->akind == Freed || ai1->akind == Mallocd) {
2731 if (ai1->blksize != ai2->blksize)
2732 return False;
2733 if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
2734 return False;
2735 }
2736 return True;
2737}
2738
2739/* Compare error contexts, to detect duplicates. Note that if they
2740 are otherwise the same, the faulting addrs and associated rwoffsets
2741 are allowed to be different. */
/* Equality relation on Memcheck errors, used by the core to fold
   duplicates.  Nb: errors that are otherwise identical but at
   different faulting addresses/offsets deliberately compare equal. */
static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
{
   MC_Error* e1_extra = VG_(get_error_extra)(e1);
   MC_Error* e2_extra = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case CoreMemErr: {
         /* Same unaddressable-vs-uninitialised flavour and same
            message string. */
         Char *e1s, *e2s;
         if (e1_extra->isUnaddr != e2_extra->isUnaddr) return False;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;   // same pointer => same string
         if (0 == VG_(strcmp)(e1s, e2s)) return True;
         return False;
      }

      // Perhaps we should also check the addrinfo.akinds for equality.
      // That would result in more error reports, but only in cases where
      // a register contains uninitialised bytes and points to memory
      // containing uninitialised bytes.  Currently, the 2nd of those to be
      // detected won't be reported.  That is (nearly?) always the memory
      // error, which is good.
      case ParamErr:
         if (0 != VG_(strcmp)(VG_(get_error_string)(e1),
                              VG_(get_error_string)(e2))) return False;
         // fall through
      case UserErr:
         if (e1_extra->isUnaddr != e2_extra->isUnaddr) return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors.  Not even AddrErr
            below does that.  So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      case ValueErr:
         if (e1_extra->size != e2_extra->size) return False;
         return True;

      case OverlapErr:
         return True;

      case LeakErr:
         VG_(tool_panic)("Shouldn't get LeakErr in mc_eq_Error,\n"
                         "since it's handled with VG_(unique_error)()!");

      case IllegalMempoolErr:
         return True;

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(tool_panic)("unknown error code in mc_eq_Error");
   }
}
2816
2817/* Function used when searching MC_Chunk lists */
/* Returns True iff 'a' lies within chunk 'mc', counting the assumed
   redzones on either side of its payload. */
static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
{
   // Nb: this is not quite right!  It assumes that the heap block has
   // a redzone of size MC_MALLOC_REDZONE_SZB.  That's true for malloc'd
   // blocks, but not necessarily true for custom-alloc'd blocks.  So
   // in some cases this could result in an incorrect description (eg.
   // saying "12 bytes after block A" when really it's within block B.
   // Fixing would require adding redzone size to MC_Chunks, though.
   return VG_(addr_is_in_block)( a, mc->data, mc->size,
                                 MC_MALLOC_REDZONE_SZB );
}
2829
2830// Forward declaration
2831static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
2832
2833/* Describe an address as best you can, for error messages,
2834 putting the result in ai. */
2835static void describe_addr ( Addr a, AddrInfo* ai )
2836{
2837 MC_Chunk* mc;
2838 ThreadId tid;
2839 Addr stack_min, stack_max;
2840
2841 /* Perhaps it's a user-def'd block? */
2842 if (client_perm_maybe_describe( a, ai ))
2843 return;
2844
2845 /* Perhaps it's on a thread's stack? */
2846 VG_(thread_stack_reset_iter)();
2847 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
2848 if (stack_min <= a && a <= stack_max) {
2849 ai->akind = Stack;
2850 ai->stack_tid = tid;
2851 return;
2852 }
2853 }
2854 /* Search for a recently freed block which might bracket it. */
2855 mc = MC_(get_freed_list_head)();
2856 while (mc) {
2857 if (addr_is_in_MC_Chunk(mc, a)) {
2858 ai->akind = Freed;
2859 ai->blksize = mc->size;
2860 ai->rwoffset = (Int)a - (Int)mc->data;
2861 ai->lastchange = mc->where;
2862 return;
2863 }
2864 mc = mc->next;
2865 }
2866 /* Search for a currently malloc'd block which might bracket it. */
2867 VG_(HT_ResetIter)(MC_(malloc_list));
2868 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
2869 if (addr_is_in_MC_Chunk(mc, a)) {
2870 ai->akind = Mallocd;
2871 ai->blksize = mc->size;
2872 ai->rwoffset = (Int)(a) - (Int)mc->data;
2873 ai->lastchange = mc->where;
2874 return;
2875 }
2876 }
2877 /* Clueless ... */
2878 ai->akind = Unknown;
2879 return;
2880}
2881
2882/* Updates the copy with address info if necessary (but not for all errors). */
2883static UInt mc_update_extra( Error* err )
2884{
2885 switch (VG_(get_error_kind)(err)) {
2886 // These two don't have addresses associated with them, and so don't
2887 // need any updating.
2888 case CoreMemErr:
2889 case ValueErr: {
2890 MC_Error* extra = VG_(get_error_extra)(err);
2891 tl_assert(Unknown == extra->addrinfo.akind);
2892 return sizeof(MC_Error);
2893 }
2894
2895 // ParamErrs sometimes involve a memory address; call describe_addr() in
2896 // this case.
2897 case ParamErr: {
2898 MC_Error* extra = VG_(get_error_extra)(err);
2899 tl_assert(Undescribed == extra->addrinfo.akind ||
2900 Register == extra->addrinfo.akind);
2901 if (Undescribed == extra->addrinfo.akind)
2902 describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
2903 return sizeof(MC_Error);
2904 }
2905
2906 // These four always involve a memory address.
2907 case AddrErr:
2908 case UserErr:
2909 case FreeErr:
2910 case IllegalMempoolErr: {
2911 MC_Error* extra = VG_(get_error_extra)(err);
2912 tl_assert(Undescribed == extra->addrinfo.akind);
2913 describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
2914 return sizeof(MC_Error);
2915 }
2916
2917 // FreeMismatchErrs have already had their address described; this is
2918 // possible because we have the MC_Chunk on hand when the error is
2919 // detected. However, the address may be part of a user block, and if so
2920 // we override the pre-determined description with a user block one.
2921 case FreeMismatchErr: {
2922 MC_Error* extra = VG_(get_error_extra)(err);
2923 tl_assert(extra && Mallocd == extra->addrinfo.akind);
2924 (void)client_perm_maybe_describe( VG_(get_error_address)(err),
2925 &(extra->addrinfo) );
2926 return sizeof(MC_Error);
2927 }
2928
2929 // No memory address involved with these ones. Nb: for LeakErrs the
2930 // returned size does not matter -- LeakErrs are always shown with
2931 // VG_(unique_error)() so they're not copied.
2932 case LeakErr: return 0;
2933 case OverlapErr: return sizeof(OverlapExtra);
2934
2935 default: VG_(tool_panic)("mc_update_extra: bad errkind");
2936 }
2937}
2938
njn9e63cb62005-05-08 18:34:59 +00002939/*------------------------------------------------------------*/
2940/*--- Suppressions ---*/
2941/*------------------------------------------------------------*/
2942
njn51d827b2005-05-09 01:02:08 +00002943static Bool mc_recognised_suppression ( Char* name, Supp* su )
njn9e63cb62005-05-08 18:34:59 +00002944{
2945 SuppKind skind;
2946
njn1d0825f2006-03-27 11:37:07 +00002947 if (VG_STREQ(name, "Param")) skind = ParamSupp;
2948 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
2949 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
2950 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
2951 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
2952 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
2953 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
2954 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
2955 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
2956 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
2957 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
njn9e63cb62005-05-08 18:34:59 +00002958 else if (VG_STREQ(name, "Cond")) skind = Value0Supp;
2959 else if (VG_STREQ(name, "Value0")) skind = Value0Supp;/* backwards compat */
2960 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
2961 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
2962 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
2963 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
2964 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
2965 else
2966 return False;
2967
2968 VG_(set_supp_kind)(su, skind);
2969 return True;
2970}
2971
njn1d0825f2006-03-27 11:37:07 +00002972static
2973Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
2974{
2975 Bool eof;
2976
2977 if (VG_(get_supp_kind)(su) == ParamSupp) {
2978 eof = VG_(get_line) ( fd, buf, nBuf );
2979 if (eof) return False;
2980 VG_(set_supp_string)(su, VG_(strdup)(buf));
2981 }
2982 return True;
2983}
2984
2985static Bool mc_error_matches_suppression(Error* err, Supp* su)
2986{
2987 Int su_size;
2988 MC_Error* err_extra = VG_(get_error_extra)(err);
2989 ErrorKind ekind = VG_(get_error_kind )(err);
2990
2991 switch (VG_(get_supp_kind)(su)) {
2992 case ParamSupp:
2993 return (ekind == ParamErr
2994 && VG_STREQ(VG_(get_error_string)(err),
2995 VG_(get_supp_string)(su)));
2996
2997 case CoreMemSupp:
2998 return (ekind == CoreMemErr
2999 && VG_STREQ(VG_(get_error_string)(err),
3000 VG_(get_supp_string)(su)));
3001
3002 case Value0Supp: su_size = 0; goto value_case;
3003 case Value1Supp: su_size = 1; goto value_case;
3004 case Value2Supp: su_size = 2; goto value_case;
3005 case Value4Supp: su_size = 4; goto value_case;
3006 case Value8Supp: su_size = 8; goto value_case;
3007 case Value16Supp:su_size =16; goto value_case;
3008 value_case:
3009 return (ekind == ValueErr && err_extra->size == su_size);
3010
3011 case Addr1Supp: su_size = 1; goto addr_case;
3012 case Addr2Supp: su_size = 2; goto addr_case;
3013 case Addr4Supp: su_size = 4; goto addr_case;
3014 case Addr8Supp: su_size = 8; goto addr_case;
3015 case Addr16Supp:su_size =16; goto addr_case;
3016 addr_case:
3017 return (ekind == AddrErr && err_extra->size == su_size);
3018
3019 case FreeSupp:
3020 return (ekind == FreeErr || ekind == FreeMismatchErr);
3021
3022 case OverlapSupp:
3023 return (ekind = OverlapErr);
3024
3025 case LeakSupp:
3026 return (ekind == LeakErr);
3027
3028 case MempoolSupp:
3029 return (ekind == IllegalMempoolErr);
3030
3031 default:
3032 VG_(printf)("Error:\n"
3033 " unknown suppression type %d\n",
3034 VG_(get_supp_kind)(su));
3035 VG_(tool_panic)("unknown suppression type in "
3036 "MC_(error_matches_suppression)");
3037 }
3038}
3039
3040static Char* mc_get_error_name ( Error* err )
3041{
3042 Char* s;
3043 switch (VG_(get_error_kind)(err)) {
3044 case ParamErr: return "Param";
3045 case UserErr: return NULL; /* Can't suppress User errors */
3046 case FreeMismatchErr: return "Free";
3047 case IllegalMempoolErr: return "Mempool";
3048 case FreeErr: return "Free";
3049 case AddrErr:
3050 switch ( ((MC_Error*)VG_(get_error_extra)(err))->size ) {
3051 case 1: return "Addr1";
3052 case 2: return "Addr2";
3053 case 4: return "Addr4";
3054 case 8: return "Addr8";
3055 case 16: return "Addr16";
3056 default: VG_(tool_panic)("unexpected size for Addr");
3057 }
3058
3059 case ValueErr:
3060 switch ( ((MC_Error*)VG_(get_error_extra)(err))->size ) {
3061 case 0: return "Cond";
3062 case 1: return "Value1";
3063 case 2: return "Value2";
3064 case 4: return "Value4";
3065 case 8: return "Value8";
3066 case 16: return "Value16";
3067 default: VG_(tool_panic)("unexpected size for Value");
3068 }
3069 case CoreMemErr: return "CoreMem";
3070 case OverlapErr: return "Overlap";
3071 case LeakErr: return "Leak";
3072 default: VG_(tool_panic)("get_error_name: unexpected type");
3073 }
3074 VG_(printf)(s);
3075}
3076
3077static void mc_print_extra_suppression_info ( Error* err )
3078{
3079 if (ParamErr == VG_(get_error_kind)(err)) {
3080 VG_(printf)(" %s\n", VG_(get_error_string)(err));
3081 }
3082}
3083
njn9e63cb62005-05-08 18:34:59 +00003084/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00003085/*--- Functions called directly from generated code: ---*/
3086/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00003087/*------------------------------------------------------------*/
3088
njn1d0825f2006-03-27 11:37:07 +00003089/* Types: LOADV32, LOADV16, LOADV8 are:
sewardj6cf40ff2005-04-20 22:31:26 +00003090 UWord fn ( Addr a )
3091 so they return 32-bits on 32-bit machines and 64-bits on
3092 64-bit machines. Addr has the same size as a host word.
3093
njn1d0825f2006-03-27 11:37:07 +00003094 LOADV64 is always ULong fn ( Addr a )
sewardj6cf40ff2005-04-20 22:31:26 +00003095
njn1d0825f2006-03-27 11:37:07 +00003096 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
3097 are a UWord, and for STOREV64 they are a ULong.
sewardj6cf40ff2005-04-20 22:31:26 +00003098*/
3099
njn1d0825f2006-03-27 11:37:07 +00003100/* If any part of '_a' indicated by the mask is 1, either
njn45e81252006-03-28 12:35:08 +00003101 '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
njn1d0825f2006-03-27 11:37:07 +00003102 covered by the primary map. */
njn45e81252006-03-28 12:35:08 +00003103#define UNALIGNED_OR_HIGH(_a,_sz) ((_a) & MASK((_sz>>3)))
njn1d0825f2006-03-27 11:37:07 +00003104#define MASK(_sz) ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
3105
3106
sewardj95448072004-11-22 20:19:51 +00003107/* ------------------------ Size = 8 ------------------------ */
3108
njn1d0825f2006-03-27 11:37:07 +00003109static INLINE
3110ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
3111{
3112 UWord sm_off16, vabits16;
3113 SecMap* sm;
3114
3115 PROF_EVENT(200, "mc_LOADV64");
3116
3117#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003118 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003119#else
njn45e81252006-03-28 12:35:08 +00003120 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003121 PROF_EVENT(201, "mc_LOADV64-slow1");
njn45e81252006-03-28 12:35:08 +00003122 return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
sewardjf9d81612005-04-23 23:25:49 +00003123 }
3124
njna7c7ebd2006-03-28 12:51:02 +00003125 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003126 sm_off16 = SM_OFF_16(a);
3127 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3128
3129 // Handle common case quickly: a is suitably aligned, is mapped, and
3130 // addressible.
3131 // Convert V bits from compact memory form to expanded register form.
njndbf7ca72006-03-31 11:57:59 +00003132 if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003133 return V_BITS64_DEFINED;
njndbf7ca72006-03-31 11:57:59 +00003134 } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003135 return V_BITS64_UNDEFINED;
3136 } else {
njndbf7ca72006-03-31 11:57:59 +00003137 /* Slow case: the 8 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003138 PROF_EVENT(202, "mc_LOADV64-slow2");
njn45e81252006-03-28 12:35:08 +00003139 return mc_LOADVn_slow( a, 64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003140 }
3141#endif
3142}
3143
/* Big-endian 64-bit load handler, called directly from generated code. */
VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
{
   return mc_LOADV64(a, True);
}
/* Little-endian 64-bit load handler, called directly from generated code. */
VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
{
   return mc_LOADV64(a, False);
}
sewardjf9d81612005-04-23 23:25:49 +00003152
sewardjf9d81612005-04-23 23:25:49 +00003153
njn1d0825f2006-03-27 11:37:07 +00003154static INLINE
njn4cf530b2006-04-06 13:33:48 +00003155void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003156{
3157 UWord sm_off16, vabits16;
3158 SecMap* sm;
3159
3160 PROF_EVENT(210, "mc_STOREV64");
3161
3162#ifndef PERF_FAST_STOREV
3163 // XXX: this slow case seems to be marginally faster than the fast case!
3164 // Investigate further.
njn4cf530b2006-04-06 13:33:48 +00003165 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003166#else
njn45e81252006-03-28 12:35:08 +00003167 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
njn1d0825f2006-03-27 11:37:07 +00003168 PROF_EVENT(211, "mc_STOREV64-slow1");
njn4cf530b2006-04-06 13:33:48 +00003169 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003170 return;
sewardjf9d81612005-04-23 23:25:49 +00003171 }
3172
njna7c7ebd2006-03-28 12:51:02 +00003173 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003174 sm_off16 = SM_OFF_16(a);
3175 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3176
3177 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003178 (VA_BITS16_DEFINED == vabits16 ||
3179 VA_BITS16_UNDEFINED == vabits16) ))
njn1d0825f2006-03-27 11:37:07 +00003180 {
3181 /* Handle common case quickly: a is suitably aligned, */
3182 /* is mapped, and is addressible. */
3183 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003184 if (V_BITS64_DEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003185 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003186 } else if (V_BITS64_UNDEFINED == vbits64) {
njndbf7ca72006-03-31 11:57:59 +00003187 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003188 } else {
3189 /* Slow but general case -- writing partially defined bytes. */
3190 PROF_EVENT(212, "mc_STOREV64-slow2");
njn4cf530b2006-04-06 13:33:48 +00003191 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003192 }
3193 } else {
3194 /* Slow but general case. */
3195 PROF_EVENT(213, "mc_STOREV64-slow3");
njn4cf530b2006-04-06 13:33:48 +00003196 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003197 }
3198#endif
3199}
3200
njn4cf530b2006-04-06 13:33:48 +00003201VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003202{
njn4cf530b2006-04-06 13:33:48 +00003203 mc_STOREV64(a, vbits64, True);
njn1d0825f2006-03-27 11:37:07 +00003204}
njn4cf530b2006-04-06 13:33:48 +00003205VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
njn1d0825f2006-03-27 11:37:07 +00003206{
njn4cf530b2006-04-06 13:33:48 +00003207 mc_STOREV64(a, vbits64, False);
njn1d0825f2006-03-27 11:37:07 +00003208}
sewardj95448072004-11-22 20:19:51 +00003209
sewardj95448072004-11-22 20:19:51 +00003210
3211/* ------------------------ Size = 4 ------------------------ */
3212
njn1d0825f2006-03-27 11:37:07 +00003213static INLINE
3214UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
3215{
3216 UWord sm_off, vabits8;
3217 SecMap* sm;
3218
3219 PROF_EVENT(220, "mc_LOADV32");
3220
3221#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003222 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003223#else
njn45e81252006-03-28 12:35:08 +00003224 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003225 PROF_EVENT(221, "mc_LOADV32-slow1");
njn45e81252006-03-28 12:35:08 +00003226 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003227 }
3228
njna7c7ebd2006-03-28 12:51:02 +00003229 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003230 sm_off = SM_OFF(a);
3231 vabits8 = sm->vabits8[sm_off];
3232
3233 // Handle common case quickly: a is suitably aligned, is mapped, and the
3234 // entire word32 it lives in is addressible.
3235 // Convert V bits from compact memory form to expanded register form.
3236 // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
3237 // Almost certainly not necessary, but be paranoid.
njndbf7ca72006-03-31 11:57:59 +00003238 if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003239 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
njndbf7ca72006-03-31 11:57:59 +00003240 } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED)) {
njn1d0825f2006-03-27 11:37:07 +00003241 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
3242 } else {
njndbf7ca72006-03-31 11:57:59 +00003243 /* Slow case: the 4 bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003244 PROF_EVENT(222, "mc_LOADV32-slow2");
njn45e81252006-03-28 12:35:08 +00003245 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003246 }
3247#endif
3248}
3249
/* Big-endian 32-bit load handler, called directly from generated code. */
VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
{
   return mc_LOADV32(a, True);
}
/* Little-endian 32-bit load handler, called directly from generated code. */
VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
{
   return mc_LOADV32(a, False);
}
sewardjc1a2cda2005-04-21 17:34:00 +00003258
sewardjc1a2cda2005-04-21 17:34:00 +00003259
njn1d0825f2006-03-27 11:37:07 +00003260static INLINE
njn4cf530b2006-04-06 13:33:48 +00003261void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003262{
3263 UWord sm_off, vabits8;
3264 SecMap* sm;
3265
3266 PROF_EVENT(230, "mc_STOREV32");
3267
3268#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00003269 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003270#else
njn45e81252006-03-28 12:35:08 +00003271 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
njn1d0825f2006-03-27 11:37:07 +00003272 PROF_EVENT(231, "mc_STOREV32-slow1");
njn4cf530b2006-04-06 13:33:48 +00003273 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003274 return;
sewardjc1a2cda2005-04-21 17:34:00 +00003275 }
3276
njna7c7ebd2006-03-28 12:51:02 +00003277 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003278 sm_off = SM_OFF(a);
3279 vabits8 = sm->vabits8[sm_off];
3280
3281//---------------------------------------------------------------------------
3282#if 1
3283 // Cleverness: sometimes we don't have to write the shadow memory at
3284 // all, if we can tell that what we want to write is the same as what is
3285 // already there.
njn4cf530b2006-04-06 13:33:48 +00003286 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003287 if (vabits8 == (UInt)VA_BITS8_DEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003288 return;
njndbf7ca72006-03-31 11:57:59 +00003289 } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
3290 sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
njn1d0825f2006-03-27 11:37:07 +00003291 } else {
njndbf7ca72006-03-31 11:57:59 +00003292 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003293 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003294 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003295 }
njn4cf530b2006-04-06 13:33:48 +00003296 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003297 if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
njn1d0825f2006-03-27 11:37:07 +00003298 return;
njndbf7ca72006-03-31 11:57:59 +00003299 } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
3300 sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003301 } else {
njndbf7ca72006-03-31 11:57:59 +00003302 // not defined/undefined, or distinguished and changing state
njn1d0825f2006-03-27 11:37:07 +00003303 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003304 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003305 }
3306 } else {
3307 // Partially defined word
3308 PROF_EVENT(234, "mc_STOREV32-slow4");
njn4cf530b2006-04-06 13:33:48 +00003309 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003310 }
3311//---------------------------------------------------------------------------
3312#else
3313 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003314 (VA_BITS8_DEFINED == vabits8 ||
3315 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00003316 {
3317 /* Handle common case quickly: a is suitably aligned, */
3318 /* is mapped, and is addressible. */
3319 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003320 if (V_BITS32_DEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003321 sm->vabits8[sm_off] = VA_BITS8_DEFINED;
njn4cf530b2006-04-06 13:33:48 +00003322 } else if (V_BITS32_UNDEFINED == vbits32) {
njndbf7ca72006-03-31 11:57:59 +00003323 sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
njn1d0825f2006-03-27 11:37:07 +00003324 } else {
3325 /* Slow but general case -- writing partially defined bytes. */
3326 PROF_EVENT(232, "mc_STOREV32-slow2");
njn4cf530b2006-04-06 13:33:48 +00003327 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003328 }
3329 } else {
3330 /* Slow but general case. */
3331 PROF_EVENT(233, "mc_STOREV32-slow3");
njn4cf530b2006-04-06 13:33:48 +00003332 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003333 }
3334#endif
3335//---------------------------------------------------------------------------
3336#endif
3337}
3338
njn4cf530b2006-04-06 13:33:48 +00003339VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003340{
njn4cf530b2006-04-06 13:33:48 +00003341 mc_STOREV32(a, vbits32, True);
njn1d0825f2006-03-27 11:37:07 +00003342}
njn4cf530b2006-04-06 13:33:48 +00003343VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
njn1d0825f2006-03-27 11:37:07 +00003344{
njn4cf530b2006-04-06 13:33:48 +00003345 mc_STOREV32(a, vbits32, False);
njn1d0825f2006-03-27 11:37:07 +00003346}
njn25e49d8e72002-09-23 09:36:25 +00003347
njn25e49d8e72002-09-23 09:36:25 +00003348
sewardj95448072004-11-22 20:19:51 +00003349/* ------------------------ Size = 2 ------------------------ */
3350
njn1d0825f2006-03-27 11:37:07 +00003351static INLINE
3352UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
3353{
3354 UWord sm_off, vabits8;
3355 SecMap* sm;
3356
3357 PROF_EVENT(240, "mc_LOADV16");
3358
3359#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003360 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003361#else
njn45e81252006-03-28 12:35:08 +00003362 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00003363 PROF_EVENT(241, "mc_LOADV16-slow1");
njn45e81252006-03-28 12:35:08 +00003364 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
sewardjc1a2cda2005-04-21 17:34:00 +00003365 }
3366
njna7c7ebd2006-03-28 12:51:02 +00003367 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003368 sm_off = SM_OFF(a);
3369 vabits8 = sm->vabits8[sm_off];
3370 // Handle common case quickly: a is suitably aligned, is mapped, and is
3371 // addressible.
3372 // Convert V bits from compact memory form to expanded register form
3373 // XXX: set the high 16/48 bits of retval to 1 for 64-bit paranoia?
njndbf7ca72006-03-31 11:57:59 +00003374 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS16_DEFINED; }
3375 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00003376 else {
njndbf7ca72006-03-31 11:57:59 +00003377 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
njn1d0825f2006-03-27 11:37:07 +00003378 // the two sub-bytes.
3379 UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00003380 if (vabits4 == VA_BITS4_DEFINED ) { return V_BITS16_DEFINED; }
3381 else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00003382 else {
njndbf7ca72006-03-31 11:57:59 +00003383 /* Slow case: the two bytes are not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003384 PROF_EVENT(242, "mc_LOADV16-slow2");
njn45e81252006-03-28 12:35:08 +00003385 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003386 }
3387 }
3388#endif
3389}
3390
3391VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
3392{
3393 return mc_LOADV16(a, True);
3394}
3395VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
3396{
3397 return mc_LOADV16(a, False);
3398}
sewardjc1a2cda2005-04-21 17:34:00 +00003399
sewardjc1a2cda2005-04-21 17:34:00 +00003400
njn1d0825f2006-03-27 11:37:07 +00003401static INLINE
njn4cf530b2006-04-06 13:33:48 +00003402void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
njn1d0825f2006-03-27 11:37:07 +00003403{
3404 UWord sm_off, vabits8;
3405 SecMap* sm;
3406
3407 PROF_EVENT(250, "mc_STOREV16");
3408
3409#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00003410 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003411#else
njn45e81252006-03-28 12:35:08 +00003412 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
njn1d0825f2006-03-27 11:37:07 +00003413 PROF_EVENT(251, "mc_STOREV16-slow1");
njn4cf530b2006-04-06 13:33:48 +00003414 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003415 return;
sewardjc1a2cda2005-04-21 17:34:00 +00003416 }
3417
njna7c7ebd2006-03-28 12:51:02 +00003418 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003419 sm_off = SM_OFF(a);
3420 vabits8 = sm->vabits8[sm_off];
3421 if (EXPECTED_TAKEN( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003422 (VA_BITS8_DEFINED == vabits8 ||
3423 VA_BITS8_UNDEFINED == vabits8) ))
njn1d0825f2006-03-27 11:37:07 +00003424 {
3425 /* Handle common case quickly: a is suitably aligned, */
3426 /* is mapped, and is addressible. */
3427 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003428 if (V_BITS16_DEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00003429 insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
njn1d0825f2006-03-27 11:37:07 +00003430 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00003431 } else if (V_BITS16_UNDEFINED == vbits16) {
njndbf7ca72006-03-31 11:57:59 +00003432 insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00003433 &(sm->vabits8[sm_off]) );
3434 } else {
3435 /* Slow but general case -- writing partially defined bytes. */
3436 PROF_EVENT(252, "mc_STOREV16-slow2");
njn4cf530b2006-04-06 13:33:48 +00003437 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003438 }
3439 } else {
3440 /* Slow but general case. */
3441 PROF_EVENT(253, "mc_STOREV16-slow3");
njn4cf530b2006-04-06 13:33:48 +00003442 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
njn1d0825f2006-03-27 11:37:07 +00003443 }
3444#endif
3445}
njn25e49d8e72002-09-23 09:36:25 +00003446
njn4cf530b2006-04-06 13:33:48 +00003447VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00003448{
njn4cf530b2006-04-06 13:33:48 +00003449 mc_STOREV16(a, vbits16, True);
njn1d0825f2006-03-27 11:37:07 +00003450}
njn4cf530b2006-04-06 13:33:48 +00003451VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
njn1d0825f2006-03-27 11:37:07 +00003452{
njn4cf530b2006-04-06 13:33:48 +00003453 mc_STOREV16(a, vbits16, False);
njn1d0825f2006-03-27 11:37:07 +00003454}
sewardj5d28efc2005-04-21 22:16:29 +00003455
njn25e49d8e72002-09-23 09:36:25 +00003456
sewardj95448072004-11-22 20:19:51 +00003457/* ------------------------ Size = 1 ------------------------ */
sewardj8cf88b72005-07-08 01:29:33 +00003458/* Note: endianness is irrelevant for size == 1 */
sewardj95448072004-11-22 20:19:51 +00003459
njnaf839f52005-06-23 03:27:57 +00003460VG_REGPARM(1)
njn1d0825f2006-03-27 11:37:07 +00003461UWord MC_(helperc_LOADV8) ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00003462{
njn1d0825f2006-03-27 11:37:07 +00003463 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00003464 SecMap* sm;
3465
njn1d0825f2006-03-27 11:37:07 +00003466 PROF_EVENT(260, "mc_LOADV8");
sewardjc1a2cda2005-04-21 17:34:00 +00003467
njn1d0825f2006-03-27 11:37:07 +00003468#ifndef PERF_FAST_LOADV
njn45e81252006-03-28 12:35:08 +00003469 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00003470#else
njn45e81252006-03-28 12:35:08 +00003471 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00003472 PROF_EVENT(261, "mc_LOADV8-slow1");
njn45e81252006-03-28 12:35:08 +00003473 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00003474 }
3475
njna7c7ebd2006-03-28 12:51:02 +00003476 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003477 sm_off = SM_OFF(a);
3478 vabits8 = sm->vabits8[sm_off];
3479 // Convert V bits from compact memory form to expanded register form
3480 // Handle common case quickly: a is mapped, and the entire
3481 // word32 it lives in is addressible.
3482 // XXX: set the high 24/56 bits of retval to 1 for 64-bit paranoia?
njndbf7ca72006-03-31 11:57:59 +00003483 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS8_DEFINED; }
3484 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00003485 else {
njndbf7ca72006-03-31 11:57:59 +00003486 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
njn1d0825f2006-03-27 11:37:07 +00003487 // the single byte.
3488 UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
njndbf7ca72006-03-31 11:57:59 +00003489 if (vabits2 == VA_BITS2_DEFINED ) { return V_BITS8_DEFINED; }
3490 else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
njn1d0825f2006-03-27 11:37:07 +00003491 else {
njndbf7ca72006-03-31 11:57:59 +00003492 /* Slow case: the byte is not all-defined or all-undefined. */
njn1d0825f2006-03-27 11:37:07 +00003493 PROF_EVENT(262, "mc_LOADV8-slow2");
njn45e81252006-03-28 12:35:08 +00003494 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00003495 }
sewardjc1a2cda2005-04-21 17:34:00 +00003496 }
njn1d0825f2006-03-27 11:37:07 +00003497#endif
njn25e49d8e72002-09-23 09:36:25 +00003498}
3499
sewardjc1a2cda2005-04-21 17:34:00 +00003500
njnaf839f52005-06-23 03:27:57 +00003501VG_REGPARM(2)
njn4cf530b2006-04-06 13:33:48 +00003502void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
njn25e49d8e72002-09-23 09:36:25 +00003503{
njn1d0825f2006-03-27 11:37:07 +00003504 UWord sm_off, vabits8;
sewardjae986ca2005-10-12 12:53:20 +00003505 SecMap* sm;
3506
njn1d0825f2006-03-27 11:37:07 +00003507 PROF_EVENT(270, "mc_STOREV8");
sewardjc1a2cda2005-04-21 17:34:00 +00003508
njn1d0825f2006-03-27 11:37:07 +00003509#ifndef PERF_FAST_STOREV
njn4cf530b2006-04-06 13:33:48 +00003510 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00003511#else
njn45e81252006-03-28 12:35:08 +00003512 if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
njn1d0825f2006-03-27 11:37:07 +00003513 PROF_EVENT(271, "mc_STOREV8-slow1");
njn4cf530b2006-04-06 13:33:48 +00003514 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00003515 return;
3516 }
3517
njna7c7ebd2006-03-28 12:51:02 +00003518 sm = get_secmap_for_reading_low(a);
njn1d0825f2006-03-27 11:37:07 +00003519 sm_off = SM_OFF(a);
3520 vabits8 = sm->vabits8[sm_off];
3521 if (EXPECTED_TAKEN
3522 ( !is_distinguished_sm(sm) &&
njndbf7ca72006-03-31 11:57:59 +00003523 ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
njn1d0825f2006-03-27 11:37:07 +00003524 || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
3525 )
3526 )
3527 )
3528 {
sewardjc1a2cda2005-04-21 17:34:00 +00003529 /* Handle common case quickly: a is mapped, the entire word32 it
3530 lives in is addressible. */
njn1d0825f2006-03-27 11:37:07 +00003531 // Convert full V-bits in register to compact 2-bit form.
njn4cf530b2006-04-06 13:33:48 +00003532 if (V_BITS8_DEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00003533 insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
njn1d0825f2006-03-27 11:37:07 +00003534 &(sm->vabits8[sm_off]) );
njn4cf530b2006-04-06 13:33:48 +00003535 } else if (V_BITS8_UNDEFINED == vbits8) {
njndbf7ca72006-03-31 11:57:59 +00003536 insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
njn1d0825f2006-03-27 11:37:07 +00003537 &(sm->vabits8[sm_off]) );
3538 } else {
3539 /* Slow but general case -- writing partially defined bytes. */
3540 PROF_EVENT(272, "mc_STOREV8-slow2");
njn4cf530b2006-04-06 13:33:48 +00003541 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
njn1d0825f2006-03-27 11:37:07 +00003542 }
sewardjc1a2cda2005-04-21 17:34:00 +00003543 } else {
njn1d0825f2006-03-27 11:37:07 +00003544 /* Slow but general case. */
3545 PROF_EVENT(273, "mc_STOREV8-slow3");
njn4cf530b2006-04-06 13:33:48 +00003546 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00003547 }
njn1d0825f2006-03-27 11:37:07 +00003548#endif
njn25e49d8e72002-09-23 09:36:25 +00003549}
3550
3551
sewardjc859fbf2005-04-22 21:10:28 +00003552/*------------------------------------------------------------*/
3553/*--- Functions called directly from generated code: ---*/
3554/*--- Value-check failure handlers. ---*/
3555/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003556
/* Called from generated code on use of an undefined value where the
   access size is unknown/not applicable (reported size 0). */
void MC_(helperc_value_check0_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 0 );
}
3561
/* Called from generated code on use of an undefined 1-byte value. */
void MC_(helperc_value_check1_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 1 );
}
3566
/* Called from generated code on use of an undefined 4-byte value. */
void MC_(helperc_value_check4_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 4 );
}
3571
/* Called from generated code on use of an undefined 8-byte value. */
void MC_(helperc_value_check8_fail) ( void )
{
   mc_record_value_error ( VG_(get_running_tid)(), 8 );
}
3576
/* Called from generated code on use of an undefined value of size
   'sz' bytes, where 'sz' is only known at run time. */
VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
{
   mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
}
3581
njn25e49d8e72002-09-23 09:36:25 +00003582
sewardjc2c12c22006-03-08 13:20:09 +00003583/*------------------------------------------------------------*/
3584/*--- Metadata get/set functions, for client requests. ---*/
3585/*------------------------------------------------------------*/
3586
njn1d0825f2006-03-27 11:37:07 +00003587// Nb: this expands the V+A bits out into register-form V bits, even though
3588// they're in memory. This is for backward compatibility, and because it's
3589// probably what the user wants.
3590
/* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
   error [no longer used], 3 == addressing error. */
/* Backs the GET_VBITS/SET_VBITS client requests.  'a' is the client
   memory region of 'szB' bytes; 'vbits' is a same-sized client buffer
   holding the V bits in expanded (one byte per byte) form.  When
   'setting' is True the V bits are copied vbits -> a, otherwise a ->
   vbits.  Both regions must be fully addressable or 3 is returned and
   an addressing error is recorded against 'tid'. */
static Int mc_get_or_set_vbits_for_client (
   ThreadId tid,
   Addr a,
   Addr vbits,
   SizeT szB,
   Bool setting /* True <=> set vbits,  False <=> get vbits */
)
{
   SizeT i;
   Bool  ok;
   UChar vbits8;

   /* Check that arrays are addressible before doing any getting/setting. */
   for (i = 0; i < szB; i++) {
      if (VA_BITS2_NOACCESS == get_vabits2(a + i)) {
         /* Whether the bad access is a "read" or "write" depends on the
            direction of the copy, hence the setting ? ... : ... below. */
         mc_record_address_error( tid, a + i, 1, setting ? True : False );
         return 3;
      }
      if (VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
         mc_record_address_error( tid, vbits + i, 1, setting ? False : True );
         return 3;
      }
   }

   /* Do the copy */
   if (setting) {
      // It's actually a tool ClientReq, but Vg_CoreClientReq is the closest
      // thing we have.
      check_mem_is_defined(Vg_CoreClientReq, tid, "SET_VBITS(vbits)",
                           vbits, szB);

      /* setting */
      for (i = 0; i < szB; i++) {
         ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
         tl_assert(ok);
      }
   } else {
      /* getting */
      for (i = 0; i < szB; i++) {
         ok = get_vbits8(a + i, &vbits8);
         tl_assert(ok);
// XXX: used to do this, but it's a pain
//         if (V_BITS8_DEFINED != vbits8)
//            mc_record_value_error(tid, 1);
         ((UChar*)vbits)[i] = vbits8;
      }
      // The bytes in vbits[] have now been set, so mark them as such.
      MC_(make_mem_defined)(vbits, szB);
   }

   return 1;
}
sewardj05fe85e2005-04-27 22:46:36 +00003646
3647
3648/*------------------------------------------------------------*/
3649/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
3650/*------------------------------------------------------------*/
3651
3652/* For the memory leak detector, say whether an entire 64k chunk of
3653 address space is possibly in use, or not. If in doubt return
3654 True.
3655*/
3656static
3657Bool mc_is_within_valid_secondary ( Addr a )
3658{
3659 SecMap* sm = maybe_get_secmap_for ( a );
3660 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
3661 /* Definitely not in use. */
3662 return False;
3663 } else {
3664 return True;
3665 }
3666}
3667
3668
3669/* For the memory leak detector, say whether or not a given word
3670 address is to be regarded as valid. */
3671static
3672Bool mc_is_valid_aligned_word ( Addr a )
3673{
3674 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
3675 if (sizeof(UWord) == 4) {
3676 tl_assert(VG_IS_4_ALIGNED(a));
3677 } else {
3678 tl_assert(VG_IS_8_ALIGNED(a));
3679 }
njndbf7ca72006-03-31 11:57:59 +00003680 if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok) {
sewardj05fe85e2005-04-27 22:46:36 +00003681 return True;
3682 } else {
3683 return False;
3684 }
3685}
sewardja4495682002-10-21 07:29:59 +00003686
3687
nethercote996901a2004-08-03 13:29:09 +00003688/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00003689 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00003690 tool. */
/* Run the generic leak detector, passing Memcheck's notions of which
   64k chunks are live and which aligned words are scannable.  'mode'
   selects summary vs full reporting. */
static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
{
   MC_(do_detect_memory_leaks) (
      tid,
      mode,
      mc_is_within_valid_secondary,
      mc_is_valid_aligned_word
   );
}
3700
3701
sewardjc859fbf2005-04-22 21:10:28 +00003702/*------------------------------------------------------------*/
3703/*--- Initialisation ---*/
3704/*------------------------------------------------------------*/
3705
/* One-time setup of the shadow-memory machinery: build the three
   distinguished (shared, read-only) secondaries, point every primary
   map entry at the no-access one, and create the secondary V bit
   table. */
static void init_shadow_memory ( void )
{
   Int     i;
   SecMap* sm;

   /* Sanity-check the encoding constants this module relies on. */
   tl_assert(V_BIT_UNDEFINED   == 1);
   tl_assert(V_BIT_DEFINED     == 0);
   tl_assert(V_BITS8_UNDEFINED == 0xFF);
   tl_assert(V_BITS8_DEFINED   == 0);

   /* Build the 3 distinguished secondaries */
   sm = &sm_distinguished[SM_DIST_NOACCESS];
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;

   sm = &sm_distinguished[SM_DIST_UNDEFINED];
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;

   sm = &sm_distinguished[SM_DIST_DEFINED];
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;

   /* Set up the primary map. */
   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < N_PRIMARY_MAP; i++)
      primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];

   /* auxmap_size = auxmap_used = 0;
      no ... these are statically initialised */

   /* Secondary V bit table */
   secVBitTable = createSecVBitTable();
}
3738
3739
3740/*------------------------------------------------------------*/
3741/*--- Sanity check machinery (permanently engaged) ---*/
3742/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003743
/* Cheap (per-scheduling-event) sanity check.  There is nothing useful
   we can verify quickly, so just count the call and succeed. */
static Bool mc_cheap_sanity_check ( void )
{
   /* nothing useful we can rapidly check */
   n_sanity_cheap++;
   PROF_EVENT(490, "cheap_sanity_check");
   return True;
}
3751
/* Expensive sanity check: verify the shadow-memory invariants.
   Returns False (causing the core to abort) if any invariant is
   broken; each failing group prints a diagnostic first. */
static Bool mc_expensive_sanity_check ( void )
{
   Int     i, n_secmaps_found;
   SecMap* sm;
   Bool    bad = False;

   n_sanity_expensive++;
   PROF_EVENT(491, "expensive_sanity_check");

   /* Check that the 3 distinguished SMs are still as they should be. */

   /* Check noaccess DSM. */
   sm = &sm_distinguished[SM_DIST_NOACCESS];
   for (i = 0; i < SM_CHUNKS; i++)
      if (sm->vabits8[i] != VA_BITS8_NOACCESS)
         bad = True;

   /* Check undefined DSM. */
   sm = &sm_distinguished[SM_DIST_UNDEFINED];
   for (i = 0; i < SM_CHUNKS; i++)
      if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
         bad = True;

   /* Check defined DSM. */
   sm = &sm_distinguished[SM_DIST_DEFINED];
   for (i = 0; i < SM_CHUNKS; i++)
      if (sm->vabits8[i] != VA_BITS8_DEFINED)
         bad = True;

   if (bad) {
      VG_(printf)("memcheck expensive sanity: "
                  "distinguished_secondaries have changed\n");
      return False;
   }

   /* If we're not checking for undefined value errors, the secondary V bit
    * table should be empty. */
   if (!MC_(clo_undef_value_errors)) {
      if (0 != VG_(OSet_Size)(secVBitTable))
         return False;
   }

   /* check nonsensical auxmap sizing */
   if (auxmap_used > auxmap_size)
      bad = True;

   if (bad) {
      VG_(printf)("memcheck expensive sanity: "
                  "nonsensical auxmap sizing\n");
      return False;
   }

   /* check that the number of secmaps issued matches the number that
      are reachable (iow, no secmap leaks) */
   n_secmaps_found = 0;
   /* Count non-distinguished secondaries reachable via the primary map... */
   for (i = 0; i < N_PRIMARY_MAP; i++) {
      if (primary_map[i] == NULL) {
         bad = True;
      } else {
         if (!is_distinguished_sm(primary_map[i]))
            n_secmaps_found++;
      }
   }

   /* ... and via the auxmap. */
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].sm == NULL) {
         bad = True;
      } else {
         if (!is_distinguished_sm(auxmap[i].sm))
            n_secmaps_found++;
      }
   }

   if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
      bad = True;

   if (bad) {
      VG_(printf)("memcheck expensive sanity: "
                  "apparent secmap leakage\n");
      return False;
   }

   /* check that auxmap only covers address space that the primary doesn't */

   for (i = 0; i < auxmap_used; i++)
      if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
         bad = True;

   if (bad) {
      VG_(printf)("memcheck expensive sanity: "
                  "auxmap covers wrong address space\n");
      return False;
   }

   /* there is only one pointer to each secmap (expensive) */

   return True;
}
sewardj45d94cc2005-04-20 14:44:11 +00003850
njn25e49d8e72002-09-23 09:36:25 +00003851/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00003852/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00003853/*------------------------------------------------------------*/
3854
/* Memcheck command-line option values with their defaults; written by
   mc_process_cmd_line_options() below. */
Bool MC_(clo_partial_loads_ok) = False;      /* --partial-loads-ok */
Int MC_(clo_freelist_vol) = 5000000;         /* --freelist-vol: volume of
                                                freed-blocks queue */
LeakCheckMode MC_(clo_leak_check) = LC_Summary;  /* --leak-check */
VgRes MC_(clo_leak_resolution) = Vg_LowRes;  /* --leak-resolution */
Bool MC_(clo_show_reachable) = False;        /* --show-reachable */
Bool MC_(clo_workaround_gcc296_bugs) = False; /* --workaround-gcc296-bugs */
Bool MC_(clo_undef_value_errors) = True;     /* --undef-value-errors */
3862
/* Parse one Memcheck command-line option.  Returns True if 'arg' was
   recognised (by us or by the replacement-malloc options handler),
   False otherwise.  The VG_*_CLO macros match-and-assign and fall
   through to the next 'else' on no match. */
static Bool mc_process_cmd_line_options(Char* arg)
{
        VG_BOOL_CLO(arg, "--partial-loads-ok",      MC_(clo_partial_loads_ok))
   else VG_BOOL_CLO(arg, "--show-reachable",        MC_(clo_show_reachable))
   else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))

   else VG_BOOL_CLO(arg, "--undef-value-errors",    MC_(clo_undef_value_errors))

   else VG_BNUM_CLO(arg, "--freelist-vol",  MC_(clo_freelist_vol), 0, 1000000000)

   else if (VG_CLO_STREQ(arg, "--leak-check=no"))
      MC_(clo_leak_check) = LC_Off;
   else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
      MC_(clo_leak_check) = LC_Summary;
   else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
            VG_CLO_STREQ(arg, "--leak-check=full"))
      MC_(clo_leak_check) = LC_Full;

   else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
      MC_(clo_leak_resolution) = Vg_LowRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
      MC_(clo_leak_resolution) = Vg_MedRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
      MC_(clo_leak_resolution) = Vg_HighRes;

   else
      /* Not ours; maybe it belongs to the malloc-replacement layer. */
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
3893
/* Print Memcheck's user-visible command-line options (for --help). */
static void mc_print_usage(void)
{
   VG_(printf)(
" --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
" --leak-resolution=low|med|high how much bt merging in leak check [low]\n"
" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
" --undef-value-errors=no|yes check for undefined value errors [yes]\n"
" --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
" --freelist-vol=<number> volume of freed blocks queue [5000000]\n"
" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
   );
   VG_(replacement_malloc_print_usage)();
}
3907
/* Print Memcheck's debugging options (for --help-debug).  Memcheck has
   none of its own; delegate to the malloc-replacement layer. */
static void mc_print_debug_usage(void)
{
   VG_(replacement_malloc_print_debug_usage)();
}
3912
sewardjf3418c02005-11-08 14:10:24 +00003913
nethercote8b76fe52004-11-08 19:20:09 +00003914/*------------------------------------------------------------*/
3915/*--- Client requests ---*/
3916/*------------------------------------------------------------*/
3917
3918/* Client block management:
3919
3920 This is managed as an expanding array of client block descriptors.
3921 Indices of live descriptors are issued to the client, so it can ask
3922 to free them later. Therefore we cannot slide live entries down
3923 over dead ones. Instead we must use free/inuse flags and scan for
3924 an empty slot at allocation time. This in turn means allocation is
3925 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00003926
sewardjedc75ab2005-03-15 23:30:32 +00003927 An unused block has start == size == 0
3928*/
nethercote8b76fe52004-11-08 19:20:09 +00003929
/* Descriptor for one client-defined block (VG_USERREQ__CREATE_BLOCK).
   A slot with start == size == 0 is free. */
typedef
   struct {
      Addr start;          /* first byte of the block */
      SizeT size;          /* length in bytes */
      ExeContext* where;   /* where the block was created */
      Char* desc;          /* client-supplied description (strdup'd) */
   }
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt cgb_size = 0;        /* capacity of cgbs[] */
static UInt cgb_used = 0;        /* slots handed out so far */
static CGenBlock* cgbs = NULL;   /* expanding array of descriptors */

/* Stats for this subsystem. */
static UInt cgb_used_MAX = 0;   /* Max in use. */
static UInt cgb_allocs = 0;   /* Number of allocs. */
static UInt cgb_discards = 0;   /* Number of discards. */
static UInt cgb_search = 0;   /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00003949
3950
/* Return the index of a free slot in cgbs[], reusing a discarded slot
   if one exists, extending into spare capacity otherwise, and doubling
   the array as a last resort.  Indices are issued to the client, so
   live entries can never be moved. */
static
Int alloc_client_block ( void )
{
   UInt i, sz_new;
   CGenBlock* cgbs_new;

   cgb_allocs++;

   /* First choice: recycle a previously discarded slot. */
   for (i = 0; i < cgb_used; i++) {
      cgb_search++;
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         return i;
   }

   /* Not found.  Try to allocate one at the end. */
   if (cgb_used < cgb_size) {
      cgb_used++;
      return cgb_used-1;
   }

   /* Ok, we have to allocate a new one. */
   tl_assert(cgb_used == cgb_size);
   sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);

   /* Grow by copying into a fresh array; live indices are preserved. */
   cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
   for (i = 0; i < cgb_used; i++)
      cgbs_new[i] = cgbs[i];

   if (cgbs != NULL)
      VG_(free)( cgbs );
   cgbs = cgbs_new;

   cgb_size = sz_new;
   cgb_used++;
   if (cgb_used > cgb_used_MAX)
      cgb_used_MAX = cgb_used;
   return cgb_used-1;
}
3989
3990
/* Dump the client-block subsystem counters as a debug message. */
static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
   );
}
3998
/* Error-reporting helper: if 'a' falls inside a client-defined block,
   fill in *ai with a description of that block and return True; else
   return False.  A client block that is also a registered mempool is
   described more precisely via its chunks when possible. */
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
   UInt i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         /* OK - maybe it's a mempool, too? */
         MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
                                          (UWord)cgbs[i].start);
         if (mp != NULL) {
            if (mp->chunks != NULL) {
               MC_Chunk* mc;
               /* Walk the pool's chunks looking for one containing 'a'. */
               VG_(HT_ResetIter)(mp->chunks);
               while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
                  if (addr_is_in_MC_Chunk(mc, a)) {
                     ai->akind = UserG;
                     ai->blksize = mc->size;
                     ai->rwoffset = (Int)(a) - (Int)mc->data;
                     ai->lastchange = mc->where;
                     return True;
                  }
               }
            }
            /* In the pool but not in any chunk: describe the pool itself. */
            ai->akind = Mempool;
            ai->blksize = cgbs[i].size;
            ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
            ai->lastchange = cgbs[i].where;
            return True;
         }
         /* Plain client block. */
         ai->akind = UserG;
         ai->blksize = cgbs[i].size;
         ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
         ai->lastchange = cgbs[i].where;
         ai->desc = cgbs[i].desc;
         return True;
      }
   }
   return False;
}
4043
/* Dispatch a client request aimed at Memcheck.  'arg[0]' is the
   request code, arg[1..] its operands; results (where meaningful) are
   written to *ret.  Returns False for request codes that are not ours,
   True otherwise. */
static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
{
   Int   i;
   Bool  ok;
   Addr  bad_addr;

   /* Reject codes that are neither 'MC'-tagged tool requests nor one of
      the core-defined heap/mempool requests we also service. */
   if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
    && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
    && VG_USERREQ__FREELIKE_BLOCK   != arg[0]
    && VG_USERREQ__CREATE_MEMPOOL   != arg[0]
    && VG_USERREQ__DESTROY_MEMPOOL  != arg[0]
    && VG_USERREQ__MEMPOOL_ALLOC    != arg[0]
    && VG_USERREQ__MEMPOOL_FREE     != arg[0]
    && VG_USERREQ__MEMPOOL_TRIM     != arg[0])
      return False;

   switch (arg[0]) {
      case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
         /* *ret: 0 on success, else the first bad address. */
         ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
         if (!ok)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
                                   /*isUnaddr*/True );
         *ret = ok ? (UWord)NULL : bad_addr;
         break;

      case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
         MC_ReadResult res;
         res = is_mem_defined ( arg[1], arg[2], &bad_addr );
         if (MC_AddrErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/True );
         else if (MC_ValueErr == res)
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/False );
         *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
         break;
      }

      case VG_USERREQ__DO_LEAK_CHECK:
         /* arg[1] != 0 requests a summary-only check. */
         mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
         *ret = 0; /* return value is meaningless */
         break;

      case VG_USERREQ__MAKE_MEM_NOACCESS:
         MC_(make_mem_noaccess) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_UNDEFINED:
         MC_(make_mem_undefined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED:
         MC_(make_mem_defined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
         make_mem_defined_if_addressable ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__CREATE_BLOCK: /* describe a block */
         /* arg[1]=start, arg[2]=size, arg[3]=description string.
            *ret: slot index on success, -1 on bad arguments. */
         if (arg[1] != 0 && arg[2] != 0) {
            i = alloc_client_block();
            /* VG_(printf)("allocated %d %p\n", i, cgbs); */
            cgbs[i].start = arg[1];
            cgbs[i].size = arg[2];
            cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
            cgbs[i].where = VG_(record_ExeContext) ( tid );

            *ret = i;
         } else
            *ret = -1;
         break;

      case VG_USERREQ__DISCARD: /* discard */
         /* arg[2] is the slot index issued by CREATE_BLOCK.
            *ret: 0 on success, 1 if the slot is invalid/already free. */
         if (cgbs == NULL
             || arg[2] >= cgb_used ||
             (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
            *ret = 1;
         } else {
            /* NOTE(review): arg[2] is a UWord, so 'arg[2] >= 0' below is
               always true; only the upper-bound test is effective. */
            tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
            cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
            VG_(free)(cgbs[arg[2]].desc);
            cgb_discards++;
            *ret = 0;
         }
         break;

      case VG_USERREQ__GET_VBITS:
         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
            error. */
         /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], False /* get them */ );
         break;

      case VG_USERREQ__SET_VBITS:
         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
            error. */
         /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], True /* set them */ );
         break;

      case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
         UWord** argp = (UWord**)arg;
         // MC_(bytes_leaked) et al were set by the last leak check (or zero
         // if no prior leak checks performed).
         *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
         *argp[2] = MC_(bytes_dubious);
         *argp[3] = MC_(bytes_reachable);
         *argp[4] = MC_(bytes_suppressed);
         // there is no argp[5]
         //*argp[5] = MC_(bytes_indirect);
         // XXX need to make *argp[1-4] defined
         *ret = 0;
         return True;
      }
      case VG_USERREQ__MALLOCLIKE_BLOCK: {
         Addr p = (Addr)arg[1];
         SizeT sizeB = arg[2];
         UInt rzB = arg[3];
         Bool is_zeroed = (Bool)arg[4];

         MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed,
                          MC_AllocCustom, MC_(malloc_list) );
         return True;
      }
      case VG_USERREQ__FREELIKE_BLOCK: {
         Addr p = (Addr)arg[1];
         UInt rzB = arg[2];

         MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
         return True;
      }

      case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
         Char* s = (Char*) arg[1];
         OverlapExtra* extra = (OverlapExtra*)arg[2];
         mc_record_overlap_error(tid, s, extra);
         return True;
      }

      case VG_USERREQ__CREATE_MEMPOOL: {
         Addr pool = (Addr)arg[1];
         UInt rzB = arg[2];
         Bool is_zeroed = (Bool)arg[3];

         MC_(create_mempool) ( pool, rzB, is_zeroed );
         return True;
      }

      case VG_USERREQ__DESTROY_MEMPOOL: {
         Addr pool = (Addr)arg[1];

         MC_(destroy_mempool) ( pool );
         return True;
      }

      case VG_USERREQ__MEMPOOL_ALLOC: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];
         UInt size = arg[3];

         MC_(mempool_alloc) ( tid, pool, addr, size );
         return True;
      }

      case VG_USERREQ__MEMPOOL_FREE: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];

         MC_(mempool_free) ( pool, addr );
         return True;
      }

      case VG_USERREQ__MEMPOOL_TRIM: {
         Addr pool = (Addr)arg[1];
         Addr addr = (Addr)arg[2];
         UInt size = arg[3];

         MC_(mempool_trim) ( pool, addr, size );
         return True;
      }

      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown memcheck client request code %llx",
                      (ULong)arg[0]);
         return False;
   }
   return True;
}
njn25e49d8e72002-09-23 09:36:25 +00004240
4241/*------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004242/*--- Crude profiling machinery. ---*/
4243/*------------------------------------------------------------*/
4244
4245// We track a number of interesting events (using PROF_EVENT)
4246// if MC_PROFILE_MEMORY is defined.
4247
/* Event-counter machinery, active only when built with
   MC_PROFILE_MEMORY; otherwise init/done are empty stubs. */
#ifdef MC_PROFILE_MEMORY

UInt MC_(event_ctr)[N_PROF_EVENTS];        /* hit count per event */
HChar* MC_(event_ctr_name)[N_PROF_EVENTS]; /* name per event (set on use) */

/* Zero all event counters and names. */
static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      MC_(event_ctr)[i] = 0;
      MC_(event_ctr_name)[i] = NULL;
   }
}

/* Print all non-zero event counters, with a blank line between each
   group of ten events (the 'spaced' flag suppresses consecutive
   blank lines). */
static void done_prof_mem ( void )
{
   Int i;
   Bool spaced = False;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if (!spaced && (i % 10) == 0) {
         VG_(printf)("\n");
         spaced = True;
      }
      if (MC_(event_ctr)[i] > 0) {
         spaced = False;
         VG_(printf)( "prof mem event %3d: %9d %s\n",
                      i, MC_(event_ctr)[i],
                      MC_(event_ctr_name)[i]
                         ? MC_(event_ctr_name)[i] : "unnamed");
      }
   }
}

#else

static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#endif
4287
4288/*------------------------------------------------------------*/
njn51d827b2005-05-09 01:02:08 +00004289/*--- Setup and finalisation ---*/
njn25e49d8e72002-09-23 09:36:25 +00004290/*------------------------------------------------------------*/
4291
njn51d827b2005-05-09 01:02:08 +00004292static void mc_post_clo_init ( void )
njn5c004e42002-11-18 11:04:50 +00004293{
sewardj71bc3cb2005-05-19 00:25:45 +00004294 /* If we've been asked to emit XML, mash around various other
4295 options so as to constrain the output somewhat. */
4296 if (VG_(clo_xml)) {
4297 /* Extract as much info as possible from the leak checker. */
njn1d0825f2006-03-27 11:37:07 +00004298 /* MC_(clo_show_reachable) = True; */
4299 MC_(clo_leak_check) = LC_Full;
sewardj71bc3cb2005-05-19 00:25:45 +00004300 }
njn5c004e42002-11-18 11:04:50 +00004301}
4302
njn1d0825f2006-03-27 11:37:07 +00004303static void print_SM_info(char* type, int n_SMs)
4304{
4305 VG_(message)(Vg_DebugMsg,
4306 " memcheck: SMs: %s = %d (%dk, %dM)",
4307 type,
4308 n_SMs,
4309 n_SMs * sizeof(SecMap) / 1024,
4310 n_SMs * sizeof(SecMap) / (1024 * 1024) );
4311}
4312
njn51d827b2005-05-09 01:02:08 +00004313static void mc_fini ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00004314{
njn1d0825f2006-03-27 11:37:07 +00004315 MC_(print_malloc_stats)();
sewardj23eb2fd2005-04-22 16:29:19 +00004316
njn1d0825f2006-03-27 11:37:07 +00004317 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4318 if (MC_(clo_leak_check) == LC_Off)
4319 VG_(message)(Vg_UserMsg,
4320 "For a detailed leak analysis, rerun with: --leak-check=yes");
4321
4322 VG_(message)(Vg_UserMsg,
4323 "For counts of detected errors, rerun with: -v");
4324 }
4325 if (MC_(clo_leak_check) != LC_Off)
4326 mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));
4327
4328 done_prof_mem();
sewardjae986ca2005-10-12 12:53:20 +00004329
sewardj45d94cc2005-04-20 14:44:11 +00004330 if (VG_(clo_verbosity) > 1) {
njn1d0825f2006-03-27 11:37:07 +00004331 SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
4332
sewardj45d94cc2005-04-20 14:44:11 +00004333 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00004334 " memcheck: sanity checks: %d cheap, %d expensive",
4335 n_sanity_cheap, n_sanity_expensive );
sewardj45d94cc2005-04-20 14:44:11 +00004336 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00004337 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
4338 auxmap_used,
4339 auxmap_used * 64,
4340 auxmap_used / 16 );
4341 VG_(message)(Vg_DebugMsg,
4342 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00004343 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00004344
njndbf7ca72006-03-31 11:57:59 +00004345 print_SM_info("n_issued ", n_issued_SMs);
4346 print_SM_info("n_deissued ", n_deissued_SMs);
4347 print_SM_info("max_noaccess ", max_noaccess_SMs);
4348 print_SM_info("max_undefined", max_undefined_SMs);
4349 print_SM_info("max_defined ", max_defined_SMs);
4350 print_SM_info("max_non_DSM ", max_non_DSM_SMs);
njn1d0825f2006-03-27 11:37:07 +00004351
4352 // Three DSMs, plus the non-DSM ones
4353 max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
4354 // The 3*sizeof(Word) bytes is the AVL node metadata size.
4355 // The 4*sizeof(Word) bytes is the malloc metadata size.
4356 // Hardwiring these sizes in sucks, but I don't see how else to do it.
4357 max_secVBit_szB = max_secVBit_nodes *
4358 (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
4359 max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;
sewardj23eb2fd2005-04-22 16:29:19 +00004360
4361 VG_(message)(Vg_DebugMsg,
njn1d0825f2006-03-27 11:37:07 +00004362 " memcheck: max sec V bit nodes: %d (%dk, %dM)",
4363 max_secVBit_nodes, max_secVBit_szB / 1024,
4364 max_secVBit_szB / (1024 * 1024));
4365 VG_(message)(Vg_DebugMsg,
4366 " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
4367 sec_vbits_new_nodes + sec_vbits_updates,
4368 sec_vbits_new_nodes, sec_vbits_updates );
4369 VG_(message)(Vg_DebugMsg,
4370 " memcheck: max shadow mem size: %dk, %dM",
4371 max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
sewardj45d94cc2005-04-20 14:44:11 +00004372 }
4373
njn5c004e42002-11-18 11:04:50 +00004374 if (0) {
4375 VG_(message)(Vg_DebugMsg,
4376 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00004377 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00004378 }
njn25e49d8e72002-09-23 09:36:25 +00004379}
4380
njn51d827b2005-05-09 01:02:08 +00004381static void mc_pre_clo_init(void)
4382{
4383 VG_(details_name) ("Memcheck");
4384 VG_(details_version) (NULL);
4385 VG_(details_description) ("a memory error detector");
4386 VG_(details_copyright_author)(
sewardje4b0bf02006-06-05 23:21:15 +00004387 "Copyright (C) 2002-2006, and GNU GPL'd, by Julian Seward et al.");
njn51d827b2005-05-09 01:02:08 +00004388 VG_(details_bug_reports_to) (VG_BUGS_TO);
4389 VG_(details_avg_translation_sizeB) ( 370 );
4390
4391 VG_(basic_tool_funcs) (mc_post_clo_init,
4392 MC_(instrument),
4393 mc_fini);
4394
4395 VG_(needs_core_errors) ();
njn1d0825f2006-03-27 11:37:07 +00004396 VG_(needs_tool_errors) (mc_eq_Error,
njn51d827b2005-05-09 01:02:08 +00004397 mc_pp_Error,
njn1d0825f2006-03-27 11:37:07 +00004398 mc_update_extra,
njn51d827b2005-05-09 01:02:08 +00004399 mc_recognised_suppression,
njn1d0825f2006-03-27 11:37:07 +00004400 mc_read_extra_suppression_info,
4401 mc_error_matches_suppression,
4402 mc_get_error_name,
4403 mc_print_extra_suppression_info);
njn51d827b2005-05-09 01:02:08 +00004404 VG_(needs_libc_freeres) ();
njn1d0825f2006-03-27 11:37:07 +00004405 VG_(needs_command_line_options)(mc_process_cmd_line_options,
njn51d827b2005-05-09 01:02:08 +00004406 mc_print_usage,
4407 mc_print_debug_usage);
4408 VG_(needs_client_requests) (mc_handle_client_request);
4409 VG_(needs_sanity_checks) (mc_cheap_sanity_check,
4410 mc_expensive_sanity_check);
njn1d0825f2006-03-27 11:37:07 +00004411 VG_(needs_malloc_replacement) (MC_(malloc),
4412 MC_(__builtin_new),
4413 MC_(__builtin_vec_new),
4414 MC_(memalign),
4415 MC_(calloc),
4416 MC_(free),
4417 MC_(__builtin_delete),
4418 MC_(__builtin_vec_delete),
4419 MC_(realloc),
4420 MC_MALLOC_REDZONE_SZB );
njnca54af32006-04-16 10:25:43 +00004421 VG_(needs_xml_output) ();
njn51d827b2005-05-09 01:02:08 +00004422
njn1d0825f2006-03-27 11:37:07 +00004423 VG_(track_new_mem_startup) ( mc_new_mem_startup );
njndbf7ca72006-03-31 11:57:59 +00004424 VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
4425 VG_(track_new_mem_brk) ( MC_(make_mem_undefined) );
njn1d0825f2006-03-27 11:37:07 +00004426 VG_(track_new_mem_mmap) ( mc_new_mem_mmap );
njn51d827b2005-05-09 01:02:08 +00004427
njn1d0825f2006-03-27 11:37:07 +00004428 VG_(track_copy_mem_remap) ( MC_(copy_address_range_state) );
njn81623712005-10-07 04:48:37 +00004429
4430 // Nb: we don't do anything with mprotect. This means that V bits are
4431 // preserved if a program, for example, marks some memory as inaccessible
4432 // and then later marks it as accessible again.
4433 //
4434 // If an access violation occurs (eg. writing to read-only memory) we let
4435 // it fault and print an informative termination message. This doesn't
4436 // happen if the program catches the signal, though, which is bad. If we
4437 // had two A bits (for readability and writability) that were completely
4438 // distinct from V bits, then we could handle all this properly.
4439 VG_(track_change_mem_mprotect) ( NULL );
njn51d827b2005-05-09 01:02:08 +00004440
njndbf7ca72006-03-31 11:57:59 +00004441 VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
4442 VG_(track_die_mem_brk) ( MC_(make_mem_noaccess) );
4443 VG_(track_die_mem_munmap) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00004444
njn1d0825f2006-03-27 11:37:07 +00004445#ifdef PERF_FAST_STACK
4446 VG_(track_new_mem_stack_4) ( mc_new_mem_stack_4 );
4447 VG_(track_new_mem_stack_8) ( mc_new_mem_stack_8 );
4448 VG_(track_new_mem_stack_12) ( mc_new_mem_stack_12 );
4449 VG_(track_new_mem_stack_16) ( mc_new_mem_stack_16 );
4450 VG_(track_new_mem_stack_32) ( mc_new_mem_stack_32 );
4451 VG_(track_new_mem_stack_112) ( mc_new_mem_stack_112 );
4452 VG_(track_new_mem_stack_128) ( mc_new_mem_stack_128 );
4453 VG_(track_new_mem_stack_144) ( mc_new_mem_stack_144 );
4454 VG_(track_new_mem_stack_160) ( mc_new_mem_stack_160 );
4455#endif
4456 VG_(track_new_mem_stack) ( mc_new_mem_stack );
njn51d827b2005-05-09 01:02:08 +00004457
njn1d0825f2006-03-27 11:37:07 +00004458#ifdef PERF_FAST_STACK
4459 VG_(track_die_mem_stack_4) ( mc_die_mem_stack_4 );
4460 VG_(track_die_mem_stack_8) ( mc_die_mem_stack_8 );
4461 VG_(track_die_mem_stack_12) ( mc_die_mem_stack_12 );
4462 VG_(track_die_mem_stack_16) ( mc_die_mem_stack_16 );
4463 VG_(track_die_mem_stack_32) ( mc_die_mem_stack_32 );
4464 VG_(track_die_mem_stack_112) ( mc_die_mem_stack_112 );
4465 VG_(track_die_mem_stack_128) ( mc_die_mem_stack_128 );
4466 VG_(track_die_mem_stack_144) ( mc_die_mem_stack_144 );
4467 VG_(track_die_mem_stack_160) ( mc_die_mem_stack_160 );
4468#endif
4469 VG_(track_die_mem_stack) ( mc_die_mem_stack );
njn51d827b2005-05-09 01:02:08 +00004470
njndbf7ca72006-03-31 11:57:59 +00004471 VG_(track_ban_mem_stack) ( MC_(make_mem_noaccess) );
njn51d827b2005-05-09 01:02:08 +00004472
njndbf7ca72006-03-31 11:57:59 +00004473 VG_(track_pre_mem_read) ( check_mem_is_defined );
4474 VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
4475 VG_(track_pre_mem_write) ( check_mem_is_addressable );
njn1d0825f2006-03-27 11:37:07 +00004476 VG_(track_post_mem_write) ( mc_post_mem_write );
njn51d827b2005-05-09 01:02:08 +00004477
njn1d0825f2006-03-27 11:37:07 +00004478 if (MC_(clo_undef_value_errors))
4479 VG_(track_pre_reg_read) ( mc_pre_reg_read );
njn51d827b2005-05-09 01:02:08 +00004480
njn1d0825f2006-03-27 11:37:07 +00004481 VG_(track_post_reg_write) ( mc_post_reg_write );
4482 VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );
njn51d827b2005-05-09 01:02:08 +00004483
4484 init_shadow_memory();
njn1d0825f2006-03-27 11:37:07 +00004485 MC_(malloc_list) = VG_(HT_construct)( 80021 ); // prime, big
4486 MC_(mempool_list) = VG_(HT_construct)( 1009 ); // prime, not so big
4487 init_prof_mem();
njn51d827b2005-05-09 01:02:08 +00004488
4489 tl_assert( mc_expensive_sanity_check() );
njn1d0825f2006-03-27 11:37:07 +00004490
4491 // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
4492 tl_assert(sizeof(UWord) == sizeof(Addr));
4493
4494 // BYTES_PER_SEC_VBIT_NODE must be a power of two.
4495 tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
njn51d827b2005-05-09 01:02:08 +00004496}
4497
sewardj45f4e7c2005-09-27 19:20:21 +00004498VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
fitzhardinge98abfc72003-12-16 02:05:15 +00004499
njn25e49d8e72002-09-23 09:36:25 +00004500/*--------------------------------------------------------------------*/
njn1d0825f2006-03-27 11:37:07 +00004501/*--- end ---*/
njn25e49d8e72002-09-23 09:36:25 +00004502/*--------------------------------------------------------------------*/