/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO 22 Apr 05

   test whether it would be faster, for LOADV4, to check
   only for 8-byte validity on the fast path
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */


#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)

/* Define to debug the mem audit system.  Set to:
      0  no debugging, fast cases are used
      1  some sanity checking, fast cases are used
      2  max sanity checking, only slow cases are used
*/
#define VG_DEBUG_MEMORY 0


typedef enum {
   MC_Ok = 5, MC_AddrErr = 6, MC_ValueErr = 7
} MC_ReadResult;

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* TODO: fix this comment */
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each of size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates `not addressable and
//zz    not valid' for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 3 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, or addressable+valid.
//zz
//zz    [...] lots of stuff deleted due to out of date-ness
//zz
//zz    As a final optimisation, the alignment and address checks for
//zz    4-byte loads and stores are combined in a neat way.  The primary
//zz    map is extended to have 262144 entries (2^18), rather than 2^16.
//zz    The top 3/4 of these entries are permanently set to the
//zz    distinguished secondary map.  For a 4-byte load/store, the
//zz    top-level map is indexed not with (addr >> 16) but instead f(addr),
//zz    where
//zz
//zz    f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
//zz       = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
//zz       = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
//zz
//zz    ie the lowest two bits are placed above the 16 high address bits.
//zz    If either of these two bits is nonzero, the address is misaligned;
//zz    this will select a secondary map from the upper 3/4 of the primary
//zz    map.  Because this is always the distinguished secondary map, a
//zz    (bogus) address check failure will result.  The failure handling
//zz    code can then figure out whether this is a genuine addr check
//zz    failure or whether it is a possibly-legitimate access at a
//zz    misaligned address.  */

/* --------------- Basic configuration --------------- */

/* The number of entries in the primary map can be altered.  However
   we hardwire the assumption that each secondary map covers precisely
   64k of address space. */
#define SECONDARY_SIZE 65536               /* DO NOT CHANGE */
#define SECONDARY_MASK (SECONDARY_SIZE-1)  /* DO NOT CHANGE */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */
#define N_PRIMARY_BITS 16

/* Do not change this. */
#define N_PRIMARY_MAP  (1 << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Stats maps --------------- */

static Int   n_secmaps_issued   = 0;
static ULong n_auxmap_searches  = 0;
static ULong n_auxmap_cmps      = 0;
static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;


/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

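/* Size check: abits holds one accessibility (A) bit per byte of the
   64k chunk a SecMap shadows, 65536/8 = 8192 bytes, and vbyte holds
   one validity (V) byte per byte, 65536 bytes -- so each SecMap
   occupies 8192 + 65536 = 73728 bytes, as noted in the big comment
   above. */
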
/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(shadow_alloc)(sizeof(SecMap));
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   n_secmaps_issued++;
   return new_sm;
}


/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 500  /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];

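/* Worked example (illustrative only) of how this representation
   decomposes an address; the names mirror the local variables used in
   the fast-path helpers further down:

      Addr  a      = 0x08049a37;
      UWord sec_no = a >> 16;     // 0x0804 : index into primary_map[]
      UWord v_off  = a & 0xFFFF;  // 0x9a37 : index into sm->vbyte[]
      UWord a_off  = v_off >> 3;  // 0x1346 : byte within sm->abits[];
                                  //          bit (v_off & 7) of that
                                  //          byte is the A bit for 'a'

   Addresses above MAX_PRIMARY_ADDRESS do not index primary_map[] at
   all; they are handled via find_or_alloc_in_auxmap() below. */
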

/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that
   because each call potentially rearranges the entries, each call
   to this function invalidates ALL AuxMapEnt*s previously obtained by
   calling this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i]   = tmp;
         i--;
      }
      return &auxmap[i];
   }

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}

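/* Usage note (illustrative): because of the move-to-front step above,
   callers must not cache AuxMapEnt pointers across calls, e.g.

      AuxMapEnt* am1 = find_or_alloc_in_auxmap(a1);
      AuxMapEnt* am2 = find_or_alloc_in_auxmap(a2);
      // am1 may now refer to a different entry -- re-look it up
      // instead of reusing it.
*/
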

/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}

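/* Illustrative sequence (not a real code path) showing the COW
   behaviour of get_secmap_writable():

      // 'a' lies in a region whose primary entry still points at the
      // distinguished no-access secondary.
      SecMap* sm = get_secmap_writable(a);
      // primary_map[a >> 16] (or the relevant AuxMapEnt .sm) now
      // points at a private copy made by copy_for_writing(); writes
      // to sm->abits / sm->vbyte no longer touch sm_distinguished[].
*/
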

/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th least significant byte
   in a wordszB-sized word, given the specified endianness. */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}

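/* Concretely: byte_offset_w(4, False, 0) == 0 and
   byte_offset_w(4, True, 0) == 3 -- byteno counts up from the least
   significant end of the word, and the helper maps that to the memory
   offset appropriate for the given endianness. */
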

/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}

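/* Minimal sketch (hypothetical helper, not used anywhere in this
   file) of how the primitives above combine to classify a single
   byte, mirroring what the range-checking functions further down do.
*/
#if 0
static MC_ReadResult classify_one_byte ( Addr a )
{
   UWord abit, vbyte;
   get_abit_and_vbyte( &abit, &vbyte, a );
   if (abit != VGM_BIT_VALID)   return MC_AddrErr;   /* unaddressable */
   if (vbyte != VGM_BYTE_VALID) return MC_ValueErr;  /* undefined */
   return MC_Ok;
}
#endif
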

/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok;
   UWord abit, vbyte;

   PROF_EVENT(30, "mc_LOADVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_VALID);
      if (i == 0) break;
      i--;
   }

   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vw;
}

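/* Illustrative trace: for a little-endian 4-byte load where one of
   the four bytes is unaddressable, the loop above substitutes
   VGM_BYTE_VALID for that byte's V value and bumps n_addrs_bad, so
   the caller still receives a usable V word while a single address
   error covering the whole access is reported via
   MAC_(record_address_error). */
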

static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(35, "mc_STOREVn_slow");
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish the addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}


//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
//zz
//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    UChar   abits8;
//zz    PROF_EVENT(24);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    abits8 = sm->abits[sm_off >> 3];
//zz    abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
//zz    abits8 &= 0x0F;
//zz    return abits8;
//zz }
//zz
//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
//zz {
//zz    SecMap* sm     = primary_map[PM_IDX(a)];
//zz    UInt    sm_off = SM_OFF(a);
//zz    PROF_EVENT(25);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    return ((UInt*)(sm->vbyte))[sm_off >> 2];
//zz }
//zz
//zz
//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
//zz {
//zz    SecMap* sm;
//zz    UInt    sm_off;
//zz    ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
//zz    sm     = primary_map[PM_IDX(a)];
//zz    sm_off = SM_OFF(a);
//zz    PROF_EVENT(23);
//zz #  ifdef VG_DEBUG_MEMORY
//zz    tl_assert(VG_IS_4_ALIGNED(a));
//zz #  endif
//zz    ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
//zz }


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

/* Given address 'a', find the place where the pointer to a's
   secondary map lives.  If a falls into the primary map, the returned
   value points to one of the entries in primary_map[].  Otherwise,
   the auxiliary primary map is searched for 'a', or an entry is
   created for it; either way, the returned value points to the
   relevant AuxMapEnt's .sm field.

   The point of this is to enable set_address_range_perms to assign
   secondary maps in a uniform way, without worrying about whether a
   given secondary map is pointed to from the main or auxiliary
   primary map.
*/

static SecMap** find_secmap_binder_for_addr ( Addr aA )
{
   if (aA > MAX_PRIMARY_ADDRESS) {
      AuxMapEnt* am = find_or_alloc_in_auxmap(aA);
      return &am->sm;
   } else {
      UWord a      = (UWord)aA;
      UWord sec_no = (UWord)(a >> 16);
#  if VG_DEBUG_MEMORY >= 1
      tl_assert(sec_no < N_PRIMARY_MAP);
#  endif
      return &primary_map[sec_no];
   }
}


static void set_address_range_perms ( Addr aA, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   PROF_EVENT(150, "set_address_range_perms");

   /* Check the permissions make sense. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %u, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   UWord a = (UWord)aA;

#  if VG_DEBUG_MEMORY >= 2

   /*------------------ debug-only case ------------------ */
   SizeT i;

   UWord example_vbyte = BIT_TO_BYTE(example_v_bit);

   tl_assert(sizeof(SizeT) == sizeof(Addr));

   if (0 && len >= 4096)
      VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                  (ULong)a, len, example_a_bit, example_v_bit);

   if (len == 0)
      return;

   for (i = 0; i < len; i++) {
      set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
   }

#  else

   /*------------------ standard handling ------------------ */
   UWord    vbits8, abits8, vbits32, v_off, a_off;
   SecMap*  sm;
   SecMap** binder;
   SecMap*  example_dsm;

   /* Decide on the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   if (example_a_bit == VGM_BIT_INVALID) {
      example_dsm = &sm_distinguished[SM_DIST_NOACCESS];
   } else {
      if (example_v_bit == VGM_BIT_VALID) {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
      } else {
         example_dsm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
      }
   }

   /* Make various wider versions of the A/V values to use. */
   vbits8  = BIT_TO_BYTE(example_v_bit);
   abits8  = BIT_TO_BYTE(example_a_bit);
   vbits32 = (vbits8 << 24) | (vbits8 << 16) | (vbits8 << 8) | vbits8;

   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(151, "set_address_range_perms-loop1-pre");
      if (VG_IS_8_ALIGNED(a)) break;
      set_abit_and_vbyte( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0);

   /* Now go in steps of 8 bytes. */
   binder = find_secmap_binder_for_addr(a);

   while (True) {

      if (len < 8) break;

      PROF_EVENT(152, "set_address_range_perms-loop8");

      if ((a & SECONDARY_MASK) == 0) {
         /* we just traversed a primary map boundary, so update the
            binder. */
         binder = find_secmap_binder_for_addr(a);
         PROF_EVENT(153, "set_address_range_perms-update-binder");

         /* Space-optimisation.  If we are setting the entire
            secondary map, just point this entry at one of our
            distinguished secondaries.  However, only do that if it
            already points at a distinguished secondary, since doing
            otherwise would leak the existing secondary.  We could do
            better and free up any pre-existing non-distinguished
            secondary at this point, since we are guaranteed that each
            non-dist secondary only has one pointer to it, and we have
            that pointer right here. */
         if (len >= SECONDARY_SIZE && is_distinguished_sm(*binder)) {
            PROF_EVENT(154, "set_address_range_perms-entire-secmap");
            *binder = example_dsm;
            len -= SECONDARY_SIZE;
            a += SECONDARY_SIZE;
            continue;
         }
      }

      /* If the primary is already pointing to a distinguished map
         with the same properties as we're trying to set, then leave
         it that way. */
      if (*binder == example_dsm) {
         a += 8;
         len -= 8;
         continue;
      }

      /* Make sure it's OK to write the secondary. */
      if (is_distinguished_sm(*binder))
         *binder = copy_for_writing(*binder);

      sm = *binder;
      v_off = a & 0xFFFF;
      a_off = v_off >> 3;
      sm->abits[a_off] = (UChar)abits8;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 0] = (UInt)vbits32;
      ((UInt*)(sm->vbyte))[(v_off >> 2) + 1] = (UInt)vbits32;

      a += 8;
      len -= 8;
   }

   if (len == 0)
      return;

   tl_assert(VG_IS_8_ALIGNED(a) && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      if (len == 0) break;
      PROF_EVENT(155, "set_address_range_perms-loop1-post");
      set_abit_and_vbyte ( a, example_a_bit, vbits8 );
      a++;
      len--;
   }

#  endif
}

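/* Worked example (illustrative): a call covering addresses
   [0x5003, 0x5003 + 0x20009) is handled above in three phases:
      0x5003 .. 0x5007   -- unaligned lead-in, one byte at a time
      0x5008 .. 0x25007  -- 8 bytes at a time; when the loop crosses
                            into the 64k chunk at 0x10000 with at
                            least SECONDARY_SIZE bytes still to go,
                            and that chunk's secondary is
                            distinguished, the whole chunk is handled
                            by simply repointing the binder at
                            example_dsm
      0x25008 .. 0x2500b -- the sub-8-byte tail, one byte at a time
*/
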

/* --- Set permissions for arbitrary address ranges --- */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(40, "mc_make_noaccess");
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(41, "mc_make_writable");
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(42, "mc_make_readable");
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}

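/* Summary of the three states set above, per byte:
      mc_make_noaccess : A invalid, V invalid -- not addressable
      mc_make_writable : A valid,   V invalid -- addressable, undefined
      mc_make_readable : A valid,   V valid   -- addressable, defined
*/
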

/* --- Block-copy permissions (needed for implementing realloc()). --- */

static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");

   PROF_EVENT(50, "mc_copy_address_range_state");
   for (i = 0; i < len; i++) {
      PROF_EVENT(51, "mc_copy_address_range_state(loop)");
      get_abit_and_vbyte( &abit, &vbyte, src+i );
      set_abit_and_vbyte( dst+i, abit, vbyte );
   }
}

707/* --- Fast case permission setters, for dealing with stacks. --- */
708
njn9b007f62003-04-07 14:40:25 +0000709static __inline__
sewardj5d28efc2005-04-21 22:16:29 +0000710void make_aligned_word32_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000711{
sewardj5d28efc2005-04-21 22:16:29 +0000712 PROF_EVENT(300, "make_aligned_word32_writable");
713
714# if VG_DEBUG_MEMORY >= 2
715 mc_make_writable(aA, 4);
716# else
717
718 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
sewardj23eb2fd2005-04-22 16:29:19 +0000719 PROF_EVENT(301, "make_aligned_word32_writable-slow1");
sewardj5d28efc2005-04-21 22:16:29 +0000720 mc_make_writable(aA, 4);
721 return;
722 }
723
724 UWord a = (UWord)aA;
725 UWord sec_no = (UWord)(a >> 16);
726# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000727 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000728# endif
729
730 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
731 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
732
733 SecMap* sm = primary_map[sec_no];
734 UWord v_off = a & 0xFFFF;
735 UWord a_off = v_off >> 3;
736
737 /* Paint the new area as uninitialised. */
738 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
739
740 UWord mask = 0x0F;
741 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
742 /* mask now contains 1s where we wish to make address bits valid
743 (0s). */
744 sm->abits[a_off] &= ~mask;
745# endif
njn9b007f62003-04-07 14:40:25 +0000746}
747
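/* The nibble arithmetic in make_aligned_word32_writable, spelt out
   (illustrative): each sm->abits[] byte covers 8 consecutive memory
   bytes, so an aligned 4-byte word corresponds to either the low
   nibble (a & 4 == 0) or the high nibble (a & 4 == 4) of
   sm->abits[a_off].  Shifting 0x0F left by (a & 4) builds a mask for
   that nibble, and clearing those bits marks the four bytes as
   addressable, since valid A bits are 0s. */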
sewardj5d28efc2005-04-21 22:16:29 +0000748
749static __inline__
750void make_aligned_word32_noaccess ( Addr aA )
751{
752 PROF_EVENT(310, "make_aligned_word32_noaccess");
753
754# if VG_DEBUG_MEMORY >= 2
755 mc_make_noaccess(aA, 4);
756# else
757
758 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
759 PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
760 mc_make_noaccess(aA, 4);
761 return;
762 }
763
764 UWord a = (UWord)aA;
765 UWord sec_no = (UWord)(a >> 16);
766# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +0000767 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +0000768# endif
769
770 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
771 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
772
773 SecMap* sm = primary_map[sec_no];
774 UWord v_off = a & 0xFFFF;
775 UWord a_off = v_off >> 3;
776
777 /* Paint the abandoned data as uninitialised. Probably not
778 necessary, but still .. */
779 ((UInt*)(sm->vbyte))[v_off >> 2] = VGM_WORD32_INVALID;
780
781 UWord mask = 0x0F;
782 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
783 /* mask now contains 1s where we wish to make address bits invalid
784 (1s). */
785 sm->abits[a_off] |= mask;
786# endif
787}
788
789
njn9b007f62003-04-07 14:40:25 +0000790/* Nb: by "aligned" here we mean 8-byte aligned */
791static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000792void make_aligned_word64_writable ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000793{
sewardj23eb2fd2005-04-22 16:29:19 +0000794 PROF_EVENT(320, "make_aligned_word64_writable");
795
796# if VG_DEBUG_MEMORY >= 2
797 mc_make_writable(aA, 8);
798# else
799
800 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
801 PROF_EVENT(321, "make_aligned_word64_writable-slow1");
802 mc_make_writable(aA, 8);
803 return;
804 }
805
806 UWord a = (UWord)aA;
807 UWord sec_no = (UWord)(a >> 16);
808# if VG_DEBUG_MEMORY >= 1
809 tl_assert(sec_no < N_PRIMARY_MAP);
810# endif
811
812 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
813 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
814
815 SecMap* sm = primary_map[sec_no];
816 UWord v_off = a & 0xFFFF;
817 UWord a_off = v_off >> 3;
818
819 /* Paint the new area as uninitialised. */
820 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
821
822 /* Make the relevant area accessible. */
823 sm->abits[a_off] = VGM_BYTE_VALID;
824# endif
njn9b007f62003-04-07 14:40:25 +0000825}
826
sewardj23eb2fd2005-04-22 16:29:19 +0000827
njn9b007f62003-04-07 14:40:25 +0000828static __inline__
sewardj23eb2fd2005-04-22 16:29:19 +0000829void make_aligned_word64_noaccess ( Addr aA )
njn9b007f62003-04-07 14:40:25 +0000830{
sewardj23eb2fd2005-04-22 16:29:19 +0000831 PROF_EVENT(330, "make_aligned_word64_noaccess");
832
833# if VG_DEBUG_MEMORY >= 2
834 mc_make_noaccess(aA, 8);
835# else
836
837 if (EXPECTED_NOT_TAKEN(aA > MAX_PRIMARY_ADDRESS)) {
838 PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
839 mc_make_noaccess(aA, 8);
840 return;
841 }
842
843 UWord a = (UWord)aA;
844 UWord sec_no = (UWord)(a >> 16);
845# if VG_DEBUG_MEMORY >= 1
846 tl_assert(sec_no < N_PRIMARY_MAP);
847# endif
848
849 if (EXPECTED_NOT_TAKEN(is_distinguished_sm(primary_map[sec_no])))
850 primary_map[sec_no] = copy_for_writing(primary_map[sec_no]);
851
852 SecMap* sm = primary_map[sec_no];
853 UWord v_off = a & 0xFFFF;
854 UWord a_off = v_off >> 3;
855
856 /* Paint the abandoned data as uninitialised. Probably not
857 necessary, but still .. */
858 ((ULong*)(sm->vbyte))[v_off >> 3] = VGM_WORD64_INVALID;
859
860 /* Make the abandoned area inaccessible. */
861 sm->abits[a_off] = VGM_BYTE_INVALID;
862# endif
njn9b007f62003-04-07 14:40:25 +0000863}
864
sewardj23eb2fd2005-04-22 16:29:19 +0000865
sewardj45d94cc2005-04-20 14:44:11 +0000866/* The stack-pointer update handling functions */
867SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
868 make_aligned_word32_noaccess,
869 make_aligned_word64_writable,
870 make_aligned_word64_noaccess,
871 mc_make_writable,
872 mc_make_noaccess
873 );
njn9b007f62003-04-07 14:40:25 +0000874
sewardj45d94cc2005-04-20 14:44:11 +0000875
nethercote8b76fe52004-11-08 19:20:09 +0000876/*------------------------------------------------------------*/
877/*--- Checking memory ---*/
878/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000879
880/* Check permissions for address range. If inadequate permissions
881 exist, *bad_addr is set to the offending address, so the caller can
882 know what it is. */
883
/* Returns True if [a .. a+len) is not addressable.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
nethercote8b76fe52004-11-08 19:20:09 +0000888static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
sewardjecf8e102003-07-12 12:11:39 +0000889{
nethercote451eae92004-11-02 13:06:32 +0000890 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +0000891 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +0000892 PROF_EVENT(60, "mc_check_noaccess");
sewardjecf8e102003-07-12 12:11:39 +0000893 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000894 PROF_EVENT(61, "mc_check_noaccess(loop)");
sewardjecf8e102003-07-12 12:11:39 +0000895 abit = get_abit(a);
896 if (abit == VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000897 if (bad_addr != NULL)
898 *bad_addr = a;
sewardjecf8e102003-07-12 12:11:39 +0000899 return False;
900 }
901 a++;
902 }
903 return True;
904}
905
nethercote8b76fe52004-11-08 19:20:09 +0000906static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000907{
nethercote451eae92004-11-02 13:06:32 +0000908 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +0000909 UWord abit;
sewardjc1a2cda2005-04-21 17:34:00 +0000910 PROF_EVENT(62, "mc_check_writable");
njn25e49d8e72002-09-23 09:36:25 +0000911 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000912 PROF_EVENT(63, "mc_check_writable(loop)");
njn25e49d8e72002-09-23 09:36:25 +0000913 abit = get_abit(a);
914 if (abit == VGM_BIT_INVALID) {
915 if (bad_addr != NULL) *bad_addr = a;
916 return False;
917 }
918 a++;
919 }
920 return True;
921}
922
nethercote8b76fe52004-11-08 19:20:09 +0000923static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000924{
nethercote451eae92004-11-02 13:06:32 +0000925 SizeT i;
sewardj45d94cc2005-04-20 14:44:11 +0000926 UWord abit;
927 UWord vbyte;
njn25e49d8e72002-09-23 09:36:25 +0000928
sewardjc1a2cda2005-04-21 17:34:00 +0000929 PROF_EVENT(64, "mc_check_readable");
nethercote8b76fe52004-11-08 19:20:09 +0000930 DEBUG("mc_check_readable\n");
njn25e49d8e72002-09-23 09:36:25 +0000931 for (i = 0; i < len; i++) {
sewardjc1a2cda2005-04-21 17:34:00 +0000932 PROF_EVENT(65, "mc_check_readable(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000933 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +0000934 // Report addressability errors in preference to definedness errors
935 // by checking the A bits first.
936 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000937 if (bad_addr != NULL)
938 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000939 return MC_AddrErr;
940 }
941 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000942 if (bad_addr != NULL)
943 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000944 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +0000945 }
946 a++;
947 }
nethercote8b76fe52004-11-08 19:20:09 +0000948 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +0000949}
950
951
952/* Check a zero-terminated ascii string. Tricky -- don't want to
953 examine the actual bytes, to find the end, until we're sure it is
954 safe to do so. */
955
static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000957{
sewardj45d94cc2005-04-20 14:44:11 +0000958 UWord abit;
959 UWord vbyte;
sewardjc1a2cda2005-04-21 17:34:00 +0000960 PROF_EVENT(66, "mc_check_readable_asciiz");
njn5c004e42002-11-18 11:04:50 +0000961 DEBUG("mc_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000962 while (True) {
sewardjc1a2cda2005-04-21 17:34:00 +0000963 PROF_EVENT(67, "mc_check_readable_asciiz(loop)");
sewardj45d94cc2005-04-20 14:44:11 +0000964 get_abit_and_vbyte(&abit, &vbyte, a);
nethercote8b76fe52004-11-08 19:20:09 +0000965 // As in mc_check_readable(), check A bits first
966 if (abit != VGM_BIT_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000967 if (bad_addr != NULL)
968 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000969 return MC_AddrErr;
970 }
971 if (vbyte != VGM_BYTE_VALID) {
sewardj45d94cc2005-04-20 14:44:11 +0000972 if (bad_addr != NULL)
973 *bad_addr = a;
nethercote8b76fe52004-11-08 19:20:09 +0000974 return MC_ValueErr;
njn25e49d8e72002-09-23 09:36:25 +0000975 }
976 /* Ok, a is safe to read. */
sewardj45d94cc2005-04-20 14:44:11 +0000977 if (* ((UChar*)a) == 0)
978 return MC_Ok;
njn25e49d8e72002-09-23 09:36:25 +0000979 a++;
980 }
981}
982
983
984/*------------------------------------------------------------*/
985/*--- Memory event handlers ---*/
986/*------------------------------------------------------------*/
987
njn25e49d8e72002-09-23 09:36:25 +0000988static
njn72718642003-07-24 08:45:32 +0000989void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +0000990 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +0000991{
992 Bool ok;
993 Addr bad_addr;
994
995 VGP_PUSHCC(VgpCheckMem);
996
997 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
998 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +0000999 ok = mc_check_writable ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001000 if (!ok) {
1001 switch (part) {
1002 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001003 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1004 /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001005 break;
1006
1007 case Vg_CorePThread:
1008 case Vg_CoreSignal:
nethercote8b76fe52004-11-08 19:20:09 +00001009 MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
njn25e49d8e72002-09-23 09:36:25 +00001010 break;
1011
1012 default:
njn67993252004-11-22 18:02:32 +00001013 VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001014 }
1015 }
1016
1017 VGP_POPCC(VgpCheckMem);
1018}
1019
1020static
njn72718642003-07-24 08:45:32 +00001021void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
nethercote451eae92004-11-02 13:06:32 +00001022 Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001023{
njn25e49d8e72002-09-23 09:36:25 +00001024 Addr bad_addr;
nethercote8b76fe52004-11-08 19:20:09 +00001025 MC_ReadResult res;
njn25e49d8e72002-09-23 09:36:25 +00001026
1027 VGP_PUSHCC(VgpCheckMem);
1028
1029 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
1030 base,base+size-1); */
nethercote8b76fe52004-11-08 19:20:09 +00001031 res = mc_check_readable ( base, size, &bad_addr );
1032 if (MC_Ok != res) {
1033 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1034
njn25e49d8e72002-09-23 09:36:25 +00001035 switch (part) {
1036 case Vg_CoreSysCall:
nethercote8b76fe52004-11-08 19:20:09 +00001037 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
1038 isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001039 break;
1040
1041 case Vg_CorePThread:
nethercote8b76fe52004-11-08 19:20:09 +00001042 MAC_(record_core_mem_error)( tid, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001043 break;
1044
1045 /* If we're being asked to jump to a silly address, record an error
1046 message before potentially crashing the entire system. */
1047 case Vg_CoreTranslate:
njn72718642003-07-24 08:45:32 +00001048 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +00001049 break;
1050
1051 default:
njn67993252004-11-22 18:02:32 +00001052 VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +00001053 }
1054 }
1055 VGP_POPCC(VgpCheckMem);
1056}
1057
1058static
njn72718642003-07-24 08:45:32 +00001059void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +00001060 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +00001061{
nethercote8b76fe52004-11-08 19:20:09 +00001062 MC_ReadResult res;
sewardj45d94cc2005-04-20 14:44:11 +00001063 Addr bad_addr;
njn25e49d8e72002-09-23 09:36:25 +00001064 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1065
1066 VGP_PUSHCC(VgpCheckMem);
1067
njnca82cc02004-11-22 17:18:48 +00001068 tl_assert(part == Vg_CoreSysCall);
nethercote8b76fe52004-11-08 19:20:09 +00001069 res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
1070 if (MC_Ok != res) {
1071 Bool isUnaddr = ( MC_AddrErr == res ? True : False );
1072 MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
njn25e49d8e72002-09-23 09:36:25 +00001073 }
1074
1075 VGP_POPCC(VgpCheckMem);
1076}
1077
njn25e49d8e72002-09-23 09:36:25 +00001078static
nethercote451eae92004-11-02 13:06:32 +00001079void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001080{
njn1f3a9092002-10-04 09:22:30 +00001081 /* Ignore the permissions, just make it readable. Seems to work... */
nethercote451eae92004-11-02 13:06:32 +00001082 DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
1083 a,(ULong)len,rr,ww,xx);
nethercote8b76fe52004-11-08 19:20:09 +00001084 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001085}
1086
1087static
nethercote451eae92004-11-02 13:06:32 +00001088void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001089{
1090 if (is_inited) {
nethercote8b76fe52004-11-08 19:20:09 +00001091 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001092 } else {
nethercote8b76fe52004-11-08 19:20:09 +00001093 mc_make_writable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001094 }
1095}
1096
1097static
njnb8dca862005-03-14 02:42:44 +00001098void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001099{
njnb8dca862005-03-14 02:42:44 +00001100 mc_make_readable(a, len);
njn25e49d8e72002-09-23 09:36:25 +00001101}
1102
njncf45fd42004-11-24 16:30:22 +00001103static
1104void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
1105{
1106 mc_make_readable(a, len);
1107}
njn25e49d8e72002-09-23 09:36:25 +00001108
sewardj45d94cc2005-04-20 14:44:11 +00001109
njn25e49d8e72002-09-23 09:36:25 +00001110/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001111/*--- Register event handlers ---*/
1112/*------------------------------------------------------------*/
1113
/* When some chunk of guest state is written, mark the corresponding
   shadow area as valid.  This is used to initialise arbitrarily large
   chunks of guest state, hence the (somewhat arbitrary) 1024-byte
   limit enforced below.
*/
1118static void mc_post_reg_write ( CorePart part, ThreadId tid,
1119 OffT offset, SizeT size)
njnd3040452003-05-19 15:04:06 +00001120{
sewardj6cf40ff2005-04-20 22:31:26 +00001121 UChar area[1024];
1122 tl_assert(size <= 1024);
njncf45fd42004-11-24 16:30:22 +00001123 VG_(memset)(area, VGM_BYTE_VALID, size);
1124 VG_(set_shadow_regs_area)( tid, offset, size, area );
njnd3040452003-05-19 15:04:06 +00001125}
1126
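/* Illustrative use of mc_post_reg_write (hypothetical numbers): when
   the core reports that an 8-byte guest register at 'offset' has been
   written, the hook above fills the corresponding 8 shadow bytes with
   VGM_BYTE_VALID, i.e. the register becomes fully defined. */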
sewardj45d94cc2005-04-20 14:44:11 +00001127static
1128void mc_post_reg_write_clientcall ( ThreadId tid,
1129 OffT offset, SizeT size,
1130 Addr f)
njnd3040452003-05-19 15:04:06 +00001131{
njncf45fd42004-11-24 16:30:22 +00001132 mc_post_reg_write(/*dummy*/0, tid, offset, size);
njnd3040452003-05-19 15:04:06 +00001133}
1134
sewardj45d94cc2005-04-20 14:44:11 +00001135/* Look at the definedness of the guest's shadow state for
1136 [offset, offset+len). If any part of that is undefined, record
1137 a parameter error.
1138*/
1139static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
1140 OffT offset, SizeT size)
nethercote8b76fe52004-11-08 19:20:09 +00001141{
sewardj45d94cc2005-04-20 14:44:11 +00001142 Int i;
1143 Bool bad;
1144
1145 UChar area[16];
1146 tl_assert(size <= 16);
1147
1148 VG_(get_shadow_regs_area)( tid, offset, size, area );
1149
   bad = False;
   for (i = 0; i < size; i++) {
      if (area[i] != VGM_BYTE_VALID) {
         bad = True;   /* found an undefined shadow byte */
         break;
      }
   }
1157
sewardj45d94cc2005-04-20 14:44:11 +00001158 if (bad)
nethercote8b76fe52004-11-08 19:20:09 +00001159 MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
1160}
njnd3040452003-05-19 15:04:06 +00001161
njn25e49d8e72002-09-23 09:36:25 +00001162
sewardj6cf40ff2005-04-20 22:31:26 +00001163/*------------------------------------------------------------*/
sewardjc859fbf2005-04-22 21:10:28 +00001164/*--- Functions called directly from generated code: ---*/
1165/*--- Load/store handlers. ---*/
sewardj6cf40ff2005-04-20 22:31:26 +00001166/*------------------------------------------------------------*/
1167
1168/* Types: LOADV4, LOADV2, LOADV1 are:
1169 UWord fn ( Addr a )
1170 so they return 32-bits on 32-bit machines and 64-bits on
1171 64-bit machines. Addr has the same size as a host word.
1172
1173 LOADV8 is always ULong fn ( Addr a )
1174
1175 Similarly for STOREV1, STOREV2, STOREV4, the supplied vbits
1176 are a UWord, and for STOREV8 they are a ULong.
1177*/
1178
sewardj95448072004-11-22 20:19:51 +00001179/* ------------------------ Size = 8 ------------------------ */
1180
njn9fb73db2005-03-27 01:55:21 +00001181VGA_REGPARM(1)
sewardj95448072004-11-22 20:19:51 +00001182ULong MC_(helperc_LOADV8) ( Addr a )
1183{
sewardjc1a2cda2005-04-21 17:34:00 +00001184 PROF_EVENT(70, "helperc_LOADV8");
sewardj45d94cc2005-04-20 14:44:11 +00001185 return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
sewardj95448072004-11-22 20:19:51 +00001186}
1187
njn9fb73db2005-03-27 01:55:21 +00001188VGA_REGPARM(1)
sewardj95448072004-11-22 20:19:51 +00001189void MC_(helperc_STOREV8) ( Addr a, ULong vbytes )
1190{
sewardjc1a2cda2005-04-21 17:34:00 +00001191 PROF_EVENT(71, "helperc_STOREV8");
sewardj45d94cc2005-04-20 14:44:11 +00001192 mc_STOREVn_slow( a, 8, vbytes, False/*littleendian*/ );
sewardj95448072004-11-22 20:19:51 +00001193}
1194
1195/* ------------------------ Size = 4 ------------------------ */
1196
njn9fb73db2005-03-27 01:55:21 +00001197VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001198UWord MC_(helperc_LOADV4) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001199{
sewardjc1a2cda2005-04-21 17:34:00 +00001200 PROF_EVENT(220, "helperc_LOADV4");
1201
1202# if VG_DEBUG_MEMORY >= 2
1203 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1204# else
1205
sewardj23eb2fd2005-04-22 16:29:19 +00001206 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001207 UWord a = (UWord)aA;
1208
1209 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1210 naturally aligned, or 'a' exceeds the range covered by the
1211 primary map. Either way we defer to the slow-path case. */
1212 if (EXPECTED_NOT_TAKEN(a & mask)) {
1213 PROF_EVENT(221, "helperc_LOADV4-slow1");
1214 return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
1215 }
1216
1217 UWord sec_no = (UWord)(a >> 16);
1218
1219# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001220 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001221# endif
1222
1223 SecMap* sm = primary_map[sec_no];
1224 UWord v_off = a & 0xFFFF;
1225 UWord a_off = v_off >> 3;
1226 UWord abits = (UWord)(sm->abits[a_off]);
1227 abits >>= (a & 4);
1228 abits &= 15;
1229 if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001232 /* On a 32-bit platform, simply hoick the required 32 bits out of
1233 the vbyte array. On a 64-bit platform, also set the upper 32
1234 bits to 1 ("undefined"), just in case. This almost certainly
1235 isn't necessary, but be paranoid. */
1236 UWord ret = (UWord)0xFFFFFFFF00000000ULL;
1237 ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
1238 return ret;
sewardjc1a2cda2005-04-21 17:34:00 +00001239 } else {
1240 /* Slow but general case. */
1241 PROF_EVENT(222, "helperc_LOADV4-slow2");
1242 return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
1243 }
1244
1245# endif
njn25e49d8e72002-09-23 09:36:25 +00001246}
1247
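/* The fast-path mask used in the LOADV/STOREV helpers, worked through
   for the default N_PRIMARY_BITS == 16 and the 4-byte case
   (illustrative):
      (0x10000-4)             == 0xFFFC
      (N_PRIMARY_MAP-1) << 16 == 0xFFFF0000
      mask                    == ~0xFFFFFFFC
   so on a 32-bit host 'a & mask' is nonzero exactly when a is not
   4-aligned, and on a 64-bit host it is additionally nonzero when a
   lies above MAX_PRIMARY_ADDRESS; both cases take the
   mc_LOADVn_slow / mc_STOREVn_slow path.  The 2-byte and 1-byte
   helpers use the same scheme with 0x10000-2 and 0x10000-1. */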
njn9fb73db2005-03-27 01:55:21 +00001248VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001249void MC_(helperc_STOREV4) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001250{
sewardjc1a2cda2005-04-21 17:34:00 +00001251 PROF_EVENT(230, "helperc_STOREV4");
1252
1253# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001254 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001255# else
1256
sewardj23eb2fd2005-04-22 16:29:19 +00001257 const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001258 UWord a = (UWord)aA;
1259
1260 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1261 naturally aligned, or 'a' exceeds the range covered by the
1262 primary map. Either way we defer to the slow-path case. */
1263 if (EXPECTED_NOT_TAKEN(a & mask)) {
1264 PROF_EVENT(231, "helperc_STOREV4-slow1");
1265 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1266 return;
1267 }
1268
1269 UWord sec_no = (UWord)(a >> 16);
1270
1271# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001272 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001273# endif
1274
1275 SecMap* sm = primary_map[sec_no];
1276 UWord v_off = a & 0xFFFF;
1277 UWord a_off = v_off >> 3;
1278 UWord abits = (UWord)(sm->abits[a_off]);
1279 abits >>= (a & 4);
1280 abits &= 15;
1281 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1282 && abits == VGM_NIBBLE_VALID)) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressable. */
1285 ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
1286 } else {
1287 /* Slow but general case. */
1288 PROF_EVENT(232, "helperc_STOREV4-slow2");
1289 mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
1290 }
1291# endif
njn25e49d8e72002-09-23 09:36:25 +00001292}
1293
sewardj95448072004-11-22 20:19:51 +00001294/* ------------------------ Size = 2 ------------------------ */
1295
njn9fb73db2005-03-27 01:55:21 +00001296VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001297UWord MC_(helperc_LOADV2) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001298{
sewardjc1a2cda2005-04-21 17:34:00 +00001299 PROF_EVENT(240, "helperc_LOADV2");
1300
1301# if VG_DEBUG_MEMORY >= 2
1302 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1303# else
1304
sewardj23eb2fd2005-04-22 16:29:19 +00001305 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001306 UWord a = (UWord)aA;
1307
1308 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1309 naturally aligned, or 'a' exceeds the range covered by the
1310 primary map. Either way we defer to the slow-path case. */
1311 if (EXPECTED_NOT_TAKEN(a & mask)) {
1312 PROF_EVENT(241, "helperc_LOADV2-slow1");
1313 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1314 }
1315
1316 UWord sec_no = (UWord)(a >> 16);
1317
1318# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001319 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001320# endif
1321
1322 SecMap* sm = primary_map[sec_no];
1323 UWord v_off = a & 0xFFFF;
1324 UWord a_off = v_off >> 3;
1325 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001326 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
1327 /* Handle common case quickly: a is mapped, and the entire
1328 word32 it lives in is addressible. */
sewardj5d28efc2005-04-21 22:16:29 +00001329 /* Set the upper 16/48 bits of the result to 1 ("undefined"),
1330 just in case. This almost certainly isn't necessary, but be
1331 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001332 return (~(UWord)0xFFFF)
1333 |
1334 (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
1335 } else {
1336 /* Slow but general case. */
1337 PROF_EVENT(242, "helperc_LOADV2-slow2");
1338 return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
1339 }
1340
1341# endif
njn25e49d8e72002-09-23 09:36:25 +00001342}
1343
njn9fb73db2005-03-27 01:55:21 +00001344VGA_REGPARM(2)
sewardj5d28efc2005-04-21 22:16:29 +00001345void MC_(helperc_STOREV2) ( Addr aA, UWord vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001346{
sewardjc1a2cda2005-04-21 17:34:00 +00001347 PROF_EVENT(250, "helperc_STOREV2");
sewardj5d28efc2005-04-21 22:16:29 +00001348
1349# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001350 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
sewardj5d28efc2005-04-21 22:16:29 +00001351# else
1352
sewardj23eb2fd2005-04-22 16:29:19 +00001353 const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
sewardj5d28efc2005-04-21 22:16:29 +00001354 UWord a = (UWord)aA;
1355
1356 /* If any part of 'a' indicated by the mask is 1, either 'a' is not
1357 naturally aligned, or 'a' exceeds the range covered by the
1358 primary map. Either way we defer to the slow-path case. */
1359 if (EXPECTED_NOT_TAKEN(a & mask)) {
1360 PROF_EVENT(251, "helperc_STOREV2-slow1");
1361 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1362 return;
1363 }
1364
1365 UWord sec_no = (UWord)(a >> 16);
1366
1367# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001368 tl_assert(sec_no < N_PRIMARY_MAP);
sewardj5d28efc2005-04-21 22:16:29 +00001369# endif
1370
1371 SecMap* sm = primary_map[sec_no];
1372 UWord v_off = a & 0xFFFF;
1373 UWord a_off = v_off >> 3;
1374 UWord abits = (UWord)(sm->abits[a_off]);
1375 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1376 && abits == VGM_BYTE_VALID)) {
1377 /* Handle common case quickly. */
1378 ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
1379 } else {
1380 /* Slow but general case. */
1381 PROF_EVENT(252, "helperc_STOREV2-slow2");
1382 mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
1383 }
1384# endif
njn25e49d8e72002-09-23 09:36:25 +00001385}
1386
sewardj95448072004-11-22 20:19:51 +00001387/* ------------------------ Size = 1 ------------------------ */
1388
njn9fb73db2005-03-27 01:55:21 +00001389VGA_REGPARM(1)
sewardjc1a2cda2005-04-21 17:34:00 +00001390UWord MC_(helperc_LOADV1) ( Addr aA )
njn25e49d8e72002-09-23 09:36:25 +00001391{
sewardjc1a2cda2005-04-21 17:34:00 +00001392 PROF_EVENT(260, "helperc_LOADV1");
1393
1394# if VG_DEBUG_MEMORY >= 2
sewardj23eb2fd2005-04-22 16:29:19 +00001395 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
sewardjc1a2cda2005-04-21 17:34:00 +00001396# else
1397
sewardj23eb2fd2005-04-22 16:29:19 +00001398 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001399 UWord a = (UWord)aA;
1400
1401 /* If any part of 'a' indicated by the mask is 1, it means 'a'
1402 exceeds the range covered by the primary map. In which case we
1403 defer to the slow-path case. */
1404 if (EXPECTED_NOT_TAKEN(a & mask)) {
1405 PROF_EVENT(261, "helperc_LOADV1-slow1");
1406 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1407 }
1408
1409 UWord sec_no = (UWord)(a >> 16);
1410
1411# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001412 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001413# endif
1414
1415 SecMap* sm = primary_map[sec_no];
1416 UWord v_off = a & 0xFFFF;
1417 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001418 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001419 if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
      /* Handle common case quickly: a is mapped, and the entire
         8-byte block it lives in is addressable. */
sewardj5d28efc2005-04-21 22:16:29 +00001422 /* Set the upper 24/56 bits of the result to 1 ("undefined"),
1423 just in case. This almost certainly isn't necessary, but be
1424 paranoid. */
sewardjc1a2cda2005-04-21 17:34:00 +00001425 return (~(UWord)0xFF)
1426 |
1427 (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
1428 } else {
1429 /* Slow but general case. */
1430 PROF_EVENT(262, "helperc_LOADV1-slow2");
1431 return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
1432 }
1433# endif
njn25e49d8e72002-09-23 09:36:25 +00001434}
1435
sewardjc1a2cda2005-04-21 17:34:00 +00001436
njn9fb73db2005-03-27 01:55:21 +00001437VGA_REGPARM(2)
sewardjc1a2cda2005-04-21 17:34:00 +00001438void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
njn25e49d8e72002-09-23 09:36:25 +00001439{
sewardjc1a2cda2005-04-21 17:34:00 +00001440 PROF_EVENT(270, "helperc_STOREV1");
1441
1442# if VG_DEBUG_MEMORY >= 2
1443 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1444# else
1445
sewardj23eb2fd2005-04-22 16:29:19 +00001446 const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
sewardjc1a2cda2005-04-21 17:34:00 +00001447 UWord a = (UWord)aA;
1448   /* If any part of 'a' indicated by the mask is 1, 'a' exceeds
1449      the range covered by the primary map, and so we defer to the
1450      slow-path case. */
1451 if (EXPECTED_NOT_TAKEN(a & mask)) {
1452 PROF_EVENT(271, "helperc_STOREV1-slow1");
1453 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1454 return;
1455 }
1456
1457 UWord sec_no = (UWord)(a >> 16);
1458
1459# if VG_DEBUG_MEMORY >= 1
sewardj23eb2fd2005-04-22 16:29:19 +00001460 tl_assert(sec_no < N_PRIMARY_MAP);
sewardjc1a2cda2005-04-21 17:34:00 +00001461# endif
1462
1463 SecMap* sm = primary_map[sec_no];
1464 UWord v_off = a & 0xFFFF;
1465 UWord a_off = v_off >> 3;
sewardj5d28efc2005-04-21 22:16:29 +00001466 UWord abits = (UWord)(sm->abits[a_off]);
sewardjc1a2cda2005-04-21 17:34:00 +00001467 if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
1468 && abits == VGM_BYTE_VALID)) {
1469      /* Handle common case quickly: a is mapped, and the whole
1470         8-byte group covered by its abits byte is addressable. */
1471 ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
1472 } else {
1473 PROF_EVENT(272, "helperc_STOREV1-slow2");
1474 mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
1475 }
1476
1477# endif
njn25e49d8e72002-09-23 09:36:25 +00001478}
1479
1480
sewardjc859fbf2005-04-22 21:10:28 +00001481/*------------------------------------------------------------*/
1482/*--- Functions called directly from generated code: ---*/
1483/*--- Value-check failure handlers. ---*/
1484/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001485
njn5c004e42002-11-18 11:04:50 +00001486void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001487{
njnb8dca862005-03-14 02:42:44 +00001488 MC_(record_value_error) ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001489}
1490
njn5c004e42002-11-18 11:04:50 +00001491void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001492{
njnb8dca862005-03-14 02:42:44 +00001493 MC_(record_value_error) ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001494}
1495
sewardj45d94cc2005-04-20 14:44:11 +00001496//zz void MC_(helperc_value_check2_fail) ( void )
1497//zz {
1498//zz MC_(record_value_error) ( VG_(get_running_tid)(), 2 );
1499//zz }
njn25e49d8e72002-09-23 09:36:25 +00001500
njn5c004e42002-11-18 11:04:50 +00001501void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001502{
njnb8dca862005-03-14 02:42:44 +00001503 MC_(record_value_error) ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001504}
1505
njn9fb73db2005-03-27 01:55:21 +00001506VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001507{
njnb8dca862005-03-14 02:42:44 +00001508 MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001509}
1510
njn25e49d8e72002-09-23 09:36:25 +00001511
sewardj45d94cc2005-04-20 14:44:11 +00001512//zz /*------------------------------------------------------------*/
1513//zz /*--- Metadata get/set functions, for client requests. ---*/
1514//zz /*------------------------------------------------------------*/
1515//zz
1516//zz /* Copy Vbits for src into vbits. Returns: 1 == OK, 2 == alignment
1517//zz error, 3 == addressing error. */
1518//zz static Int mc_get_or_set_vbits_for_client (
1519//zz ThreadId tid,
1520//zz Addr dataV,
1521//zz Addr vbitsV,
1522//zz SizeT size,
1523//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1524//zz )
1525//zz {
1526//zz Bool addressibleD = True;
1527//zz Bool addressibleV = True;
1528//zz UInt* data = (UInt*)dataV;
1529//zz UInt* vbits = (UInt*)vbitsV;
1530//zz SizeT szW = size / 4; /* sigh */
1531//zz SizeT i;
1532//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1533//zz UInt* vbitsP = NULL; /* ditto */
1534//zz
1535//zz /* Check alignment of args. */
1536//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1537//zz return 2;
1538//zz if ((size & 3) != 0)
1539//zz return 2;
1540//zz
1541//zz /* Check that arrays are addressible. */
1542//zz for (i = 0; i < szW; i++) {
1543//zz dataP = &data[i];
1544//zz vbitsP = &vbits[i];
1545//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1546//zz addressibleD = False;
1547//zz break;
1548//zz }
1549//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1550//zz addressibleV = False;
1551//zz break;
1552//zz }
1553//zz }
1554//zz if (!addressibleD) {
1555//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1556//zz setting ? True : False );
1557//zz return 3;
1558//zz }
1559//zz if (!addressibleV) {
1560//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1561//zz setting ? False : True );
1562//zz return 3;
1563//zz }
1564//zz
1565//zz /* Do the copy */
1566//zz if (setting) {
1567//zz /* setting */
1568//zz for (i = 0; i < szW; i++) {
1569//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
1570//zz MC_(record_value_error)(tid, 4);
1571//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1572//zz }
1573//zz } else {
1574//zz /* getting */
1575//zz for (i = 0; i < szW; i++) {
1576//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1577//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1578//zz }
1579//zz }
1580//zz
1581//zz return 1;
1582//zz }
1583//zz
1584//zz
1585//zz /*------------------------------------------------------------*/
1586//zz /*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1587//zz /*------------------------------------------------------------*/
1588//zz
1589//zz /* For the memory leak detector, say whether an entire 64k chunk of
1590//zz address space is possibly in use, or not. If in doubt return
1591//zz True.
1592//zz */
1593//zz static
1594//zz Bool mc_is_valid_64k_chunk ( UInt chunk_number )
1595//zz {
1596//zz tl_assert(chunk_number >= 0 && chunk_number < PRIMARY_SIZE);
1597//zz if (primary_map[chunk_number] == DSM_NOTADDR) {
1598//zz /* Definitely not in use. */
1599//zz return False;
1600//zz } else {
1601//zz return True;
1602//zz }
1603//zz }
1604//zz
1605//zz
1606//zz /* For the memory leak detector, say whether or not a given word
1607//zz address is to be regarded as valid. */
1608//zz static
1609//zz Bool mc_is_valid_address ( Addr a )
1610//zz {
1611//zz UInt vbytes;
1612//zz UChar abits;
1613//zz tl_assert(VG_IS_4_ALIGNED(a));
1614//zz abits = get_abits4_ALIGNED(a);
1615//zz vbytes = get_vbytes4_ALIGNED(a);
1616//zz if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1617//zz return True;
1618//zz } else {
1619//zz return False;
1620//zz }
1621//zz }
sewardja4495682002-10-21 07:29:59 +00001622
1623
nethercote996901a2004-08-03 13:29:09 +00001624/* Leak detector for this tool.  Normally this would just run the
sewardja4495682002-10-21 07:29:59 +00001625   generic leak detector with suitable parameters; that call is
nethercote996901a2004-08-03 13:29:09 +00001626   commented out below, so leak detection is currently disabled. */
njnb8dca862005-03-14 02:42:44 +00001627static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001628{
sewardj45d94cc2005-04-20 14:44:11 +00001629 VG_(printf)("memcheck: leak detection currently disabled\n");
1630 // MAC_(do_detect_memory_leaks) (
1631 // tid, mode, mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001632}
1633
1634
sewardjc859fbf2005-04-22 21:10:28 +00001635/*------------------------------------------------------------*/
1636/*--- Initialisation ---*/
1637/*------------------------------------------------------------*/
1638
1639static void init_shadow_memory ( void )
1640{
1641 Int i;
1642 SecMap* sm;
1643
1644 /* Build the 3 distinguished secondaries */
1645 tl_assert(VGM_BIT_INVALID == 1);
1646 tl_assert(VGM_BIT_VALID == 0);
1647 tl_assert(VGM_BYTE_INVALID == 0xFF);
1648 tl_assert(VGM_BYTE_VALID == 0);
1649
1650 /* Set A invalid, V invalid. */
1651 sm = &sm_distinguished[SM_DIST_NOACCESS];
1652 for (i = 0; i < 65536; i++)
1653 sm->vbyte[i] = VGM_BYTE_INVALID;
1654 for (i = 0; i < 8192; i++)
1655 sm->abits[i] = VGM_BYTE_INVALID;
1656
1657 /* Set A valid, V invalid. */
1658 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1659 for (i = 0; i < 65536; i++)
1660 sm->vbyte[i] = VGM_BYTE_INVALID;
1661 for (i = 0; i < 8192; i++)
1662 sm->abits[i] = VGM_BYTE_VALID;
1663
1664 /* Set A valid, V valid. */
1665 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1666 for (i = 0; i < 65536; i++)
1667 sm->vbyte[i] = VGM_BYTE_VALID;
1668 for (i = 0; i < 8192; i++)
1669 sm->abits[i] = VGM_BYTE_VALID;
1670
1671 /* Set up the primary map. */
1672 /* These entries gradually get overwritten as the used address
1673 space expands. */
1674 for (i = 0; i < N_PRIMARY_MAP; i++)
1675 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
1676
1677   /* auxmap_size and auxmap_used are statically initialised to zero,
1678      so there is nothing to do for them here. */
1679}
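
/* Illustrative note: immediately after init_shadow_memory(), every
   primary_map entry points at the SM_DIST_NOACCESS secondary, whose A
   bits are all VGM_BYTE_INVALID.  So, for example, a 1-byte load from
   any address at this point fails the 'abits == VGM_BYTE_VALID' test in
   MC_(helperc_LOADV1) and drops into mc_LOADVn_slow, which is where an
   addressing error would be reported.  Accesses only start taking the
   fast paths once the relevant ranges have been marked via
   mc_make_writable / mc_make_readable (hooked up in TL_(pre_clo_init)
   below). */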
1680
1681
1682/*------------------------------------------------------------*/
1683/*--- Sanity check machinery (permanently engaged) ---*/
1684/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001685
njn26f02512004-11-22 18:33:15 +00001686Bool TL_(cheap_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001687{
jseward9800fd32004-01-04 23:08:04 +00001688 /* nothing useful we can rapidly check */
sewardj23eb2fd2005-04-22 16:29:19 +00001689 n_sanity_cheap++;
sewardjc1a2cda2005-04-21 17:34:00 +00001690 PROF_EVENT(490, "cheap_sanity_check");
jseward9800fd32004-01-04 23:08:04 +00001691 return True;
njn25e49d8e72002-09-23 09:36:25 +00001692}
1693
njn26f02512004-11-22 18:33:15 +00001694Bool TL_(expensive_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001695{
sewardj23eb2fd2005-04-22 16:29:19 +00001696 Int i, n_secmaps_found;
sewardj45d94cc2005-04-20 14:44:11 +00001697 SecMap* sm;
sewardj23eb2fd2005-04-22 16:29:19 +00001698 Bool bad = False;
njn25e49d8e72002-09-23 09:36:25 +00001699
sewardj23eb2fd2005-04-22 16:29:19 +00001700 n_sanity_expensive++;
sewardjc1a2cda2005-04-21 17:34:00 +00001701 PROF_EVENT(491, "expensive_sanity_check");
1702
sewardj23eb2fd2005-04-22 16:29:19 +00001703 /* Check that the 3 distinguished SMs are still as they should
1704 be. */
njn25e49d8e72002-09-23 09:36:25 +00001705
sewardj45d94cc2005-04-20 14:44:11 +00001706 /* Check A invalid, V invalid. */
1707 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00001708 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00001709 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001710 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001711 for (i = 0; i < 8192; i++)
1712 if (!(sm->abits[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001713 bad = True;
njn25e49d8e72002-09-23 09:36:25 +00001714
sewardj45d94cc2005-04-20 14:44:11 +00001715 /* Check A valid, V invalid. */
1716 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1717 for (i = 0; i < 65536; i++)
1718 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001719 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001720 for (i = 0; i < 8192; i++)
1721 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001722 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001723
1724 /* Check A valid, V valid. */
1725 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1726 for (i = 0; i < 65536; i++)
1727 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001728 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001729 for (i = 0; i < 8192; i++)
1730 if (!(sm->abits[i] == VGM_BYTE_VALID))
sewardj23eb2fd2005-04-22 16:29:19 +00001731 bad = True;
sewardj45d94cc2005-04-20 14:44:11 +00001732
sewardj23eb2fd2005-04-22 16:29:19 +00001733 if (bad) {
1734 VG_(printf)("memcheck expensive sanity: "
1735 "distinguished_secondaries have changed\n");
1736 return False;
1737 }
1738
1739 /* check nonsensical auxmap sizing */
sewardj45d94cc2005-04-20 14:44:11 +00001740 if (auxmap_used > auxmap_size)
sewardj23eb2fd2005-04-22 16:29:19 +00001741 bad = True;
1742
1743 if (bad) {
1744 VG_(printf)("memcheck expensive sanity: "
1745 "nonsensical auxmap sizing\n");
1746 return False;
1747 }
1748
1749 /* check that the number of secmaps issued matches the number that
1750 are reachable (iow, no secmap leaks) */
1751 n_secmaps_found = 0;
1752 for (i = 0; i < N_PRIMARY_MAP; i++) {
1753 if (primary_map[i] == NULL) {
1754 bad = True;
1755 } else {
1756 if (!is_distinguished_sm(primary_map[i]))
1757 n_secmaps_found++;
1758 }
1759 }
1760
1761 for (i = 0; i < auxmap_used; i++) {
1762 if (auxmap[i].sm == NULL) {
1763 bad = True;
1764 } else {
1765 if (!is_distinguished_sm(auxmap[i].sm))
1766 n_secmaps_found++;
1767 }
1768 }
1769
1770 if (n_secmaps_found != n_secmaps_issued)
1771 bad = True;
1772
1773 if (bad) {
1774 VG_(printf)("memcheck expensive sanity: "
1775 "apparent secmap leakage\n");
1776 return False;
1777 }
1778
1779 /* check that auxmap only covers address space that the primary
1780 doesn't */
1781
1782 for (i = 0; i < auxmap_used; i++)
1783 if (auxmap[i].base <= MAX_PRIMARY_ADDRESS)
1784 bad = True;
1785
1786 if (bad) {
1787 VG_(printf)("memcheck expensive sanity: "
1788 "auxmap covers wrong address space\n");
1789 return False;
1790 }
1791
1792   /* TODO: check that there is only one pointer to each secmap
           (expensive). */
njn25e49d8e72002-09-23 09:36:25 +00001793
1794 return True;
1795}
sewardj45d94cc2005-04-20 14:44:11 +00001796
njn25e49d8e72002-09-23 09:36:25 +00001797
njn25e49d8e72002-09-23 09:36:25 +00001798/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001799/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001800/*------------------------------------------------------------*/
1801
njn43c799e2003-04-08 00:08:52 +00001802Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00001803
njn26f02512004-11-22 18:33:15 +00001804Bool TL_(process_cmd_line_option)(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00001805{
njn45270a22005-03-27 01:00:11 +00001806 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00001807 else
njn43c799e2003-04-08 00:08:52 +00001808 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001809
1810 return True;
njn25e49d8e72002-09-23 09:36:25 +00001811}
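
/* For example, the option handled above would typically be given as

      valgrind --tool=memcheck --avoid-strlen-errors=no ./myprog

   (illustrative command line; './myprog' is a stand-in for any client
   program).  Any option not recognised here falls through to
   MAC_(process_common_cmd_line_option). */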
1812
njn26f02512004-11-22 18:33:15 +00001813void TL_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001814{
njn3e884182003-04-15 13:03:23 +00001815 MAC_(print_common_usage)();
1816 VG_(printf)(
1817" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1818 );
1819}
1820
njn26f02512004-11-22 18:33:15 +00001821void TL_(print_debug_usage)(void)
njn3e884182003-04-15 13:03:23 +00001822{
1823 MAC_(print_common_debug_usage)();
1824 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00001825" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00001826 );
njn25e49d8e72002-09-23 09:36:25 +00001827}
1828
nethercote8b76fe52004-11-08 19:20:09 +00001829/*------------------------------------------------------------*/
1830/*--- Client requests ---*/
1831/*------------------------------------------------------------*/
1832
1833/* Client block management:
1834
1835 This is managed as an expanding array of client block descriptors.
1836 Indices of live descriptors are issued to the client, so it can ask
1837 to free them later. Therefore we cannot slide live entries down
1838 over dead ones. Instead we must use free/inuse flags and scan for
1839 an empty slot at allocation time. This in turn means allocation is
1840 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00001841
sewardjedc75ab2005-03-15 23:30:32 +00001842 An unused block has start == size == 0
1843*/
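
/* Client-side sketch (illustrative only, never compiled): how a client
   program might exercise this machinery.  It assumes the request macros
   that memcheck.h provides for these codes are named
   VALGRIND_CREATE_BLOCK and VALGRIND_DISCARD -- check memcheck.h in
   your tree for the exact names and signatures. */
#if 0
#include "memcheck.h"   /* in the client program */

void describe_my_buffer ( void* buf, unsigned long len )
{
   /* Ask Memcheck to remember a description for [buf, buf+len);
      the returned handle is an index into the cgbs[] array below. */
   int handle = VALGRIND_CREATE_BLOCK(buf, len, "my network buffer");

   /* ... use buf ... */

   /* Drop the description again; returns 0 on success, 1 if the
      handle was bogus (see the VG_USERREQ__DISCARD case below). */
   (void) VALGRIND_DISCARD(handle);
}
#endif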
nethercote8b76fe52004-11-08 19:20:09 +00001844
1845typedef
1846 struct {
1847 Addr start;
1848 SizeT size;
1849 ExeContext* where;
sewardjedc75ab2005-03-15 23:30:32 +00001850 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00001851 }
1852 CGenBlock;
1853
1854/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00001855static UInt cgb_size = 0;
1856static UInt cgb_used = 0;
1857static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00001858
1859/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00001860static UInt cgb_used_MAX = 0; /* Max in use. */
1861static UInt cgb_allocs = 0; /* Number of allocs. */
1862static UInt cgb_discards = 0; /* Number of discards. */
1863static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00001864
1865
1866static
njn695c16e2005-03-27 03:40:28 +00001867Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00001868{
1869 UInt i, sz_new;
1870 CGenBlock* cgbs_new;
1871
njn695c16e2005-03-27 03:40:28 +00001872 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00001873
njn695c16e2005-03-27 03:40:28 +00001874 for (i = 0; i < cgb_used; i++) {
1875 cgb_search++;
1876 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00001877 return i;
1878 }
1879
1880 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00001881 if (cgb_used < cgb_size) {
1882 cgb_used++;
1883 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00001884 }
1885
1886 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00001887 tl_assert(cgb_used == cgb_size);
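   /* Geometric growth: start at 10 slots, then double.  This keeps the
      amortised cost of growing low, even though each grow copies the
      whole array. */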
1888 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00001889
1890 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00001891 for (i = 0; i < cgb_used; i++)
1892 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00001893
njn695c16e2005-03-27 03:40:28 +00001894 if (cgbs != NULL)
1895 VG_(free)( cgbs );
1896 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00001897
njn695c16e2005-03-27 03:40:28 +00001898 cgb_size = sz_new;
1899 cgb_used++;
1900 if (cgb_used > cgb_used_MAX)
1901 cgb_used_MAX = cgb_used;
1902 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00001903}
1904
1905
1906static void show_client_block_stats ( void )
1907{
1908 VG_(message)(Vg_DebugMsg,
1909 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00001910 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00001911 );
1912}
1913
1914static Bool find_addr(VgHashNode* sh_ch, void* ap)
1915{
1916 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
1917 Addr a = *(Addr*)ap;
1918
1919 return VG_(addr_is_in_block)(a, m->data, m->size);
1920}
1921
1922static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
1923{
1924 UInt i;
1925 /* VG_(printf)("try to identify %d\n", a); */
1926
1927 /* Perhaps it's a general block ? */
njn695c16e2005-03-27 03:40:28 +00001928 for (i = 0; i < cgb_used; i++) {
1929 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00001930 continue;
njn695c16e2005-03-27 03:40:28 +00001931 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size)) {
nethercote8b76fe52004-11-08 19:20:09 +00001932 MAC_Mempool **d, *mp;
1933
1934 /* OK - maybe it's a mempool, too? */
1935 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
njn695c16e2005-03-27 03:40:28 +00001936 (UWord)cgbs[i].start,
nethercote8b76fe52004-11-08 19:20:09 +00001937 (void*)&d);
1938         if (mp != NULL) {
1939            if (mp->chunks != NULL) {
1940 MAC_Chunk *mc;
1941
1942 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
1943               if (mc != NULL) {
1944 ai->akind = UserG;
1945 ai->blksize = mc->size;
1946 ai->rwoffset = (Int)(a) - (Int)mc->data;
1947 ai->lastchange = mc->where;
1948 return True;
1949 }
1950 }
1951 ai->akind = Mempool;
njn695c16e2005-03-27 03:40:28 +00001952 ai->blksize = cgbs[i].size;
1953 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
1954 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00001955 return True;
1956 }
1957 ai->akind = UserG;
njn695c16e2005-03-27 03:40:28 +00001958 ai->blksize = cgbs[i].size;
1959 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
1960 ai->lastchange = cgbs[i].where;
1961 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00001962 return True;
1963 }
1964 }
1965 return False;
1966}
1967
njn26f02512004-11-22 18:33:15 +00001968Bool TL_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00001969{
1970 Int i;
1971 Bool ok;
1972 Addr bad_addr;
1973
njnfc26ff92004-11-22 19:12:49 +00001974 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00001975 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
1976 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
1977 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
1978 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
1979 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
1980 && VG_USERREQ__MEMPOOL_FREE != arg[0])
1981 return False;
1982
1983 switch (arg[0]) {
1984 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
1985 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
1986 if (!ok)
1987 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/True,
1988 /*isUnaddr*/True );
1989 *ret = ok ? (UWord)NULL : bad_addr;
1990 break;
1991
1992 case VG_USERREQ__CHECK_READABLE: { /* check readable */
1993 MC_ReadResult res;
1994 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
1995 if (MC_AddrErr == res)
1996 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
1997 /*isUnaddr*/True );
1998 else if (MC_ValueErr == res)
1999 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
2000 /*isUnaddr*/False );
2001 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
2002 break;
2003 }
2004
2005 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002006 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00002007 *ret = 0; /* return value is meaningless */
2008 break;
2009
2010 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002011 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002012 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002013 break;
2014
2015 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002016 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002017 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002018 break;
2019
2020 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002021 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002022 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002023 break;
2024
sewardjedc75ab2005-03-15 23:30:32 +00002025 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
2026 if (arg[1] != 0 && arg[2] != 0) {
njn695c16e2005-03-27 03:40:28 +00002027 i = alloc_client_block();
2028 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2029 cgbs[i].start = arg[1];
2030 cgbs[i].size = arg[2];
2031 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2032 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002033
2034 *ret = i;
2035 } else
2036 *ret = -1;
2037 break;
2038
nethercote8b76fe52004-11-08 19:20:09 +00002039 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002040 if (cgbs == NULL
2041 || arg[2] >= cgb_used ||
2042 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002043 *ret = 1;
2044 } else {
njn695c16e2005-03-27 03:40:28 +00002045 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2046 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2047 VG_(free)(cgbs[arg[2]].desc);
2048 cgb_discards++;
sewardjedc75ab2005-03-15 23:30:32 +00002049 *ret = 0;
2050 }
nethercote8b76fe52004-11-08 19:20:09 +00002051 break;
2052
sewardj45d94cc2005-04-20 14:44:11 +00002053//zz case VG_USERREQ__GET_VBITS:
2054//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2055//zz error. */
2056//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2057//zz *ret = mc_get_or_set_vbits_for_client
2058//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2059//zz break;
2060//zz
2061//zz case VG_USERREQ__SET_VBITS:
2062//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2063//zz error. */
2064//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2065//zz *ret = mc_get_or_set_vbits_for_client
2066//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2067//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002068
2069 default:
2070 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2071 return True;
2072 } else {
2073 VG_(message)(Vg_UserMsg,
2074 "Warning: unknown memcheck client request code %llx",
2075 (ULong)arg[0]);
2076 return False;
2077 }
2078 }
2079 return True;
2080}
njn25e49d8e72002-09-23 09:36:25 +00002081
2082/*------------------------------------------------------------*/
2083/*--- Setup ---*/
2084/*------------------------------------------------------------*/
2085
njn26f02512004-11-22 18:33:15 +00002086void TL_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00002087{
njn810086f2002-11-14 12:42:47 +00002088 VG_(details_name) ("Memcheck");
2089 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00002090 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00002091 VG_(details_copyright_author)(
njn53612422005-03-12 16:22:54 +00002092 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
nethercote421281e2003-11-20 16:20:55 +00002093 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9ebf9fd2004-11-28 16:56:51 +00002094 VG_(details_avg_translation_sizeB) ( 370 );
njn25e49d8e72002-09-23 09:36:25 +00002095
njn8a97c6d2005-03-31 04:37:24 +00002096 VG_(basic_tool_funcs) (TL_(post_clo_init),
2097 TL_(instrument),
2098 TL_(fini));
2099
njn810086f2002-11-14 12:42:47 +00002100 VG_(needs_core_errors) ();
njn8a97c6d2005-03-31 04:37:24 +00002101 VG_(needs_tool_errors) (TL_(eq_Error),
2102 TL_(pp_Error),
2103 TL_(update_extra),
2104 TL_(recognised_suppression),
2105 TL_(read_extra_suppression_info),
2106 TL_(error_matches_suppression),
2107 TL_(get_error_name),
2108 TL_(print_extra_suppression_info));
njn810086f2002-11-14 12:42:47 +00002109 VG_(needs_libc_freeres) ();
njn8a97c6d2005-03-31 04:37:24 +00002110 VG_(needs_command_line_options)(TL_(process_cmd_line_option),
2111 TL_(print_usage),
2112 TL_(print_debug_usage));
2113 VG_(needs_client_requests) (TL_(handle_client_request));
2114 VG_(needs_sanity_checks) (TL_(cheap_sanity_check),
2115 TL_(expensive_sanity_check));
fitzhardinge98abfc72003-12-16 02:05:15 +00002116 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00002117
njn8a97c6d2005-03-31 04:37:24 +00002118 VG_(malloc_funcs) (TL_(malloc),
2119 TL_(__builtin_new),
2120 TL_(__builtin_vec_new),
2121 TL_(memalign),
2122 TL_(calloc),
2123 TL_(free),
2124 TL_(__builtin_delete),
2125 TL_(__builtin_vec_delete),
2126 TL_(realloc),
2127 MALLOC_REDZONE_SZB );
2128
njn3e884182003-04-15 13:03:23 +00002129 MAC_( new_mem_heap) = & mc_new_mem_heap;
nethercote8b76fe52004-11-08 19:20:09 +00002130 MAC_( ban_mem_heap) = & mc_make_noaccess;
njn3e884182003-04-15 13:03:23 +00002131 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
nethercote8b76fe52004-11-08 19:20:09 +00002132 MAC_( die_mem_heap) = & mc_make_noaccess;
2133 MAC_(check_noaccess) = & mc_check_noaccess;
njn3e884182003-04-15 13:03:23 +00002134
fitzhardinge98abfc72003-12-16 02:05:15 +00002135 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
nethercote8b76fe52004-11-08 19:20:09 +00002136 VG_(init_new_mem_stack_signal) ( & mc_make_writable );
2137 VG_(init_new_mem_brk) ( & mc_make_writable );
njnb8dca862005-03-14 02:42:44 +00002138 VG_(init_new_mem_mmap) ( & mc_new_mem_mmap );
njn25e49d8e72002-09-23 09:36:25 +00002139
fitzhardinge98abfc72003-12-16 02:05:15 +00002140 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
njn3e884182003-04-15 13:03:23 +00002141
nethercote8b76fe52004-11-08 19:20:09 +00002142 VG_(init_die_mem_stack_signal) ( & mc_make_noaccess );
2143 VG_(init_die_mem_brk) ( & mc_make_noaccess );
2144 VG_(init_die_mem_munmap) ( & mc_make_noaccess );
njn3e884182003-04-15 13:03:23 +00002145
fitzhardinge98abfc72003-12-16 02:05:15 +00002146 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2147 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2148 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2149 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2150 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2151 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002152
fitzhardinge98abfc72003-12-16 02:05:15 +00002153 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2154 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2155 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2156 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2157 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2158 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002159
nethercote8b76fe52004-11-08 19:20:09 +00002160 VG_(init_ban_mem_stack) ( & mc_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00002161
fitzhardinge98abfc72003-12-16 02:05:15 +00002162 VG_(init_pre_mem_read) ( & mc_check_is_readable );
2163 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2164 VG_(init_pre_mem_write) ( & mc_check_is_writable );
njncf45fd42004-11-24 16:30:22 +00002165 VG_(init_post_mem_write) ( & mc_post_mem_write );
nethercote8b76fe52004-11-08 19:20:09 +00002166
2167 VG_(init_pre_reg_read) ( & mc_pre_reg_read );
njn25e49d8e72002-09-23 09:36:25 +00002168
njncf45fd42004-11-24 16:30:22 +00002169 VG_(init_post_reg_write) ( & mc_post_reg_write );
fitzhardinge98abfc72003-12-16 02:05:15 +00002170 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00002171
njn31066fd2005-03-26 00:42:02 +00002172 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2173 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2174 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00002175
njn43c799e2003-04-08 00:08:52 +00002176 /* Additional block description for VG_(describe_addr)() */
nethercote8b76fe52004-11-08 19:20:09 +00002177 MAC_(describe_addr_supp) = client_perm_maybe_describe;
njn43c799e2003-04-08 00:08:52 +00002178
njnd04b7c62002-10-03 14:05:52 +00002179 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00002180 MAC_(common_pre_clo_init)();
sewardjc1a2cda2005-04-21 17:34:00 +00002181
2182 tl_assert( TL_(expensive_sanity_check)() );
njn5c004e42002-11-18 11:04:50 +00002183}
2184
njn26f02512004-11-22 18:33:15 +00002185void TL_(post_clo_init) ( void )
njn5c004e42002-11-18 11:04:50 +00002186{
2187}
2188
njn26f02512004-11-22 18:33:15 +00002189void TL_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002190{
nethercote8b76fe52004-11-08 19:20:09 +00002191 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002192
sewardj23eb2fd2005-04-22 16:29:19 +00002193 Int i, n_accessible_dist;
2194 SecMap* sm;
2195
sewardj45d94cc2005-04-20 14:44:11 +00002196 if (VG_(clo_verbosity) > 1) {
2197 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002198 " memcheck: sanity checks: %d cheap, %d expensive",
2199 n_sanity_cheap, n_sanity_expensive );
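      /* Each auxmap entry and each secondary map covers 64KB, hence the
         "* 64" (KB) and "/ 16" (MB) conversions in the figures below. */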
sewardj45d94cc2005-04-20 14:44:11 +00002200 VG_(message)(Vg_DebugMsg,
sewardj23eb2fd2005-04-22 16:29:19 +00002201 " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2202 auxmap_used,
2203 auxmap_used * 64,
2204 auxmap_used / 16 );
2205 VG_(message)(Vg_DebugMsg,
2206 " memcheck: auxmaps: %lld searches, %lld comparisons",
sewardj45d94cc2005-04-20 14:44:11 +00002207 n_auxmap_searches, n_auxmap_cmps );
sewardj23eb2fd2005-04-22 16:29:19 +00002208 VG_(message)(Vg_DebugMsg,
2209 " memcheck: secondaries: %d issued (%dk, %dM)",
2210 n_secmaps_issued,
2211 n_secmaps_issued * 64,
2212 n_secmaps_issued / 16 );
2213
2214 n_accessible_dist = 0;
2215 for (i = 0; i < N_PRIMARY_MAP; i++) {
2216 sm = primary_map[i];
2217 if (is_distinguished_sm(sm)
2218 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2219 n_accessible_dist ++;
2220 }
2221 for (i = 0; i < auxmap_used; i++) {
2222 sm = auxmap[i].sm;
2223 if (is_distinguished_sm(sm)
2224 && sm != &sm_distinguished[SM_DIST_NOACCESS])
2225 n_accessible_dist ++;
2226 }
2227
2228 VG_(message)(Vg_DebugMsg,
2229 " memcheck: secondaries: %d accessible and distinguished (%dk, %dM)",
2230 n_accessible_dist,
2231 n_accessible_dist * 64,
2232 n_accessible_dist / 16 );
2233
sewardj45d94cc2005-04-20 14:44:11 +00002234 }
2235
njn5c004e42002-11-18 11:04:50 +00002236 if (0) {
2237 VG_(message)(Vg_DebugMsg,
2238 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002239 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002240 }
njn25e49d8e72002-09-23 09:36:25 +00002241}
2242
njn26f02512004-11-22 18:33:15 +00002243VG_DETERMINE_INTERFACE_VERSION(TL_(pre_clo_init), 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002244
njn25e49d8e72002-09-23 09:36:25 +00002245/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002246/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002247/*--------------------------------------------------------------------*/