
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                  mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* TODO urgently

   sanity check:
      auxmap only covers address space that the primary doesn't
      auxmap entries non-duplicated (expensive)

   types of helper functions

   set_address_range_perms to notice when a distinguished secondary
   will work, and use that (viz, re-implement compression scheme)

   profile

   reinstate fast-path cases
*/


#include "mc_include.h"
#include "memcheck.h"   /* for client requests */
//#include "vg_profile.c"


typedef enum {
   MC_Ok = 5, MC_AddrErr = 6, MC_ValueErr = 7
} MC_ReadResult;

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)


/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* --------------- Basic configuration --------------- */

/* The number of entries in the primary map can be altered.  However
   we hardwire the assumption that each secondary map covers precisely
   64k of address space. */

#define N_PRIMARY_BITS  16
#define N_PRIMARY_MAPS  ((1 << N_PRIMARY_BITS)-1)

#define MAX_PRIMARY_ADDRESS (Addr)(((Addr)65536) * N_PRIMARY_MAPS)


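/* Illustrative sketch (exposition only, not part of the tool): how an
   address splits across the two-level structure set up below.  The
   names pm_idx/sm_off are hypothetical, for this comment only.

      // For an address covered by the main primary map:
      //   Addr  a      = ...;
      //   UWord pm_idx = a >> 16;      // selects a SecMap* in primary_map[]
      //   UWord sm_off = a & 0xFFFF;   // byte offset within that SecMap
      //   UChar vbyte  = primary_map[pm_idx]->vbyte[sm_off];

   Addresses above MAX_PRIMARY_ADDRESS instead go via the auxiliary
   primary map (see find_or_alloc_in_auxmap below). */
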
/* --------------- Secondary maps --------------- */

typedef
   struct {
      UChar abits[8192];
      UChar vbyte[65536];
   }
   SecMap;

/* 3 distinguished secondary maps, one for no-access, one for
   accessible but undefined, and one for accessible and defined.
   Distinguished secondaries may never be modified.
*/
#define SM_DIST_NOACCESS          0
#define SM_DIST_ACCESS_UNDEFINED  1
#define SM_DIST_ACCESS_DEFINED    2

static SecMap sm_distinguished[3];

static inline Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
             || dist_sm == &sm_distinguished[1]
             || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(shadow_alloc)(sizeof(SecMap));
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   return new_sm;
}


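/* Sketch (illustrative only) of the copy-on-write discipline that
   copy_for_writing supports: a distinguished secondary is shared
   read-only by many primary-map entries, so the first write to any
   address it covers must first install a private copy, e.g.
   (pm_idx/sm_off are expository names)

      if (is_distinguished_sm(primary_map[pm_idx]))
         primary_map[pm_idx] = copy_for_writing(primary_map[pm_idx]);
      primary_map[pm_idx]->vbyte[sm_off] = VGM_BYTE_VALID;

   get_secmap_writable below applies this pattern for both the main
   and the auxiliary primary maps. */
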
/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAPS << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAPS];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* An expanding array of AuxMapEnts. */
#define N_AUXMAPS 500 /* HACK */
static AuxMapEnt  hacky_auxmaps[N_AUXMAPS];
static Int        auxmap_size = N_AUXMAPS;
static Int        auxmap_used = 0;
static AuxMapEnt* auxmap      = &hacky_auxmaps[0];

/* Auxmap statistics */
static ULong n_auxmap_searches = 0;
static ULong n_auxmap_cmps     = 0;


/* Find an entry in the auxiliary map.  If an entry is found, move it
   one step closer to the front of the array, then return its address.
   If an entry is not found, allocate one.  Note carefully that because
   each call potentially rearranges the entries, each call to this
   function invalidates ALL AuxMapEnt*s previously obtained by calling
   this fn.
*/
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   UWord i;
   tl_assert(a > MAX_PRIMARY_ADDRESS);

   a &= ~(Addr)0xFFFF;

   /* Search .. */
   n_auxmap_searches++;
   for (i = 0; i < auxmap_used; i++) {
      if (auxmap[i].base == a)
         break;
   }
   n_auxmap_cmps += (ULong)(i+1);

   if (i < auxmap_used) {
      /* Found it.  Nudge it a bit closer to the front. */
      if (i > 0) {
         AuxMapEnt tmp = auxmap[i-1];
         auxmap[i-1] = auxmap[i];
         auxmap[i] = tmp;
         i--;
      }
      return &auxmap[i];
   }

   /* We didn't find it.  Hmm.  This is a new piece of address space.
      We'll need to allocate a new AuxMap entry for it. */
   if (auxmap_used >= auxmap_size) {
      tl_assert(auxmap_used == auxmap_size);
      /* Out of auxmap entries. */
      tl_assert2(0, "failed to expand the auxmap table");
   }

   tl_assert(auxmap_used < auxmap_size);

   auxmap[auxmap_used].base = a & ~(Addr)0xFFFF;
   auxmap[auxmap_used].sm   = &sm_distinguished[SM_DIST_NOACCESS];

   if (0)
      VG_(printf)("new auxmap, base = 0x%llx\n",
                  (ULong)auxmap[auxmap_used].base );

   auxmap_used++;
   return &auxmap[auxmap_used-1];
}


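/* Usage note (illustrative, not from the tool itself): because of the
   move-to-front step above, don't hold on to the returned pointer
   across another lookup -- copy out the fields you need instead:

      AuxMapEnt* am = find_or_alloc_in_auxmap(a1);
      SecMap*    sm = am->sm;             // take what is needed now
      (void)find_or_alloc_in_auxmap(a2);  // 'am' may now refer to a
                                          // different entry
*/
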
/* --------------- SecMap fundamentals --------------- */

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static SecMap* get_secmap_readable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      return primary_map[ pm_off ];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      return am->sm;
   }
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_writable ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      UWord pm_off = a >> 16;
      if (is_distinguished_sm(primary_map[ pm_off ]))
         primary_map[pm_off] = copy_for_writing(primary_map[pm_off]);
      return primary_map[pm_off];
   } else {
      AuxMapEnt* am = find_or_alloc_in_auxmap(a);
      if (is_distinguished_sm(am->sm))
         am->sm = copy_for_writing(am->sm);
      return am->sm;
   }
}

/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byte with significance 'byteno'
   (0 == least significant) in a wordszB-sized word, given the
   specified endianness. */
static inline UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}

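/* Worked example (illustrative): for a 4-byte word at address 'a',
   the most significant byte has significance 3, so

      byte_offset_w(4, False, 3) == 3   // little-endian: MSB at a+3
      byte_offset_w(4, True,  3) == 0   // big-endian:    MSB at a+0
      byte_offset_w(4, False, 0) == 0   // little-endian: LSB at a+0
*/
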

/* --------------- Fundamental functions --------------- */

static
void get_abit_and_vbyte ( /*OUT*/UWord* abit,
                          /*OUT*/UWord* vbyte,
                          Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   *vbyte = 0xFF & sm->vbyte[a & 0xFFFF];
   *abit  = read_bit_array(sm->abits, a & 0xFFFF);
}

static
UWord get_abit ( Addr a )
{
   SecMap* sm = get_secmap_readable(a);
   return read_bit_array(sm->abits, a & 0xFFFF);
}

static
void set_abit_and_vbyte ( Addr a, UWord abit, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
   write_bit_array(sm->abits, a & 0xFFFF, abit);
}

static
void set_vbyte ( Addr a, UWord vbyte )
{
   SecMap* sm = get_secmap_writable(a);
   sm->vbyte[a & 0xFFFF] = 0xFF & vbyte;
}


/* --------------- Load/store slow cases. --------------- */

static
ULong mc_LOADVn_slow ( Addr a, SizeT szB, Bool bigendian )
{
   /* Make up a result V word, which contains the loaded V bytes for
      addressable bytes and Undefined for unaddressable bytes.
      Iterate over the bytes in the word, from the most significant
      down to the least. */
   ULong vw          = VGM_WORD64_INVALID;
   SizeT i           = szB-1;
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  aok;
   UWord abit, vbyte;

   PROF_EVENT(70);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   while (True) {
      ai = a+byte_offset_w(szB,bigendian,i);
      get_abit_and_vbyte(&abit, &vbyte, ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      vw <<= 8;
      vw |= 0xFF & (aok ? vbyte : VGM_BYTE_INVALID);
      if (i == 0) break;
      i--;
   }

   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   //if (n_addrs_bad == n)
   //   vw = VGM_WORD64_VALID;
   return vw;
}

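/* Worked example (illustrative): a 2-byte little-endian load from 'a'
   where a+0 is addressable with V byte 0x00 (defined) and a+1 is
   unaddressable.  The loop visits a+1 first (most significant), so

      vw == 0xFFFFFFFFFFFFFF00

   i.e. the low byte is defined, byte 1 (and the unused upper bytes of
   the 64-bit V word) are undefined, and a single address error for
   the 2-byte access at 'a' is reported. */
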

static
void mc_STOREVn_slow ( Addr a, SizeT szB, UWord vbytes, Bool bigendian )
{
   SizeT i;
   SizeT n_addrs_bad = 0;
   UWord abit;
   Bool  aok;
   Addr  ai;

   PROF_EVENT(71);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the
      location. */
   for (i = 0; i < szB; i++) {
      ai = a+byte_offset_w(szB,bigendian,i);
      abit = get_abit(ai);
      aok = abit == VGM_BIT_VALID;
      if (!aok)
         n_addrs_bad++;
      set_vbyte(ai, vbytes & 0xFF );
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}


///////////////////////////////////////////////////////////////




/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////

//zz #if 0  /* this is the old implementation */
//zz
//zz /* Define to debug the mem audit system. */
//zz /* #define VG_DEBUG_MEMORY */
//zz
//zz
//zz /*------------------------------------------------------------*/
//zz /*--- Low-level support for memory checking.               ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz /* All reads and writes are checked against a memory map, which
//zz    records the state of all memory in the process.  The memory map is
//zz    organised like this:
//zz
//zz    The top 16 bits of an address are used to index into a top-level
//zz    map table, containing 65536 entries.  Each entry is a pointer to a
//zz    second-level map, which records the accessibility and validity
//zz    permissions for the 65536 bytes indexed by the lower 16 bits of the
//zz    address.  Each byte is represented by nine bits, one indicating
//zz    accessibility, the other eight validity.  So each second-level map
//zz    contains 73728 bytes.  This two-level arrangement conveniently
//zz    divides the 4G address space into 64k lumps, each size 64k bytes.
//zz
//zz    All entries in the primary (top-level) map must point to a valid
//zz    secondary (second-level) map.  Since most of the 4G of address
//zz    space will not be in use -- ie, not mapped at all -- there is a
//zz    distinguished secondary map, which indicates `not addressable and
//zz    not valid' for all bytes.  Entries in the primary map for
//zz    which the entire 64k is not in use at all point at this
//zz    distinguished map.
//zz
//zz    There are actually 4 distinguished secondaries.  These are used to
//zz    represent a memory range which is either not addressable (validity
//zz    doesn't matter), addressable+not valid, or addressable+valid; the
//zz    fourth combination (not addressable but valid) is meaningless, so
//zz    one of the four is redundant.
//zz
//zz    [...] lots of stuff deleted due to out of date-ness
//zz
//zz    As a final optimisation, the alignment and address checks for
//zz    4-byte loads and stores are combined in a neat way.  The primary
//zz    map is extended to have 262144 entries (2^18), rather than 2^16.
//zz    The top 3/4 of these entries are permanently set to the
//zz    distinguished secondary map.  For a 4-byte load/store, the
//zz    top-level map is indexed not with (addr >> 16) but instead f(addr),
//zz    where
//zz
//zz    f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
//zz     = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
//zz     = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
//zz
//zz    ie the lowest two bits are placed above the 16 high address bits.
//zz    If either of these two bits is nonzero, the address is misaligned;
//zz    this will select a secondary map from the upper 3/4 of the primary
//zz    map.  Because this is always the distinguished secondary map, a
//zz    (bogus) address check failure will result.  The failure handling
//zz    code can then figure out whether this is a genuine addr check
//zz    failure or whether it is a possibly-legitimate access at a
//zz    misaligned address.
//zz */
//zz
//zz /*------------------------------------------------------------*/
//zz /*--- Function declarations.                               ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz static ULong mc_rd_V8_SLOWLY ( Addr a );
//zz static UInt  mc_rd_V4_SLOWLY ( Addr a );
//zz static UInt  mc_rd_V2_SLOWLY ( Addr a );
//zz static UInt  mc_rd_V1_SLOWLY ( Addr a );
//zz
//zz static void  mc_wr_V8_SLOWLY ( Addr a, ULong vbytes );
//zz static void  mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
//zz static void  mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
//zz static void  mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );
//zz
//zz /*------------------------------------------------------------*/
//zz /*--- Data defns.                                          ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz typedef
//zz    struct {
//zz       UChar abits[SECONDARY_SIZE/8];
//zz       UChar vbyte[SECONDARY_SIZE];
//zz    }
//zz    SecMap;
//zz
//zz
//zz static SecMap* primary_map[ /*PRIMARY_SIZE*/ PRIMARY_SIZE*4 ];
//zz
//zz #define DSM_IDX(a, v) ((((a)&1) << 1) + ((v)&1))
//zz
//zz /* 4 secondary maps, but one is redundant (because the !addressable &&
//zz    valid state is meaningless) */
//zz static const SecMap distinguished_secondary_maps[4] = {
//zz #define INIT(a, v) \
//zz    [ DSM_IDX(a, v) ] = { { [0 ... (SECONDARY_SIZE/8)-1] = BIT_EXPAND(a) }, \
//zz                          { [0 ... SECONDARY_SIZE-1] = BIT_EXPAND(a|v) } }
//zz    INIT(VGM_BIT_VALID,   VGM_BIT_VALID),
//zz    INIT(VGM_BIT_VALID,   VGM_BIT_INVALID),
//zz    INIT(VGM_BIT_INVALID, VGM_BIT_VALID),
//zz    INIT(VGM_BIT_INVALID, VGM_BIT_INVALID),
//zz #undef INIT
//zz };
//zz #define N_SECONDARY_MAPS  (sizeof(distinguished_secondary_maps)/sizeof(*distinguished_secondary_maps))
//zz
//zz #define DSM(a,v)  ((SecMap *)&distinguished_secondary_maps[DSM_IDX(a, v)])
//zz
//zz #define DSM_NOTADDR        DSM(VGM_BIT_INVALID, VGM_BIT_INVALID)
//zz #define DSM_ADDR_NOTVALID  DSM(VGM_BIT_VALID,   VGM_BIT_INVALID)
//zz #define DSM_ADDR_VALID     DSM(VGM_BIT_VALID,   VGM_BIT_VALID)

static void init_shadow_memory ( void )
{
   Int     i;
   SecMap* sm;

   /* Build the 3 distinguished secondaries */
   tl_assert(VGM_BIT_INVALID == 1);
   tl_assert(VGM_BIT_VALID == 0);
   tl_assert(VGM_BYTE_INVALID == 0xFF);
   tl_assert(VGM_BYTE_VALID == 0);

   /* Set A invalid, V invalid. */
   sm = &sm_distinguished[SM_DIST_NOACCESS];
   for (i = 0; i < 65536; i++)
      sm->vbyte[i] = VGM_BYTE_INVALID;
   for (i = 0; i < 8192; i++)
      sm->abits[i] = VGM_BYTE_INVALID;

   /* Set A valid, V invalid. */
   sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
   for (i = 0; i < 65536; i++)
      sm->vbyte[i] = VGM_BYTE_INVALID;
   for (i = 0; i < 8192; i++)
      sm->abits[i] = VGM_BYTE_VALID;

   /* Set A valid, V valid. */
   sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
   for (i = 0; i < 65536; i++)
      sm->vbyte[i] = VGM_BYTE_VALID;
   for (i = 0; i < 8192; i++)
      sm->abits[i] = VGM_BYTE_VALID;

   /* Set up the primary map. */
   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < N_PRIMARY_MAPS; i++)
      primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];

   /* auxmap_size = auxmap_used = 0;
      no ... these are statically initialised */

   tl_assert( TL_(expensive_sanity_check)() );
}


sewardj45d94cc2005-04-20 14:44:11 +0000531//zz /*------------------------------------------------------------*/
532//zz /*--- Basic bitmap management, reading and writing. ---*/
533//zz /*------------------------------------------------------------*/
534//zz
535//zz /* Allocate and initialise a secondary map. */
536//zz
537//zz static SecMap* alloc_secondary_map ( __attribute__ ((unused))
538//zz Char* caller,
539//zz const SecMap *prototype)
540//zz {
541//zz SecMap* map;
542//zz PROF_EVENT(10);
543//zz
544//zz map = (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));
545//zz
546//zz VG_(memcpy)(map, prototype, sizeof(*map));
547//zz
548//zz /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
549//zz return map;
550//zz }
551//zz
552//zz
553//zz /* Basic reading/writing of the bitmaps, for byte-sized accesses. */
554//zz
555//zz static __inline__ UChar get_abit ( Addr a )
556//zz {
557//zz SecMap* sm = primary_map[PM_IDX(a)];
558//zz UInt sm_off = SM_OFF(a);
559//zz PROF_EVENT(20);
560//zz # if 0
561//zz if (IS_DISTINGUISHED_SM(sm))
562//zz VG_(message)(Vg_DebugMsg,
563//zz "accessed distinguished 2ndary (A)map! 0x%x\n", a);
564//zz # endif
565//zz return BITARR_TEST(sm->abits, sm_off)
566//zz ? VGM_BIT_INVALID : VGM_BIT_VALID;
567//zz }
568//zz
569//zz static __inline__ UChar get_vbyte ( Addr a )
570//zz {
571//zz SecMap* sm = primary_map[PM_IDX(a)];
572//zz UInt sm_off = SM_OFF(a);
573//zz PROF_EVENT(21);
574//zz # if 0
575//zz if (IS_DISTINGUISHED_SM(sm))
576//zz VG_(message)(Vg_DebugMsg,
577//zz "accessed distinguished 2ndary (V)map! 0x%x\n", a);
578//zz # endif
579//zz return sm->vbyte[sm_off];
580//zz }
581//zz
582//zz static /* __inline__ */ void set_abit ( Addr a, UChar abit )
583//zz {
584//zz SecMap* sm;
585//zz UInt sm_off;
586//zz PROF_EVENT(22);
587//zz ENSURE_MAPPABLE(a, "set_abit");
588//zz sm = primary_map[PM_IDX(a)];
589//zz sm_off = SM_OFF(a);
590//zz if (abit)
591//zz BITARR_SET(sm->abits, sm_off);
592//zz else
593//zz BITARR_CLEAR(sm->abits, sm_off);
594//zz }
595//zz
596//zz static __inline__ void set_vbyte ( Addr a, UChar vbyte )
597//zz {
598//zz SecMap* sm;
599//zz UInt sm_off;
600//zz PROF_EVENT(23);
601//zz ENSURE_MAPPABLE(a, "set_vbyte");
602//zz sm = primary_map[PM_IDX(a)];
603//zz sm_off = SM_OFF(a);
604//zz sm->vbyte[sm_off] = vbyte;
605//zz }
606//zz
607//zz
608//zz /* Reading/writing of the bitmaps, for aligned word-sized accesses. */
609//zz
610//zz static __inline__ UChar get_abits4_ALIGNED ( Addr a )
611//zz {
612//zz SecMap* sm;
613//zz UInt sm_off;
614//zz UChar abits8;
615//zz PROF_EVENT(24);
616//zz # ifdef VG_DEBUG_MEMORY
617//zz tl_assert(VG_IS_4_ALIGNED(a));
618//zz # endif
619//zz sm = primary_map[PM_IDX(a)];
620//zz sm_off = SM_OFF(a);
621//zz abits8 = sm->abits[sm_off >> 3];
622//zz abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
623//zz abits8 &= 0x0F;
624//zz return abits8;
625//zz }
626//zz
627//zz static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
628//zz {
629//zz SecMap* sm = primary_map[PM_IDX(a)];
630//zz UInt sm_off = SM_OFF(a);
631//zz PROF_EVENT(25);
632//zz # ifdef VG_DEBUG_MEMORY
633//zz tl_assert(VG_IS_4_ALIGNED(a));
634//zz # endif
635//zz return ((UInt*)(sm->vbyte))[sm_off >> 2];
636//zz }
637//zz
638//zz
639//zz static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
640//zz {
641//zz SecMap* sm;
642//zz UInt sm_off;
643//zz ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
644//zz sm = primary_map[PM_IDX(a)];
645//zz sm_off = SM_OFF(a);
646//zz PROF_EVENT(23);
647//zz # ifdef VG_DEBUG_MEMORY
648//zz tl_assert(VG_IS_4_ALIGNED(a));
649//zz # endif
650//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
651//zz }


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, SizeT len,
                                      UWord example_a_bit,
                                      UWord example_v_bit )
{
   SizeT i;

   UWord example_vbyte = 1 & example_v_bit;
   example_vbyte |= (example_vbyte << 1);
   example_vbyte |= (example_vbyte << 2);
   example_vbyte |= (example_vbyte << 4);

   tl_assert(sizeof(SizeT) == sizeof(Addr));

   if (0 && len >= 4096)
      VG_(printf)("s_a_r_p(0x%llx, %d, %d,%d)\n",
                  (ULong)a, len, example_a_bit, example_v_bit);

   if (len == 0)
      return;

   for (i = 0; i < len; i++) {
      set_abit_and_vbyte(a+i, example_a_bit, example_vbyte);
   }
}

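/* Worked example (illustrative): the bit-smearing above turns a single
   example V bit into a whole example V byte:

      example_v_bit == 1  ->  example_vbyte == 0xFF   // all bits undefined
      example_v_bit == 0  ->  example_vbyte == 0x00   // all bits defined

   so the per-byte loop can hand set_abit_and_vbyte one A bit and one
   V byte per address. */
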
sewardj45d94cc2005-04-20 14:44:11 +0000683//zz {
684//zz UChar vbyte, abyte8;
685//zz UInt vword4, sm_off;
686//zz SecMap* sm;
687//zz
688//zz PROF_EVENT(30);
689//zz
690//zz if (len == 0)
691//zz return;
692//zz
693//zz if (VG_(clo_verbosity) > 0) {
694//zz if (len > 100 * 1000 * 1000) {
695//zz VG_(message)(Vg_UserMsg,
696//zz "Warning: set address range perms: "
697//zz "large range %u, a %d, v %d",
698//zz len, example_a_bit, example_v_bit );
699//zz }
700//zz }
701//zz
702//zz VGP_PUSHCC(VgpSetMem);
703//zz
704//zz /* Requests to change permissions of huge address ranges may
705//zz indicate bugs in our machinery. 30,000,000 is arbitrary, but so
706//zz far all legitimate requests have fallen beneath that size. */
707//zz /* 4 Mar 02: this is just stupid; get rid of it. */
708//zz /* tl_assert(len < 30000000); */
709//zz
710//zz /* Check the permissions make sense. */
711//zz tl_assert(example_a_bit == VGM_BIT_VALID
712//zz || example_a_bit == VGM_BIT_INVALID);
713//zz tl_assert(example_v_bit == VGM_BIT_VALID
714//zz || example_v_bit == VGM_BIT_INVALID);
715//zz if (example_a_bit == VGM_BIT_INVALID)
716//zz tl_assert(example_v_bit == VGM_BIT_INVALID);
717//zz
718//zz /* The validity bits to write. */
719//zz vbyte = example_v_bit==VGM_BIT_VALID
720//zz ? VGM_BYTE_VALID : VGM_BYTE_INVALID;
721//zz
722//zz /* In order that we can charge through the address space at 8
723//zz bytes/main-loop iteration, make up some perms. */
724//zz abyte8 = BIT_EXPAND(example_a_bit);
725//zz vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
726//zz
727//zz # ifdef VG_DEBUG_MEMORY
728//zz /* Do it ... */
729//zz while (True) {
730//zz PROF_EVENT(31);
731//zz if (len == 0) break;
732//zz set_abit ( a, example_a_bit );
733//zz set_vbyte ( a, vbyte );
734//zz a++;
735//zz len--;
736//zz }
737//zz
738//zz # else
739//zz /* Slowly do parts preceding 8-byte alignment. */
740//zz while (True) {
741//zz PROF_EVENT(31);
742//zz if (len == 0) break;
743//zz if ((a % 8) == 0) break;
744//zz set_abit ( a, example_a_bit );
745//zz set_vbyte ( a, vbyte );
746//zz a++;
747//zz len--;
748//zz }
749//zz
750//zz if (len == 0) {
751//zz VGP_POPCC(VgpSetMem);
752//zz return;
753//zz }
754//zz tl_assert((a % 8) == 0 && len > 0);
755//zz
756//zz /* Now align to the next primary_map entry */
757//zz for (; (a & SECONDARY_MASK) && len >= 8; a += 8, len -= 8) {
758//zz
759//zz PROF_EVENT(32);
760//zz /* If the primary is already pointing to a distinguished map
761//zz with the same properties as we're trying to set, then leave
762//zz it that way. */
763//zz if (primary_map[PM_IDX(a)] == DSM(example_a_bit, example_v_bit))
764//zz continue;
765//zz
766//zz ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
767//zz sm = primary_map[PM_IDX(a)];
768//zz sm_off = SM_OFF(a);
769//zz sm->abits[sm_off >> 3] = abyte8;
770//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
771//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
772//zz }
773//zz
774//zz /* Now set whole secondary maps to the right distinguished value.
775//zz
776//zz Note that if the primary already points to a non-distinguished
777//zz secondary, then don't replace the reference. That would just
778//zz leak memory.
779//zz */
780//zz for(; len >= SECONDARY_SIZE; a += SECONDARY_SIZE, len -= SECONDARY_SIZE) {
781//zz sm = primary_map[PM_IDX(a)];
782//zz
783//zz if (IS_DISTINGUISHED_SM(sm))
784//zz primary_map[PM_IDX(a)] = DSM(example_a_bit, example_v_bit);
785//zz else {
786//zz VG_(memset)(sm->abits, abyte8, sizeof(sm->abits));
787//zz VG_(memset)(sm->vbyte, vbyte, sizeof(sm->vbyte));
788//zz }
789//zz }
790//zz
791//zz /* Now finish off any remains */
792//zz for (; len >= 8; a += 8, len -= 8) {
793//zz PROF_EVENT(32);
794//zz
795//zz /* If the primary is already pointing to a distinguished map
796//zz with the same properties as we're trying to set, then leave
797//zz it that way. */
798//zz if (primary_map[PM_IDX(a)] == DSM(example_a_bit, example_v_bit))
799//zz continue;
800//zz
801//zz ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
802//zz sm = primary_map[PM_IDX(a)];
803//zz sm_off = SM_OFF(a);
804//zz sm->abits[sm_off >> 3] = abyte8;
805//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
806//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
807//zz }
808//zz
809//zz /* Finish the upper fragment. */
810//zz while (True) {
811//zz PROF_EVENT(33);
812//zz if (len == 0) break;
813//zz set_abit ( a, example_a_bit );
814//zz set_vbyte ( a, vbyte );
815//zz a++;
816//zz len--;
817//zz }
818//zz # endif
819//zz
820//zz /* Check that zero page and highest page have not been written to
821//zz -- this could happen with buggy syscall wrappers. Today
822//zz (2001-04-26) had precisely such a problem with __NR_setitimer. */
823//zz tl_assert(TL_(cheap_sanity_check)());
824//zz VGP_POPCC(VgpSetMem);
825//zz }

/* Set permissions for address ranges ... */

static void mc_make_noaccess ( Addr a, SizeT len )
{
   PROF_EVENT(35);
   DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
}

static void mc_make_writable ( Addr a, SizeT len )
{
   PROF_EVENT(36);
   DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
}

static void mc_make_readable ( Addr a, SizeT len )
{
   PROF_EVENT(37);
   DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
   set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
}

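/* Illustrative mapping (not exhaustive) of client events onto the
   three helpers above, as wired up by the event handlers further down
   and by the shared MAC malloc machinery:

      heap block allocated, uninitialised   -> mc_make_writable(p, n)
      memory written by the kernel/startup  -> mc_make_readable(p, n)
      heap block freed / memory unmapped    -> mc_make_noaccess(p, n)
*/
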
static __inline__
void make_aligned_word32_writable(Addr a)
{
   mc_make_writable(a, 4);
854//zz SecMap* sm;
855//zz UInt sm_off;
856//zz UChar mask;
857//zz
858//zz VGP_PUSHCC(VgpESPAdj);
859//zz ENSURE_MAPPABLE(a, "make_aligned_word_writable");
860//zz sm = primary_map[PM_IDX(a)];
861//zz sm_off = SM_OFF(a);
862//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
863//zz mask = 0x0F;
864//zz mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
865//zz /* mask now contains 1s where we wish to make address bits invalid (0s). */
866//zz sm->abits[sm_off >> 3] &= ~mask;
867//zz VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word32_noaccess(Addr a)
{
   mc_make_noaccess(a, 4);
874//zz SecMap* sm;
875//zz UInt sm_off;
876//zz UChar mask;
877//zz
878//zz VGP_PUSHCC(VgpESPAdj);
879//zz ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
880//zz sm = primary_map[PM_IDX(a)];
881//zz sm_off = SM_OFF(a);
882//zz ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
883//zz mask = 0x0F;
884//zz mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
885//zz /* mask now contains 1s where we wish to make address bits invalid (1s). */
886//zz sm->abits[sm_off >> 3] |= mask;
887//zz VGP_POPCC(VgpESPAdj);
}

/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_word64_writable(Addr a)
{
   mc_make_writable(a, 8);
895//zz SecMap* sm;
896//zz UInt sm_off;
897//zz
898//zz VGP_PUSHCC(VgpESPAdj);
899//zz ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
900//zz sm = primary_map[PM_IDX(a)];
901//zz sm_off = SM_OFF(a);
902//zz sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
903//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
904//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
905//zz VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word64_noaccess(Addr a)
{
   mc_make_noaccess(a, 8);
912//zz SecMap* sm;
913//zz UInt sm_off;
914//zz
915//zz VGP_PUSHCC(VgpESPAdj);
916//zz ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
917//zz sm = primary_map[PM_IDX(a)];
918//zz sm_off = SM_OFF(a);
919//zz sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
920//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
921//zz ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
922//zz VGP_POPCC(VgpESPAdj);
}

/* The stack-pointer update handling functions */
SP_UPDATE_HANDLERS ( make_aligned_word32_writable,
                     make_aligned_word32_noaccess,
                     make_aligned_word64_writable,
                     make_aligned_word64_noaccess,
                     mc_make_writable,
                     mc_make_noaccess
                   );

/* Block-copy permissions (needed for implementing realloc()). */
static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i;
   UWord abit, vbyte;

   DEBUG("mc_copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      PROF_EVENT(41);
      get_abit_and_vbyte( &abit, &vbyte, src+i );
      set_abit_and_vbyte( dst+i, abit, vbyte );
   }
}

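/* Usage sketch (illustrative; the real realloc wrapper lives in the
   shared MAC code, not in this file): growing a block from
   p_old/old_size to p_new/new_size would be shadowed roughly as

      mc_copy_address_range_state ( p_old, p_new, old_size );
      mc_make_writable ( p_new + old_size, new_size - old_size );
      mc_make_noaccess ( p_old, old_size );

   i.e. the old bytes keep their A/V state, the new tail starts out
   addressable-but-undefined, and the old block becomes inaccessible. */
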

/*------------------------------------------------------------*/
/*--- Checking memory                                      ---*/
/*------------------------------------------------------------*/

/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressable.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UWord abit;
   UWord vbyte;

   PROF_EVENT(44);
   DEBUG("mc_check_readable\n");
   for (i = 0; i < len; i++) {
      PROF_EVENT(45);
      get_abit_and_vbyte(&abit, &vbyte, a);
      // Report addressability errors in preference to definedness errors
      // by checking the A bits first.
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}

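/* Illustrative call (not from the tool itself): for a 3-byte range
   where byte 0 is defined, byte 1 is addressable-but-undefined and
   byte 2 is unaddressable,

      res = mc_check_readable ( a, 3, &bad );

   returns MC_ValueErr with bad == a+1: bytes are examined in address
   order and the A-before-V preference applies per byte, so the check
   never reaches the unaddressable byte 2. */
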

/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UWord abit;
   UWord vbyte;
   PROF_EVENT(46);
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(47);
      get_abit_and_vbyte(&abit, &vbyte, a);
      // As in mc_check_readable(), check A bits first
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return MC_ValueErr;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0)
         return MC_Ok;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

static
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = mc_check_writable ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    /*isUnaddr*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
         break;

      default:
         VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Addr bad_addr;
   MC_ReadResult res;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   res = mc_check_readable ( base, size, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );

      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    isUnaddr, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, isUnaddr, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   MC_ReadResult res;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   tl_assert(part == Vg_CoreSysCall);
   res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );
      MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}

static
void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
{
   if (is_inited) {
      mc_make_readable(a, len);
   } else {
      mc_make_writable(a, len);
   }
}

static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   mc_make_readable(a, len);
}

static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}


/*------------------------------------------------------------*/
/*--- Register event handlers                              ---*/
/*------------------------------------------------------------*/

/* When some chunk of guest state is written, mark the corresponding
   shadow area as valid.  This is used to initialise arbitrarily large
   chunks of guest state, hence the (somewhat arbitrary) 512 limit.
*/
static void mc_post_reg_write ( CorePart part, ThreadId tid,
                                OffT offset, SizeT size)
{
   UChar area[512];
   tl_assert(size <= 512);
   VG_(memset)(area, VGM_BYTE_VALID, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
}

static
void mc_post_reg_write_clientcall ( ThreadId tid,
                                    OffT offset, SizeT size,
                                    Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}

/* Look at the definedness of the guest's shadow state for
   [offset, offset+size).  If any part of that is undefined, record
   a parameter error.
*/
static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
                              OffT offset, SizeT size)
{
   Int   i;
   Bool  bad;

   UChar area[16];
   tl_assert(size <= 16);

   VG_(get_shadow_regs_area)( tid, offset, size, area );

   bad = False;
   for (i = 0; i < size; i++) {
      if (area[i] != VGM_BYTE_VALID) {
         bad = True;
         break;
      }
   }

   if (bad)
      MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
}


//zz /*------------------------------------------------------------*/
//zz /*--- Functions called directly from generated code.       ---*/
//zz /*------------------------------------------------------------*/
//zz
//zz static __inline__ UInt rotateRight16 ( UInt x )
//zz {
//zz    /* Amazingly, gcc turns this into a single rotate insn. */
//zz    return (x >> 16) | (x << 16);
//zz }
//zz
//zz
//zz static __inline__ UInt shiftRight16 ( UInt x )
//zz {
//zz    return x >> 16;
//zz }
//zz
//zz
//zz /* Read/write 1/2/4/8 sized V bytes, and emit an address error if
//zz    needed. */
//zz
//zz /* MC_(helperc_{LD,ST}V{1,2,4,8}) handle the common case fast.
//zz    Under all other circumstances, they defer to the relevant _SLOWLY
//zz    function, which can handle all situations.
//zz */

/* ------------------------ Size = 8 ------------------------ */

VGA_REGPARM(1)
ULong MC_(helperc_LOADV8) ( Addr a )
{
   return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
1269//zz # ifdef VG_DEBUG_MEMORY
1270//zz return mc_rd_V8_SLOWLY(a);
1271//zz # else
1272//zz if (VG_IS_8_ALIGNED(a)) {
1273//zz UInt sec_no = shiftRight16(a) & 0xFFFF;
1274//zz SecMap* sm = primary_map[sec_no];
1275//zz UInt a_off = (SM_OFF(a)) >> 3;
1276//zz UChar abits = sm->abits[a_off];
1277//zz if (abits == VGM_BYTE_VALID) {
1278//zz /* a is 8-aligned, mapped, and addressible. */
1279//zz UInt v_off = SM_OFF(a);
1280//zz /* LITTLE-ENDIAN */
1281//zz UInt vLo = ((UInt*)(sm->vbyte))[ (v_off >> 2) ];
1282//zz UInt vHi = ((UInt*)(sm->vbyte))[ (v_off >> 2) + 1 ];
1283//zz return ( ((ULong)vHi) << 32 ) | ((ULong)vLo);
1284//zz } else {
1285//zz return mc_rd_V8_SLOWLY(a);
1286//zz }
1287//zz }
1288//zz else
1289//zz if (VG_IS_4_ALIGNED(a)) {
1290//zz /* LITTLE-ENDIAN */
1291//zz UInt vLo = MC_(helperc_LOADV4)(a+0);
1292//zz UInt vHi = MC_(helperc_LOADV4)(a+4);
1293//zz return ( ((ULong)vHi) << 32 ) | ((ULong)vLo);
1294//zz }
1295//zz else
1296//zz return mc_rd_V8_SLOWLY(a);
1297//zz # endif
}

VGA_REGPARM(1)
void MC_(helperc_STOREV8) ( Addr a, ULong vbytes )
{
   mc_STOREVn_slow( a, 8, vbytes, False/*littleendian*/ );
1304//zz # ifdef VG_DEBUG_MEMORY
1305//zz mc_wr_V8_SLOWLY(a, vbytes);
1306//zz # else
1307//zz if (VG_IS_8_ALIGNED(a)) {
1308//zz UInt sec_no = shiftRight16(a) & 0xFFFF;
1309//zz SecMap* sm = primary_map[sec_no];
1310//zz UInt a_off = (SM_OFF(a)) >> 3;
1311//zz if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
1312//zz /* a is 8-aligned, mapped, and addressible. */
1313//zz UInt v_off = SM_OFF(a);
1314//zz UInt vHi = (UInt)(vbytes >> 32);
1315//zz UInt vLo = (UInt)vbytes;
1316//zz /* LITTLE-ENDIAN */
1317//zz ((UInt*)(sm->vbyte))[ (v_off >> 2) ] = vLo;
1318//zz ((UInt*)(sm->vbyte))[ (v_off >> 2) + 1 ] = vHi;
1319//zz } else {
1320//zz mc_wr_V8_SLOWLY(a, vbytes);
1321//zz }
1322//zz return;
1323//zz }
1324//zz else
1325//zz if (VG_IS_4_ALIGNED(a)) {
1326//zz UInt vHi = (UInt)(vbytes >> 32);
1327//zz UInt vLo = (UInt)vbytes;
1328//zz /* LITTLE-ENDIAN */
1329//zz MC_(helperc_STOREV4)(a+0, vLo);
1330//zz MC_(helperc_STOREV4)(a+4, vHi);
1331//zz return;
1332//zz }
1333//zz else
1334//zz mc_wr_V8_SLOWLY(a, vbytes);
1335//zz # endif
}

/* ------------------------ Size = 4 ------------------------ */

VGA_REGPARM(1)
UInt MC_(helperc_LOADV4) ( Addr a )
{
   return (UInt)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
1344//zz # ifdef VG_DEBUG_MEMORY
1345//zz return mc_rd_V4_SLOWLY(a);
1346//zz # else
1347//zz UInt sec_no = rotateRight16(a) & 0x3FFFF;
1348//zz SecMap* sm = primary_map[sec_no];
1349//zz UInt a_off = (SM_OFF(a)) >> 3;
1350//zz UChar abits = sm->abits[a_off];
1351//zz abits >>= (a & 4);
1352//zz abits &= 15;
1353//zz PROF_EVENT(60);
1354//zz if (abits == VGM_NIBBLE_VALID) {
1355//zz /* Handle common case quickly: a is suitably aligned, is mapped,
1356//zz and is addressible. */
1357//zz UInt v_off = SM_OFF(a);
1358//zz return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
1359//zz } else {
1360//zz /* Slow but general case. */
1361//zz return mc_rd_V4_SLOWLY(a);
1362//zz }
1363//zz # endif
}

VGA_REGPARM(2)
void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
{
   mc_STOREVn_slow( a, 4, vbytes, False/*littleendian*/ );
1370//zz # ifdef VG_DEBUG_MEMORY
1371//zz mc_wr_V4_SLOWLY(a, vbytes);
1372//zz # else
1373//zz UInt sec_no = rotateRight16(a) & 0x3FFFF;
1374//zz SecMap* sm = primary_map[sec_no];
1375//zz UInt a_off = (SM_OFF(a)) >> 3;
1376//zz UChar abits = sm->abits[a_off];
1377//zz abits >>= (a & 4);
1378//zz abits &= 15;
1379//zz PROF_EVENT(61);
1380//zz if (!IS_DISTINGUISHED_SM(sm) && abits == VGM_NIBBLE_VALID) {
1381//zz /* Handle common case quickly: a is suitably aligned, is mapped,
1382//zz and is addressible. */
1383//zz UInt v_off = SM_OFF(a);
1384//zz ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
1385//zz } else {
1386//zz /* Slow but general case. */
1387//zz mc_wr_V4_SLOWLY(a, vbytes);
1388//zz }
1389//zz # endif
}

/* ------------------------ Size = 2 ------------------------ */

VGA_REGPARM(1)
UInt MC_(helperc_LOADV2) ( Addr a )
{
   return (UInt)mc_LOADVn_slow( a, 2, False/*littleendian*/ );
1398//zz # ifdef VG_DEBUG_MEMORY
1399//zz return mc_rd_V2_SLOWLY(a);
1400//zz # else
1401//zz UInt sec_no = rotateRight16(a) & 0x1FFFF;
1402//zz SecMap* sm = primary_map[sec_no];
1403//zz UInt a_off = (SM_OFF(a)) >> 3;
1404//zz PROF_EVENT(62);
1405//zz if (sm->abits[a_off] == VGM_BYTE_VALID) {
1406//zz /* Handle common case quickly. */
1407//zz UInt v_off = SM_OFF(a);
1408//zz return 0xFFFF0000
1409//zz |
1410//zz (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
1411//zz } else {
1412//zz /* Slow but general case. */
1413//zz return mc_rd_V2_SLOWLY(a);
1414//zz }
1415//zz # endif
}

VGA_REGPARM(2)
void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
{
   mc_STOREVn_slow( a, 2, vbytes, False/*littleendian*/ );
1422//zz # ifdef VG_DEBUG_MEMORY
1423//zz mc_wr_V2_SLOWLY(a, vbytes);
1424//zz # else
1425//zz UInt sec_no = rotateRight16(a) & 0x1FFFF;
1426//zz SecMap* sm = primary_map[sec_no];
1427//zz UInt a_off = (SM_OFF(a)) >> 3;
1428//zz PROF_EVENT(63);
1429//zz if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
1430//zz /* Handle common case quickly. */
1431//zz UInt v_off = SM_OFF(a);
1432//zz ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
1433//zz } else {
1434//zz /* Slow but general case. */
1435//zz mc_wr_V2_SLOWLY(a, vbytes);
1436//zz }
1437//zz # endif
}

/* ------------------------ Size = 1 ------------------------ */

VGA_REGPARM(1)
UInt MC_(helperc_LOADV1) ( Addr a )
{
   return (UInt)mc_LOADVn_slow( a, 1, False/*littleendian*/ );
1446//zz # ifdef VG_DEBUG_MEMORY
1447//zz return mc_rd_V1_SLOWLY(a);
1448//zz # else
1449//zz UInt sec_no = shiftRight16(a);
1450//zz SecMap* sm = primary_map[sec_no];
1451//zz UInt a_off = (SM_OFF(a)) >> 3;
1452//zz PROF_EVENT(64);
1453//zz if (sm->abits[a_off] == VGM_BYTE_VALID) {
1454//zz /* Handle common case quickly. */
1455//zz UInt v_off = SM_OFF(a);
1456//zz return 0xFFFFFF00
1457//zz |
1458//zz (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
1459//zz } else {
1460//zz /* Slow but general case. */
1461//zz return mc_rd_V1_SLOWLY(a);
1462//zz }
1463//zz # endif
}

VGA_REGPARM(2)
void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
{
   mc_STOREVn_slow( a, 1, vbytes, False/*littleendian*/ );
1470//zz # ifdef VG_DEBUG_MEMORY
1471//zz mc_wr_V1_SLOWLY(a, vbytes);
1472//zz # else
1473//zz UInt sec_no = shiftRight16(a);
1474//zz SecMap* sm = primary_map[sec_no];
1475//zz UInt a_off = (SM_OFF(a)) >> 3;
1476//zz PROF_EVENT(65);
1477//zz if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
1478//zz /* Handle common case quickly. */
1479//zz UInt v_off = SM_OFF(a);
1480//zz ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
1481//zz } else {
1482//zz /* Slow but general case. */
1483//zz mc_wr_V1_SLOWLY(a, vbytes);
1484//zz }
1485//zz # endif
njn25e49d8e72002-09-23 09:36:25 +00001486}
1487
1488
sewardj45d94cc2005-04-20 14:44:11 +00001489//zz /*------------------------------------------------------------*/
1490//zz /*--- Fallback functions to handle cases that the above ---*/
1491//zz /*--- VG_(helperc_{LD,ST}V{1,2,4,8}) can't manage. ---*/
1492//zz /*------------------------------------------------------------*/
1493//zz
1494//zz /* ------------------------ Size = 8 ------------------------ */
1495//zz
1496//zz static ULong mc_rd_V8_SLOWLY ( Addr a )
1497//zz {
1498//zz Bool a0ok, a1ok, a2ok, a3ok, a4ok, a5ok, a6ok, a7ok;
1499//zz UInt vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7;
1500//zz
1501//zz PROF_EVENT(70);
1502//zz
1503//zz /* First establish independently the addressability of the 8 bytes
1504//zz involved. */
1505//zz a0ok = get_abit(a+0) == VGM_BIT_VALID;
1506//zz a1ok = get_abit(a+1) == VGM_BIT_VALID;
1507//zz a2ok = get_abit(a+2) == VGM_BIT_VALID;
1508//zz a3ok = get_abit(a+3) == VGM_BIT_VALID;
1509//zz a4ok = get_abit(a+4) == VGM_BIT_VALID;
1510//zz a5ok = get_abit(a+5) == VGM_BIT_VALID;
1511//zz a6ok = get_abit(a+6) == VGM_BIT_VALID;
1512//zz a7ok = get_abit(a+7) == VGM_BIT_VALID;
1513//zz
1514//zz /* Also get the validity bytes for the address. */
1515//zz vb0 = (UInt)get_vbyte(a+0);
1516//zz vb1 = (UInt)get_vbyte(a+1);
1517//zz vb2 = (UInt)get_vbyte(a+2);
1518//zz vb3 = (UInt)get_vbyte(a+3);
1519//zz vb4 = (UInt)get_vbyte(a+4);
1520//zz vb5 = (UInt)get_vbyte(a+5);
1521//zz vb6 = (UInt)get_vbyte(a+6);
1522//zz vb7 = (UInt)get_vbyte(a+7);
1523//zz
1524//zz /* Now distinguish 3 cases */
1525//zz
1526//zz /* Case 1: the address is completely valid, so:
1527//zz - no addressing error
1528//zz - return V bytes as read from memory
1529//zz */
1530//zz if (a0ok && a1ok && a2ok && a3ok && a4ok && a5ok && a6ok && a7ok) {
1531//zz ULong vw = VGM_WORD64_INVALID;
1532//zz vw <<= 8; vw |= vb7;
1533//zz vw <<= 8; vw |= vb6;
1534//zz vw <<= 8; vw |= vb5;
1535//zz vw <<= 8; vw |= vb4;
1536//zz vw <<= 8; vw |= vb3;
1537//zz vw <<= 8; vw |= vb2;
1538//zz vw <<= 8; vw |= vb1;
1539//zz vw <<= 8; vw |= vb0;
1540//zz return vw;
1541//zz }
1542//zz
1543//zz /* Case 2: the address is completely invalid.
1544//zz - emit addressing error
1545//zz - return V word indicating validity.
1546//zz This sounds strange, but if we make loads from invalid addresses
1547//zz give invalid data, we also risk producing a number of confusing
1548//zz undefined-value errors later, which obscures the fact that the
1549//zz error arose in the first place from an invalid address.
1550//zz */
1551//zz /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
1552//zz if (!MAC_(clo_partial_loads_ok)
1553//zz || ((a & 7) != 0)
1554//zz || (!a0ok && !a1ok && !a2ok && !a3ok && !a4ok && !a5ok && !a6ok && !a7ok)) {
1555//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 8, False );
1556//zz return VGM_WORD64_VALID;
1557//zz }
1558//zz
1559//zz /* Case 3: the address is partially valid.
1560//zz - no addressing error
1561//zz - returned V word is invalid where the address is invalid,
1562//zz and contains V bytes from memory otherwise.
1563//zz Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
1564//zz Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
1565//zz (which is the default), and the address is 8-aligned.
1566//zz */
1567//zz tl_assert(MAC_(clo_partial_loads_ok));
1568//zz {
1569//zz ULong vw = VGM_WORD64_INVALID;
1570//zz vw <<= 8; vw |= (a7ok ? vb7 : VGM_BYTE_INVALID);
1571//zz vw <<= 8; vw |= (a6ok ? vb6 : VGM_BYTE_INVALID);
1572//zz vw <<= 8; vw |= (a5ok ? vb5 : VGM_BYTE_INVALID);
1573//zz vw <<= 8; vw |= (a4ok ? vb4 : VGM_BYTE_INVALID);
1574//zz vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
1575//zz vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
1576//zz vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
1577//zz vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
1578//zz return vw;
1579//zz }
1580//zz }
1581//zz
1582//zz static void mc_wr_V8_SLOWLY ( Addr a, ULong vbytes )
1583//zz {
1584//zz /* Check the address for validity. */
1585//zz Bool aerr = False;
1586//zz PROF_EVENT(71);
1587//zz
1588//zz if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1589//zz if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1590//zz if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1591//zz if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1592//zz if (get_abit(a+4) != VGM_BIT_VALID) aerr = True;
1593//zz if (get_abit(a+5) != VGM_BIT_VALID) aerr = True;
1594//zz if (get_abit(a+6) != VGM_BIT_VALID) aerr = True;
1595//zz if (get_abit(a+7) != VGM_BIT_VALID) aerr = True;
1596//zz
1597//zz /* Store the V bytes, remembering to do it little-endian-ly. */
1598//zz set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1599//zz set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1600//zz set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1601//zz set_vbyte( a+3, vbytes & 0x000000FF ); vbytes >>= 8;
1602//zz set_vbyte( a+4, vbytes & 0x000000FF ); vbytes >>= 8;
1603//zz set_vbyte( a+5, vbytes & 0x000000FF ); vbytes >>= 8;
1604//zz set_vbyte( a+6, vbytes & 0x000000FF ); vbytes >>= 8;
1605//zz set_vbyte( a+7, vbytes & 0x000000FF );
1606//zz
1607//zz /* If an address error has happened, report it. */
1608//zz if (aerr)
1609//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 8, True );
1610//zz }
1611//zz
1612//zz /* ------------------------ Size = 4 ------------------------ */
1613//zz
1614//zz static UInt mc_rd_V4_SLOWLY ( Addr a )
1615//zz {
1616//zz Bool a0ok, a1ok, a2ok, a3ok;
1617//zz UInt vb0, vb1, vb2, vb3;
1618//zz
1619//zz PROF_EVENT(70);
1620//zz
1621//zz /* First establish independently the addressability of the 4 bytes
1622//zz involved. */
1623//zz a0ok = get_abit(a+0) == VGM_BIT_VALID;
1624//zz a1ok = get_abit(a+1) == VGM_BIT_VALID;
1625//zz a2ok = get_abit(a+2) == VGM_BIT_VALID;
1626//zz a3ok = get_abit(a+3) == VGM_BIT_VALID;
1627//zz
1628//zz /* Also get the validity bytes for the address. */
1629//zz vb0 = (UInt)get_vbyte(a+0);
1630//zz vb1 = (UInt)get_vbyte(a+1);
1631//zz vb2 = (UInt)get_vbyte(a+2);
1632//zz vb3 = (UInt)get_vbyte(a+3);
1633//zz
1634//zz /* Now distinguish 3 cases */
1635//zz
1636//zz /* Case 1: the address is completely valid, so:
1637//zz - no addressing error
1638//zz - return V bytes as read from memory
1639//zz */
1640//zz if (a0ok && a1ok && a2ok && a3ok) {
1641//zz UInt vw = VGM_WORD_INVALID;
1642//zz vw <<= 8; vw |= vb3;
1643//zz vw <<= 8; vw |= vb2;
1644//zz vw <<= 8; vw |= vb1;
1645//zz vw <<= 8; vw |= vb0;
1646//zz return vw;
1647//zz }
1648//zz
1649//zz /* Case 2: the address is completely invalid.
1650//zz - emit addressing error
1651//zz - return V word indicating validity.
1652//zz This sounds strange, but if we make loads from invalid addresses
1653//zz give invalid data, we also risk producing a number of confusing
1654//zz undefined-value errors later, which obscures the fact that the
1655//zz error arose in the first place from an invalid address.
1656//zz */
1657//zz /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
1658//zz if (!MAC_(clo_partial_loads_ok)
1659//zz || ((a & 3) != 0)
1660//zz || (!a0ok && !a1ok && !a2ok && !a3ok)) {
1661//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 4, False );
1662//zz return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
1663//zz | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
1664//zz }
1665//zz
1666//zz /* Case 3: the address is partially valid.
1667//zz - no addressing error
1668//zz - returned V word is invalid where the address is invalid,
1669//zz and contains V bytes from memory otherwise.
1670//zz Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
1671//zz (which is the default), and the address is 4-aligned.
1672//zz If not, Case 2 will have applied.
1673//zz */
1674//zz tl_assert(MAC_(clo_partial_loads_ok));
1675//zz {
1676//zz UInt vw = VGM_WORD_INVALID;
1677//zz vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
1678//zz vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
1679//zz vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
1680//zz vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
1681//zz return vw;
1682//zz }
1683//zz }
1684//zz
1685//zz static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
1686//zz {
1687//zz /* Check the address for validity. */
1688//zz Bool aerr = False;
1689//zz PROF_EVENT(71);
1690//zz
1691//zz if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1692//zz if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1693//zz if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1694//zz if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1695//zz
1696//zz /* Store the V bytes, remembering to do it little-endian-ly. */
1697//zz set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1698//zz set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1699//zz set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1700//zz set_vbyte( a+3, vbytes & 0x000000FF );
1701//zz
1702//zz /* If an address error has happened, report it. */
1703//zz if (aerr)
1704//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 4, True );
1705//zz }
1706//zz
1707//zz /* ------------------------ Size = 2 ------------------------ */
1708//zz
1709//zz static UInt mc_rd_V2_SLOWLY ( Addr a )
1710//zz {
1711//zz /* Check the address for validity. */
1712//zz UInt vw = VGM_WORD_INVALID;
1713//zz Bool aerr = False;
1714//zz PROF_EVENT(72);
1715//zz
1716//zz if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1717//zz if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1718//zz
1719//zz /* Fetch the V bytes, remembering to do it little-endian-ly. */
1720//zz vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1721//zz vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1722//zz
1723//zz /* If an address error has happened, report it. */
1724//zz if (aerr) {
1725//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 2, False );
1726//zz vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1727//zz | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1728//zz }
1729//zz return vw;
1730//zz }
1731//zz
1732//zz static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
1733//zz {
1734//zz /* Check the address for validity. */
1735//zz Bool aerr = False;
1736//zz PROF_EVENT(73);
1737//zz
1738//zz if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1739//zz if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1740//zz
1741//zz /* Store the V bytes, remembering to do it little-endian-ly. */
1742//zz set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1743//zz set_vbyte( a+1, vbytes & 0x000000FF );
1744//zz
1745//zz /* If an address error has happened, report it. */
1746//zz if (aerr)
1747//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 2, True );
1748//zz }
1749//zz
1750//zz /* ------------------------ Size = 1 ------------------------ */
1751//zz
1752//zz static UInt mc_rd_V1_SLOWLY ( Addr a )
1753//zz {
1754//zz /* Check the address for validity. */
1755//zz UInt vw = VGM_WORD_INVALID;
1756//zz Bool aerr = False;
1757//zz PROF_EVENT(74);
1758//zz
1759//zz if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1760//zz
1761//zz /* Fetch the V byte. */
1762//zz vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1763//zz
1764//zz /* If an address error has happened, report it. */
1765//zz if (aerr) {
1766//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 1, False );
1767//zz vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1768//zz | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1769//zz }
1770//zz return vw;
1771//zz }
1772//zz
1773//zz static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
1774//zz {
1775//zz /* Check the address for validity. */
1776//zz Bool aerr = False;
1777//zz PROF_EVENT(75);
1778//zz if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1779//zz
1780//zz /* Store the V bytes, remembering to do it little-endian-ly. */
1781//zz set_vbyte( a+0, vbytes & 0x000000FF );
1782//zz
1783//zz /* If an address error has happened, report it. */
1784//zz if (aerr)
1785//zz MAC_(record_address_error)( VG_(get_running_tid)(), a, 1, True );
1786//zz }
njn25e49d8e72002-09-23 09:36:25 +00001787
1788
1789/* ---------------------------------------------------------------------
1790 Called from generated code, or from the assembly helpers.
1791 Handlers for value check failures.
1792 ------------------------------------------------------------------ */
1793
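/* Each of these simply records a value error of the given size against the
   currently running thread; a size of 0 denotes a conditional jump or move
   that depends on undefined data. */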
njn5c004e42002-11-18 11:04:50 +00001794void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001795{
njnb8dca862005-03-14 02:42:44 +00001796 MC_(record_value_error) ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001797}
1798
njn5c004e42002-11-18 11:04:50 +00001799void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001800{
njnb8dca862005-03-14 02:42:44 +00001801 MC_(record_value_error) ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001802}
1803
sewardj45d94cc2005-04-20 14:44:11 +00001804//zz void MC_(helperc_value_check2_fail) ( void )
1805//zz {
1806//zz MC_(record_value_error) ( VG_(get_running_tid)(), 2 );
1807//zz }
njn25e49d8e72002-09-23 09:36:25 +00001808
njn5c004e42002-11-18 11:04:50 +00001809void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001810{
njnb8dca862005-03-14 02:42:44 +00001811 MC_(record_value_error) ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001812}
1813
njn9fb73db2005-03-27 01:55:21 +00001814VGA_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
sewardj95448072004-11-22 20:19:51 +00001815{
njnb8dca862005-03-14 02:42:44 +00001816 MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001817}
1818
njn25e49d8e72002-09-23 09:36:25 +00001819
sewardj45d94cc2005-04-20 14:44:11 +00001820//zz /*------------------------------------------------------------*/
1821//zz /*--- Metadata get/set functions, for client requests. ---*/
1822//zz /*------------------------------------------------------------*/
1823//zz
1824//zz /* Get or set (per 'setting') the V bits for the 'size' bytes at 'dataV',
1825//zz copying to/from the array at 'vbitsV'. Returns: 1 == OK, 2 == alignment error, 3 == addressing error. */
1826//zz static Int mc_get_or_set_vbits_for_client (
1827//zz ThreadId tid,
1828//zz Addr dataV,
1829//zz Addr vbitsV,
1830//zz SizeT size,
1831//zz Bool setting /* True <=> set vbits, False <=> get vbits */
1832//zz )
1833//zz {
1834//zz Bool addressibleD = True;
1835//zz Bool addressibleV = True;
1836//zz UInt* data = (UInt*)dataV;
1837//zz UInt* vbits = (UInt*)vbitsV;
1838//zz SizeT szW = size / 4; /* sigh */
1839//zz SizeT i;
1840//zz UInt* dataP = NULL; /* bogus init to keep gcc happy */
1841//zz UInt* vbitsP = NULL; /* ditto */
1842//zz
1843//zz /* Check alignment of args. */
1844//zz if (!(VG_IS_4_ALIGNED(data) && VG_IS_4_ALIGNED(vbits)))
1845//zz return 2;
1846//zz if ((size & 3) != 0)
1847//zz return 2;
1848//zz
1849//zz /* Check that arrays are addressible. */
1850//zz /* Check that the arrays are addressable. */
1851//zz dataP = &data[i];
1852//zz vbitsP = &vbits[i];
1853//zz if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1854//zz addressibleD = False;
1855//zz break;
1856//zz }
1857//zz if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1858//zz addressibleV = False;
1859//zz break;
1860//zz }
1861//zz }
1862//zz if (!addressibleD) {
1863//zz MAC_(record_address_error)( tid, (Addr)dataP, 4,
1864//zz setting ? True : False );
1865//zz return 3;
1866//zz }
1867//zz if (!addressibleV) {
1868//zz MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
1869//zz setting ? False : True );
1870//zz return 3;
1871//zz }
1872//zz
1873//zz /* Do the copy */
1874//zz if (setting) {
1875//zz /* setting */
1876//zz for (i = 0; i < szW; i++) {
1877//zz if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
1878//zz MC_(record_value_error)(tid, 4);
1879//zz set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1880//zz }
1881//zz } else {
1882//zz /* getting */
1883//zz for (i = 0; i < szW; i++) {
1884//zz vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1885//zz set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1886//zz }
1887//zz }
1888//zz
1889//zz return 1;
1890//zz }
1891//zz
1892//zz
1893//zz /*------------------------------------------------------------*/
1894//zz /*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1895//zz /*------------------------------------------------------------*/
1896//zz
1897//zz /* For the memory leak detector, say whether an entire 64k chunk of
1898//zz address space is possibly in use, or not. If in doubt return
1899//zz True.
1900//zz */
1901//zz static
1902//zz Bool mc_is_valid_64k_chunk ( UInt chunk_number )
1903//zz {
1904//zz tl_assert(chunk_number >= 0 && chunk_number < PRIMARY_SIZE);
1905//zz if (primary_map[chunk_number] == DSM_NOTADDR) {
1906//zz /* Definitely not in use. */
1907//zz return False;
1908//zz } else {
1909//zz return True;
1910//zz }
1911//zz }
1912//zz
1913//zz
1914//zz /* For the memory leak detector, say whether or not a given word
1915//zz address is to be regarded as valid. */
1916//zz static
1917//zz Bool mc_is_valid_address ( Addr a )
1918//zz {
1919//zz UInt vbytes;
1920//zz UChar abits;
1921//zz tl_assert(VG_IS_4_ALIGNED(a));
1922//zz abits = get_abits4_ALIGNED(a);
1923//zz vbytes = get_vbytes4_ALIGNED(a);
1924//zz if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1925//zz return True;
1926//zz } else {
1927//zz return False;
1928//zz }
1929//zz }
sewardja4495682002-10-21 07:29:59 +00001930
1931
nethercote996901a2004-08-03 13:29:09 +00001932/* Leak detector for this tool. Normally we do nothing ourselves, merely
sewardja4495682002-10-21 07:29:59 +00001933 run the generic leak detector with suitable parameters for this tool;
nethercote996901a2004-08-03 13:29:09 +00001934 that call is commented out below for now, so we just say so and return. */
njnb8dca862005-03-14 02:42:44 +00001935static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001936{
sewardj45d94cc2005-04-20 14:44:11 +00001937 VG_(printf)("memcheck: leak detection currently disabled\n");
1938 // MAC_(do_detect_memory_leaks) (
1939 // tid, mode, mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001940}
1941
1942
1943/* ---------------------------------------------------------------------
1944 Sanity check machinery (permanently engaged).
1945 ------------------------------------------------------------------ */
1946
njn26f02512004-11-22 18:33:15 +00001947Bool TL_(cheap_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001948{
jseward9800fd32004-01-04 23:08:04 +00001949 /* nothing useful we can rapidly check */
1950 return True;
njn25e49d8e72002-09-23 09:36:25 +00001951}
1952
njn26f02512004-11-22 18:33:15 +00001953Bool TL_(expensive_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001954{
sewardj45d94cc2005-04-20 14:44:11 +00001955 Int i;
1956 SecMap* sm;
njn25e49d8e72002-09-23 09:36:25 +00001957
sewardj45d94cc2005-04-20 14:44:11 +00001958 /* Check the 3 distinguished SMs. */
njn25e49d8e72002-09-23 09:36:25 +00001959
sewardj45d94cc2005-04-20 14:44:11 +00001960 /* Check A invalid, V invalid. */
1961 sm = &sm_distinguished[SM_DIST_NOACCESS];
njn25e49d8e72002-09-23 09:36:25 +00001962 for (i = 0; i < 65536; i++)
sewardj45d94cc2005-04-20 14:44:11 +00001963 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
njn25e49d8e72002-09-23 09:36:25 +00001964 return False;
sewardj45d94cc2005-04-20 14:44:11 +00001965 for (i = 0; i < 8192; i++)
1966 if (!(sm->abits[i] == VGM_BYTE_INVALID))
1967 return False;
njn25e49d8e72002-09-23 09:36:25 +00001968
sewardj45d94cc2005-04-20 14:44:11 +00001969 /* Check A valid, V invalid. */
1970 sm = &sm_distinguished[SM_DIST_ACCESS_UNDEFINED];
1971 for (i = 0; i < 65536; i++)
1972 if (!(sm->vbyte[i] == VGM_BYTE_INVALID))
njn25e49d8e72002-09-23 09:36:25 +00001973 return False;
sewardj45d94cc2005-04-20 14:44:11 +00001974 for (i = 0; i < 8192; i++)
1975 if (!(sm->abits[i] == VGM_BYTE_VALID))
1976 return False;
1977
1978 /* Check A valid, V valid. */
1979 sm = &sm_distinguished[SM_DIST_ACCESS_DEFINED];
1980 for (i = 0; i < 65536; i++)
1981 if (!(sm->vbyte[i] == VGM_BYTE_VALID))
1982 return False;
1983 for (i = 0; i < 8192; i++)
1984 if (!(sm->abits[i] == VGM_BYTE_VALID))
1985 return False;
1986
1987 if (auxmap_used > auxmap_size)
1988 return False;
njn25e49d8e72002-09-23 09:36:25 +00001989
1990 return True;
1991}
sewardj45d94cc2005-04-20 14:44:11 +00001992
njn25e49d8e72002-09-23 09:36:25 +00001993
sewardj45d94cc2005-04-20 14:44:11 +00001994/////////////////////////////////////////////////////////////////
1995/////////////////////////////////////////////////////////////////
1996/////////////////////////////////////////////////////////////////
1997/////////////////////////////////////////////////////////////////
1998/////////////////////////////////////////////////////////////////
1999/////////////////////////////////////////////////////////////////
2000/////////////////////////////////////////////////////////////////
2001/////////////////////////////////////////////////////////////////
2002/////////////////////////////////////////////////////////////////
2003
njn25e49d8e72002-09-23 09:36:25 +00002004/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00002005/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00002006/*------------------------------------------------------------*/
2007
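/* When True (the default), try not to report errors provoked by the
   heavily-optimised inlined strlen() found in some C libraries; see the
   usage text below. */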
njn43c799e2003-04-08 00:08:52 +00002008Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00002009
njn26f02512004-11-22 18:33:15 +00002010Bool TL_(process_cmd_line_option)(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00002011{
njn45270a22005-03-27 01:00:11 +00002012 VG_BOOL_CLO(arg, "--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00002013 else
njn43c799e2003-04-08 00:08:52 +00002014 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00002015
2016 return True;
njn25e49d8e72002-09-23 09:36:25 +00002017}
2018
njn26f02512004-11-22 18:33:15 +00002019void TL_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00002020{
njn3e884182003-04-15 13:03:23 +00002021 MAC_(print_common_usage)();
2022 VG_(printf)(
2023" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
2024 );
2025}
2026
njn26f02512004-11-22 18:33:15 +00002027void TL_(print_debug_usage)(void)
njn3e884182003-04-15 13:03:23 +00002028{
2029 MAC_(print_common_debug_usage)();
2030 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00002031" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00002032 );
njn25e49d8e72002-09-23 09:36:25 +00002033}
2034
nethercote8b76fe52004-11-08 19:20:09 +00002035/*------------------------------------------------------------*/
2036/*--- Client requests ---*/
2037/*------------------------------------------------------------*/
2038
2039/* Client block management:
2040
2041 This is managed as an expanding array of client block descriptors.
2042 Indices of live descriptors are issued to the client, so it can ask
2043 to free them later. Therefore we cannot slide live entries down
2044 over dead ones. Instead we must use free/inuse flags and scan for
2045 an empty slot at allocation time. This in turn means allocation is
2046 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00002047
sewardjedc75ab2005-03-15 23:30:32 +00002048 An unused block has start == size == 0.
2049*/
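/* Illustrative sketch only (not part of the tool itself): client code
   typically reaches this machinery through the client-request macros in
   memcheck.h, along the lines of

       char buf[256];
       int  id = VALGRIND_CREATE_BLOCK(buf, sizeof buf, "my buffer");
       ...
       VALGRIND_DISCARD(id);

   The first request lands in the VG_USERREQ__CREATE_BLOCK case below, which
   records the block in cgbs[] and returns its index; the second lands in
   VG_USERREQ__DISCARD, which marks that slot unused again.  The macro names
   are those provided by memcheck.h; exact spellings and return conventions
   may vary between releases. */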
nethercote8b76fe52004-11-08 19:20:09 +00002050
2051typedef
2052 struct {
2053 Addr start;
2054 SizeT size;
2055 ExeContext* where;
sewardjedc75ab2005-03-15 23:30:32 +00002056 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00002057 }
2058 CGenBlock;
2059
2060/* This subsystem is self-initialising. */
njn695c16e2005-03-27 03:40:28 +00002061static UInt cgb_size = 0;
2062static UInt cgb_used = 0;
2063static CGenBlock* cgbs = NULL;
nethercote8b76fe52004-11-08 19:20:09 +00002064
2065/* Stats for this subsystem. */
njn695c16e2005-03-27 03:40:28 +00002066static UInt cgb_used_MAX = 0; /* Max in use. */
2067static UInt cgb_allocs = 0; /* Number of allocs. */
2068static UInt cgb_discards = 0; /* Number of discards. */
2069static UInt cgb_search = 0; /* Number of searches. */
nethercote8b76fe52004-11-08 19:20:09 +00002070
2071
2072static
njn695c16e2005-03-27 03:40:28 +00002073Int alloc_client_block ( void )
nethercote8b76fe52004-11-08 19:20:09 +00002074{
2075 UInt i, sz_new;
2076 CGenBlock* cgbs_new;
2077
njn695c16e2005-03-27 03:40:28 +00002078 cgb_allocs++;
nethercote8b76fe52004-11-08 19:20:09 +00002079
njn695c16e2005-03-27 03:40:28 +00002080 for (i = 0; i < cgb_used; i++) {
2081 cgb_search++;
2082 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002083 return i;
2084 }
2085
2086 /* Not found. Try to allocate one at the end. */
njn695c16e2005-03-27 03:40:28 +00002087 if (cgb_used < cgb_size) {
2088 cgb_used++;
2089 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002090 }
2091
2092 /* Ok, we have to allocate a new one. */
njn695c16e2005-03-27 03:40:28 +00002093 tl_assert(cgb_used == cgb_size);
2094 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00002095
2096 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
njn695c16e2005-03-27 03:40:28 +00002097 for (i = 0; i < cgb_used; i++)
2098 cgbs_new[i] = cgbs[i];
nethercote8b76fe52004-11-08 19:20:09 +00002099
njn695c16e2005-03-27 03:40:28 +00002100 if (cgbs != NULL)
2101 VG_(free)( cgbs );
2102 cgbs = cgbs_new;
nethercote8b76fe52004-11-08 19:20:09 +00002103
njn695c16e2005-03-27 03:40:28 +00002104 cgb_size = sz_new;
2105 cgb_used++;
2106 if (cgb_used > cgb_used_MAX)
2107 cgb_used_MAX = cgb_used;
2108 return cgb_used-1;
nethercote8b76fe52004-11-08 19:20:09 +00002109}
2110
2111
2112static void show_client_block_stats ( void )
2113{
2114 VG_(message)(Vg_DebugMsg,
2115 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
njn695c16e2005-03-27 03:40:28 +00002116 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
nethercote8b76fe52004-11-08 19:20:09 +00002117 );
2118}
2119
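/* Hash-table callback: does the address pointed to by 'ap' fall within the
   chunk 'sh_ch'? */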
2120static Bool find_addr(VgHashNode* sh_ch, void* ap)
2121{
2122 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
2123 Addr a = *(Addr*)ap;
2124
2125 return VG_(addr_is_in_block)(a, m->data, m->size);
2126}
2127
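/* Given an address 'a' that the error machinery wants described, see whether
   it lies inside a client-described block (or a mempool anchored at one); if
   so, fill in 'ai' and return True, otherwise return False. */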
2128static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
2129{
2130 UInt i;
2131 /* VG_(printf)("try to identify %d\n", a); */
2132
2133 /* Perhaps it's a general block? */
njn695c16e2005-03-27 03:40:28 +00002134 for (i = 0; i < cgb_used; i++) {
2135 if (cgbs[i].start == 0 && cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00002136 continue;
njn695c16e2005-03-27 03:40:28 +00002137 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size)) {
nethercote8b76fe52004-11-08 19:20:09 +00002138 MAC_Mempool **d, *mp;
2139
2140 /* OK - maybe it's a mempool, too? */
2141 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
njn695c16e2005-03-27 03:40:28 +00002142 (UWord)cgbs[i].start,
nethercote8b76fe52004-11-08 19:20:09 +00002143 (void*)&d);
2144 if (mp != NULL) {
2145 if (mp->chunks != NULL) {
2146 MAC_Chunk *mc;
2147
2148 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
2149 if (mc != NULL) {
2150 ai->akind = UserG;
2151 ai->blksize = mc->size;
2152 ai->rwoffset = (Int)(a) - (Int)mc->data;
2153 ai->lastchange = mc->where;
2154 return True;
2155 }
2156 }
2157 ai->akind = Mempool;
njn695c16e2005-03-27 03:40:28 +00002158 ai->blksize = cgbs[i].size;
2159 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2160 ai->lastchange = cgbs[i].where;
nethercote8b76fe52004-11-08 19:20:09 +00002161 return True;
2162 }
2163 ai->akind = UserG;
njn695c16e2005-03-27 03:40:28 +00002164 ai->blksize = cgbs[i].size;
2165 ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
2166 ai->lastchange = cgbs[i].where;
2167 ai->desc = cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00002168 return True;
2169 }
2170 }
2171 return False;
2172}
2173
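/* Handle memcheck-specific client requests; anything unrecognised is passed
   on to MAC_(handle_common_client_requests). */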
njn26f02512004-11-22 18:33:15 +00002174Bool TL_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00002175{
2176 Int i;
2177 Bool ok;
2178 Addr bad_addr;
2179
njnfc26ff92004-11-22 19:12:49 +00002180 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00002181 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
2182 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
2183 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
2184 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
2185 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
2186 && VG_USERREQ__MEMPOOL_FREE != arg[0])
2187 return False;
2188
2189 switch (arg[0]) {
2190 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
2191 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
2192 if (!ok)
2193 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/True,
2194 /*isUnaddr*/True );
2195 *ret = ok ? (UWord)NULL : bad_addr;
2196 break;
2197
2198 case VG_USERREQ__CHECK_READABLE: { /* check readable */
2199 MC_ReadResult res;
2200 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
2201 if (MC_AddrErr == res)
2202 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
2203 /*isUnaddr*/True );
2204 else if (MC_ValueErr == res)
2205 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
2206 /*isUnaddr*/False );
2207 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
2208 break;
2209 }
2210
2211 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00002212 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00002213 *ret = 0; /* return value is meaningless */
2214 break;
2215
2216 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00002217 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002218 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002219 break;
2220
2221 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00002222 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002223 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002224 break;
2225
2226 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00002227 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00002228 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00002229 break;
2230
sewardjedc75ab2005-03-15 23:30:32 +00002231 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
2232 if (arg[1] != 0 && arg[2] != 0) {
njn695c16e2005-03-27 03:40:28 +00002233 i = alloc_client_block();
2234 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
2235 cgbs[i].start = arg[1];
2236 cgbs[i].size = arg[2];
2237 cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
2238 cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00002239
2240 *ret = i;
2241 } else
2242 *ret = -1;
2243 break;
2244
nethercote8b76fe52004-11-08 19:20:09 +00002245 case VG_USERREQ__DISCARD: /* discard */
njn695c16e2005-03-27 03:40:28 +00002246 if (cgbs == NULL
2247 || arg[2] >= cgb_used ||
2248 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
sewardjedc75ab2005-03-15 23:30:32 +00002249 *ret = 1;
2250 } else {
njn695c16e2005-03-27 03:40:28 +00002251 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
2252 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
2253 VG_(free)(cgbs[arg[2]].desc);
2254 cgb_discards++;
sewardjedc75ab2005-03-15 23:30:32 +00002255 *ret = 0;
2256 }
nethercote8b76fe52004-11-08 19:20:09 +00002257 break;
2258
sewardj45d94cc2005-04-20 14:44:11 +00002259//zz case VG_USERREQ__GET_VBITS:
2260//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2261//zz error. */
2262//zz /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2263//zz *ret = mc_get_or_set_vbits_for_client
2264//zz ( tid, arg[1], arg[2], arg[3], False /* get them */ );
2265//zz break;
2266//zz
2267//zz case VG_USERREQ__SET_VBITS:
2268//zz /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
2269//zz error. */
2270//zz /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
2271//zz *ret = mc_get_or_set_vbits_for_client
2272//zz ( tid, arg[1], arg[2], arg[3], True /* set them */ );
2273//zz break;
nethercote8b76fe52004-11-08 19:20:09 +00002274
2275 default:
2276 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
2277 return True;
2278 } else {
2279 VG_(message)(Vg_UserMsg,
2280 "Warning: unknown memcheck client request code %llx",
2281 (ULong)arg[0]);
2282 return False;
2283 }
2284 }
2285 return True;
2286}
njn25e49d8e72002-09-23 09:36:25 +00002287
2288/*------------------------------------------------------------*/
2289/*--- Setup ---*/
2290/*------------------------------------------------------------*/
2291
njn26f02512004-11-22 18:33:15 +00002292void TL_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00002293{
njn810086f2002-11-14 12:42:47 +00002294 VG_(details_name) ("Memcheck");
2295 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00002296 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00002297 VG_(details_copyright_author)(
njn53612422005-03-12 16:22:54 +00002298 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
nethercote421281e2003-11-20 16:20:55 +00002299 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9ebf9fd2004-11-28 16:56:51 +00002300 VG_(details_avg_translation_sizeB) ( 370 );
njn25e49d8e72002-09-23 09:36:25 +00002301
njn8a97c6d2005-03-31 04:37:24 +00002302 VG_(basic_tool_funcs) (TL_(post_clo_init),
2303 TL_(instrument),
2304 TL_(fini));
2305
njn810086f2002-11-14 12:42:47 +00002306 VG_(needs_core_errors) ();
njn8a97c6d2005-03-31 04:37:24 +00002307 VG_(needs_tool_errors) (TL_(eq_Error),
2308 TL_(pp_Error),
2309 TL_(update_extra),
2310 TL_(recognised_suppression),
2311 TL_(read_extra_suppression_info),
2312 TL_(error_matches_suppression),
2313 TL_(get_error_name),
2314 TL_(print_extra_suppression_info));
njn810086f2002-11-14 12:42:47 +00002315 VG_(needs_libc_freeres) ();
njn8a97c6d2005-03-31 04:37:24 +00002316 VG_(needs_command_line_options)(TL_(process_cmd_line_option),
2317 TL_(print_usage),
2318 TL_(print_debug_usage));
2319 VG_(needs_client_requests) (TL_(handle_client_request));
2320 VG_(needs_sanity_checks) (TL_(cheap_sanity_check),
2321 TL_(expensive_sanity_check));
fitzhardinge98abfc72003-12-16 02:05:15 +00002322 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00002323
njn8a97c6d2005-03-31 04:37:24 +00002324 VG_(malloc_funcs) (TL_(malloc),
2325 TL_(__builtin_new),
2326 TL_(__builtin_vec_new),
2327 TL_(memalign),
2328 TL_(calloc),
2329 TL_(free),
2330 TL_(__builtin_delete),
2331 TL_(__builtin_vec_delete),
2332 TL_(realloc),
2333 MALLOC_REDZONE_SZB );
2334
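   /* Route the heap events shared with the common MAC_ machinery through
      our own A/V-bitmap primitives. */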
njn3e884182003-04-15 13:03:23 +00002335 MAC_( new_mem_heap) = & mc_new_mem_heap;
nethercote8b76fe52004-11-08 19:20:09 +00002336 MAC_( ban_mem_heap) = & mc_make_noaccess;
njn3e884182003-04-15 13:03:23 +00002337 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
nethercote8b76fe52004-11-08 19:20:09 +00002338 MAC_( die_mem_heap) = & mc_make_noaccess;
2339 MAC_(check_noaccess) = & mc_check_noaccess;
njn3e884182003-04-15 13:03:23 +00002340
fitzhardinge98abfc72003-12-16 02:05:15 +00002341 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
nethercote8b76fe52004-11-08 19:20:09 +00002342 VG_(init_new_mem_stack_signal) ( & mc_make_writable );
2343 VG_(init_new_mem_brk) ( & mc_make_writable );
njnb8dca862005-03-14 02:42:44 +00002344 VG_(init_new_mem_mmap) ( & mc_new_mem_mmap );
njn25e49d8e72002-09-23 09:36:25 +00002345
fitzhardinge98abfc72003-12-16 02:05:15 +00002346 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
njn3e884182003-04-15 13:03:23 +00002347
nethercote8b76fe52004-11-08 19:20:09 +00002348 VG_(init_die_mem_stack_signal) ( & mc_make_noaccess );
2349 VG_(init_die_mem_brk) ( & mc_make_noaccess );
2350 VG_(init_die_mem_munmap) ( & mc_make_noaccess );
njn3e884182003-04-15 13:03:23 +00002351
fitzhardinge98abfc72003-12-16 02:05:15 +00002352 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
2353 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
2354 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
2355 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
2356 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
2357 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002358
fitzhardinge98abfc72003-12-16 02:05:15 +00002359 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
2360 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
2361 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
2362 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
2363 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
2364 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00002365
nethercote8b76fe52004-11-08 19:20:09 +00002366 VG_(init_ban_mem_stack) ( & mc_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00002367
fitzhardinge98abfc72003-12-16 02:05:15 +00002368 VG_(init_pre_mem_read) ( & mc_check_is_readable );
2369 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
2370 VG_(init_pre_mem_write) ( & mc_check_is_writable );
njncf45fd42004-11-24 16:30:22 +00002371 VG_(init_post_mem_write) ( & mc_post_mem_write );
nethercote8b76fe52004-11-08 19:20:09 +00002372
2373 VG_(init_pre_reg_read) ( & mc_pre_reg_read );
njn25e49d8e72002-09-23 09:36:25 +00002374
njncf45fd42004-11-24 16:30:22 +00002375 VG_(init_post_reg_write) ( & mc_post_reg_write );
fitzhardinge98abfc72003-12-16 02:05:15 +00002376 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00002377
njn31066fd2005-03-26 00:42:02 +00002378 VG_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2379 VG_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2380 VG_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00002381
njn43c799e2003-04-08 00:08:52 +00002382 /* Additional block description for VG_(describe_addr)() */
nethercote8b76fe52004-11-08 19:20:09 +00002383 MAC_(describe_addr_supp) = client_perm_maybe_describe;
njn43c799e2003-04-08 00:08:52 +00002384
njnd04b7c62002-10-03 14:05:52 +00002385 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00002386 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00002387}
2388
njn26f02512004-11-22 18:33:15 +00002389void TL_(post_clo_init) ( void )
njn5c004e42002-11-18 11:04:50 +00002390{
2391}
2392
njn26f02512004-11-22 18:33:15 +00002393void TL_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00002394{
nethercote8b76fe52004-11-08 19:20:09 +00002395 MAC_(common_fini)( mc_detect_memory_leaks );
sewardj45d94cc2005-04-20 14:44:11 +00002396
2397 if (VG_(clo_verbosity) > 1) {
2398 VG_(message)(Vg_DebugMsg,
2399 "memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
2400 auxmap_used,
2401 64 * auxmap_used, auxmap_used / 16 );
2402 VG_(message)(Vg_DebugMsg,
2403 "memcheck: auxmaps: %lld searches, %lld comparisons",
2404 n_auxmap_searches, n_auxmap_cmps );
2405 }
2406
njn5c004e42002-11-18 11:04:50 +00002407 if (0) {
2408 VG_(message)(Vg_DebugMsg,
2409 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00002410 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00002411 }
njn25e49d8e72002-09-23 09:36:25 +00002412}
2413
njn26f02512004-11-22 18:33:15 +00002414VG_DETERMINE_INTERFACE_VERSION(TL_(pre_clo_init), 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00002415
njn25e49d8e72002-09-23 09:36:25 +00002416/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002417/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002418/*--------------------------------------------------------------------*/