
/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the          ---*/
/*--- accessibility (A) and validity (V) status of each byte.     ---*/
/*---                                                 mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2005 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */
//#include "vg_profile.c"

/* Define to debug the mem audit system. */
/* #define VG_DEBUG_MEMORY */

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by nine bits, one indicating
   accessibility, the other eight validity.  So each second-level map
   contains 73728 bytes.  This two-level arrangement conveniently
   divides the 4G address space into 64k lumps, each size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible and
   not valid' for all bytes.  Entries in the primary map for which
   the entire 64k is not in use at all point at this distinguished
   map.

   There are actually 4 distinguished secondaries.  These are used to
   represent a memory range which is either not addressable (validity
   doesn't matter), addressable+not valid, or addressable+valid.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

   f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
      = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
      = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address.
*/

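/* To make the f(addr) trick above concrete, here is an illustrative
   sketch (hypothetical helper, not compiled into the tool; the real
   fast paths below use rotateRight16() and mask with 0x3FFFF
   directly).  Rotating the low 16 address bits up above the high 16
   bits puts the two alignment bits Y,Z at bit positions 17:16, so a
   misaligned 4-byte access indexes the top 3/4 of primary_map, which
   only ever holds distinguished secondaries. */
#if 0
static __inline__ UInt pm_index_for_4byte_access ( Addr a )
{
   UInt rotated = ((UInt)a >> 16) | ((UInt)a << 16);   /* rotateRight16 */
   return rotated & 0x3FFFF;   /* 2^18 primary-map entries */
}
#endif
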
/*------------------------------------------------------------*/
/*--- Function declarations.                                ---*/
/*------------------------------------------------------------*/

static ULong mc_rd_V8_SLOWLY ( Addr a );
static UInt  mc_rd_V4_SLOWLY ( Addr a );
static UInt  mc_rd_V2_SLOWLY ( Addr a );
static UInt  mc_rd_V1_SLOWLY ( Addr a );

static void mc_wr_V8_SLOWLY ( Addr a, ULong vbytes );
static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes );
static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes );

/*------------------------------------------------------------*/
/*--- Data defns.                                           ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[SECONDARY_SIZE/8];
      UChar vbyte[SECONDARY_SIZE];
   }
   SecMap;


static SecMap* primary_map[ /*PRIMARY_SIZE*/ PRIMARY_SIZE*4 ];

#define DSM_IDX(a, v) ((((a)&1) << 1) + ((v)&1))

/* 4 secondary maps, but one is redundant (because the !addressable &&
   valid state is meaningless) */
static const SecMap distinguished_secondary_maps[4] = {
#define INIT(a, v) \
   [ DSM_IDX(a, v) ] = { { [0 ... (SECONDARY_SIZE/8)-1] = BIT_EXPAND(a) }, \
                         { [0 ... SECONDARY_SIZE-1] = BIT_EXPAND(a|v) } }
   INIT(VGM_BIT_VALID,   VGM_BIT_VALID),
   INIT(VGM_BIT_VALID,   VGM_BIT_INVALID),
   INIT(VGM_BIT_INVALID, VGM_BIT_VALID),
   INIT(VGM_BIT_INVALID, VGM_BIT_INVALID),
#undef INIT
};
#define N_SECONDARY_MAPS (sizeof(distinguished_secondary_maps)/sizeof(*distinguished_secondary_maps))

#define DSM(a,v) ((SecMap *)&distinguished_secondary_maps[DSM_IDX(a, v)])

#define DSM_NOTADDR        DSM(VGM_BIT_INVALID, VGM_BIT_INVALID)
#define DSM_ADDR_NOTVALID  DSM(VGM_BIT_VALID, VGM_BIT_INVALID)
#define DSM_ADDR_VALID     DSM(VGM_BIT_VALID, VGM_BIT_VALID)

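/* For reference (VGM_BIT_VALID == 0 and VGM_BIT_INVALID == 1, as
   asserted in init_shadow_memory() below), DSM_IDX lays the four
   distinguished maps out as:
      index 0: DSM_ADDR_VALID     (a=0, v=0)
      index 1: DSM_ADDR_NOTVALID  (a=0, v=1)
      index 2: (unused)           (a=1, v=0)  -- the meaningless
               !addressable-but-valid combination noted above
      index 3: DSM_NOTADDR        (a=1, v=1)
   Index 2 exists only so that DSM_IDX needs no special cases. */
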
static void init_shadow_memory ( void )
{
   Int i, a, v;

   /* check construction of the 4 distinguished secondaries */
   tl_assert(VGM_BIT_INVALID == 1);
   tl_assert(VGM_BIT_VALID == 0);

   for (a = 0; a <= 1; a++) {
      for (v = 0; v <= 1; v++) {
         if (DSM(a,v)->abits[0] != BIT_EXPAND(a))
            VG_(printf)("DSM(%d,%d)[%d]->abits[0] == %x not %x\n",
                        a,v,DSM_IDX(a,v),DSM(a,v)->abits[0], BIT_EXPAND(a));
         if (DSM(a,v)->vbyte[0] != BIT_EXPAND(a|v))
            VG_(printf)("DSM(%d,%d)[%d]->vbyte[0] == %x not %x\n",
                        a,v,DSM_IDX(a,v),DSM(a,v)->vbyte[0], BIT_EXPAND(a|v));

         tl_assert(DSM(a,v)->abits[0] == BIT_EXPAND(a));
         tl_assert(DSM(a,v)->vbyte[0] == BIT_EXPAND(v|a));
      }
   }

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < PRIMARY_SIZE; i++)
      primary_map[i] = DSM_NOTADDR;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = PRIMARY_SIZE; i < PRIMARY_SIZE*4; i++)
      primary_map[i] = DSM_NOTADDR;
}

/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static SecMap* alloc_secondary_map ( __attribute__ ((unused))
                                     Char* caller,
                                     const SecMap *prototype)
{
   SecMap* map;
   PROF_EVENT(10);

   map = (SecMap *)VG_(shadow_alloc)(sizeof(SecMap));

   VG_(memcpy)(map, prototype, sizeof(*map));

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   SecMap* sm     = primary_map[PM_IDX(a)];
   UInt    sm_off = SM_OFF(a);
   PROF_EVENT(20);
#  if 0
      if (IS_DISTINGUISHED_SM(sm))
         VG_(message)(Vg_DebugMsg,
                      "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static __inline__ UChar get_vbyte ( Addr a )
{
   SecMap* sm     = primary_map[PM_IDX(a)];
   UInt    sm_off = SM_OFF(a);
   PROF_EVENT(21);
#  if 0
      if (IS_DISTINGUISHED_SM(sm))
         VG_(message)(Vg_DebugMsg,
                      "accessed distinguished 2ndary (V)map! 0x%x\n", a);
#  endif
   return sm->vbyte[sm_off];
}

static /* __inline__ */ void set_abit ( Addr a, UChar abit )
{
   SecMap* sm;
   UInt    sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}

static __inline__ void set_vbyte ( Addr a, UChar vbyte )
{
   SecMap* sm;
   UInt    sm_off;
   PROF_EVENT(23);
   ENSURE_MAPPABLE(a, "set_vbyte");
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   sm->vbyte[sm_off] = vbyte;
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   SecMap* sm;
   UInt    sm_off;
   UChar   abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   tl_assert(IS_4_ALIGNED(a));
#  endif
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}

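/* Worked example for the nibble extraction in get_abits4_ALIGNED()
   above: each abits[] byte holds the A bits for 8 consecutive
   addresses, one bit per byte of memory.  For a 4-aligned address a,
   (a & 4) is 0 for the low half of that group and 4 for the high
   half, so ">>= (a & 4)" followed by "&= 0x0F" leaves exactly the
   four A bits covering a..a+3 in the low nibble, ready to compare
   against VGM_NIBBLE_VALID. */
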
static UInt __inline__ get_vbytes4_ALIGNED ( Addr a )
{
   SecMap* sm     = primary_map[PM_IDX(a)];
   UInt    sm_off = SM_OFF(a);
   PROF_EVENT(25);
#  ifdef VG_DEBUG_MEMORY
   tl_assert(IS_4_ALIGNED(a));
#  endif
   return ((UInt*)(sm->vbyte))[sm_off >> 2];
}


static void __inline__ set_vbytes4_ALIGNED ( Addr a, UInt vbytes )
{
   SecMap* sm;
   UInt    sm_off;
   ENSURE_MAPPABLE(a, "set_vbytes4_ALIGNED");
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   PROF_EVENT(23);
#  ifdef VG_DEBUG_MEMORY
   tl_assert(IS_4_ALIGNED(a));
#  endif
   ((UInt*)(sm->vbyte))[sm_off >> 2] = vbytes;
}


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, SizeT len,
                                      UInt example_a_bit,
                                      UInt example_v_bit )
{
   UChar   vbyte, abyte8;
   UInt    vword4, sm_off;
   SecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (VG_(clo_verbosity) > 0) {
      if (len > 100 * 1000 * 1000) {
         VG_(message)(Vg_UserMsg,
                      "Warning: set address range perms: "
                      "large range %u, a %d, v %d",
                      len, example_a_bit, example_v_bit );
      }
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* tl_assert(len < 30000000); */

   /* Check the permissions make sense. */
   tl_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);
   tl_assert(example_v_bit == VGM_BIT_VALID
             || example_v_bit == VGM_BIT_INVALID);
   if (example_a_bit == VGM_BIT_INVALID)
      tl_assert(example_v_bit == VGM_BIT_INVALID);

   /* The validity bits to write. */
   vbyte = example_v_bit==VGM_BIT_VALID
              ? VGM_BYTE_VALID : VGM_BYTE_INVALID;

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = BIT_EXPAND(example_a_bit);
   vword4 = (vbyte << 24) | (vbyte << 16) | (vbyte << 8) | vbyte;
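   /* Illustrative values (the exact bit-level constants live in
      mc_include.h): making a range writable passes
      example_a_bit == VGM_BIT_VALID and example_v_bit == VGM_BIT_INVALID,
      so vbyte becomes VGM_BYTE_INVALID, vword4 is four such V bytes side
      by side, and abyte8 is (presumably) the A bit replicated across all
      eight positions by BIT_EXPAND. */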
350
351# ifdef VG_DEBUG_MEMORY
352 /* Do it ... */
353 while (True) {
354 PROF_EVENT(31);
355 if (len == 0) break;
356 set_abit ( a, example_a_bit );
357 set_vbyte ( a, vbyte );
358 a++;
359 len--;
360 }
361
362# else
363 /* Slowly do parts preceding 8-byte alignment. */
364 while (True) {
365 PROF_EVENT(31);
366 if (len == 0) break;
367 if ((a % 8) == 0) break;
368 set_abit ( a, example_a_bit );
369 set_vbyte ( a, vbyte );
370 a++;
371 len--;
372 }
373
374 if (len == 0) {
375 VGP_POPCC(VgpSetMem);
376 return;
377 }
njnca82cc02004-11-22 17:18:48 +0000378 tl_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000379
njnb8dca862005-03-14 02:42:44 +0000380 /* Now align to the next primary_map entry */
381 for (; (a & SECONDARY_MASK) && len >= 8; a += 8, len -= 8) {
382
njn25e49d8e72002-09-23 09:36:25 +0000383 PROF_EVENT(32);
njnb8dca862005-03-14 02:42:44 +0000384 /* If the primary is already pointing to a distinguished map
385 with the same properties as we're trying to set, then leave
386 it that way. */
387 if (primary_map[PM_IDX(a)] == DSM(example_a_bit, example_v_bit))
388 continue;
389
njn25e49d8e72002-09-23 09:36:25 +0000390 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
njnb8dca862005-03-14 02:42:44 +0000391 sm = primary_map[PM_IDX(a)];
392 sm_off = SM_OFF(a);
njn25e49d8e72002-09-23 09:36:25 +0000393 sm->abits[sm_off >> 3] = abyte8;
394 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
395 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
njn25e49d8e72002-09-23 09:36:25 +0000396 }
397
njnb8dca862005-03-14 02:42:44 +0000398 /* Now set whole secondary maps to the right distinguished value.
399
400 Note that if the primary already points to a non-distinguished
401 secondary, then don't replace the reference. That would just
402 leak memory.
403 */
404 for(; len >= SECONDARY_SIZE; a += SECONDARY_SIZE, len -= SECONDARY_SIZE) {
405 sm = primary_map[PM_IDX(a)];
406
407 if (IS_DISTINGUISHED_SM(sm))
408 primary_map[PM_IDX(a)] = DSM(example_a_bit, example_v_bit);
409 else {
410 VG_(memset)(sm->abits, abyte8, sizeof(sm->abits));
411 VG_(memset)(sm->vbyte, vbyte, sizeof(sm->vbyte));
412 }
njn25e49d8e72002-09-23 09:36:25 +0000413 }
njnb8dca862005-03-14 02:42:44 +0000414
415 /* Now finish off any remains */
416 for (; len >= 8; a += 8, len -= 8) {
417 PROF_EVENT(32);
418
419 /* If the primary is already pointing to a distinguished map
420 with the same properties as we're trying to set, then leave
421 it that way. */
422 if (primary_map[PM_IDX(a)] == DSM(example_a_bit, example_v_bit))
423 continue;
424
425 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
426 sm = primary_map[PM_IDX(a)];
427 sm_off = SM_OFF(a);
428 sm->abits[sm_off >> 3] = abyte8;
429 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = vword4;
430 ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = vword4;
431 }
njn25e49d8e72002-09-23 09:36:25 +0000432
433 /* Finish the upper fragment. */
434 while (True) {
435 PROF_EVENT(33);
436 if (len == 0) break;
437 set_abit ( a, example_a_bit );
438 set_vbyte ( a, vbyte );
439 a++;
440 len--;
441 }
442# endif
443
444 /* Check that zero page and highest page have not been written to
445 -- this could happen with buggy syscall wrappers. Today
446 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njn26f02512004-11-22 18:33:15 +0000447 tl_assert(TL_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000448 VGP_POPCC(VgpSetMem);
449}
450
451/* Set permissions for address ranges ... */
452
nethercote8b76fe52004-11-08 19:20:09 +0000453static void mc_make_noaccess ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000454{
455 PROF_EVENT(35);
nethercote8b76fe52004-11-08 19:20:09 +0000456 DEBUG("mc_make_noaccess(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000457 set_address_range_perms ( a, len, VGM_BIT_INVALID, VGM_BIT_INVALID );
458}
459
nethercote8b76fe52004-11-08 19:20:09 +0000460static void mc_make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000461{
462 PROF_EVENT(36);
nethercote8b76fe52004-11-08 19:20:09 +0000463 DEBUG("mc_make_writable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000464 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_INVALID );
465}
466
nethercote8b76fe52004-11-08 19:20:09 +0000467static void mc_make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +0000468{
469 PROF_EVENT(37);
nethercote8b76fe52004-11-08 19:20:09 +0000470 DEBUG("mc_make_readable(%p, %llu)\n", a, (ULong)len);
njn25e49d8e72002-09-23 09:36:25 +0000471 set_address_range_perms ( a, len, VGM_BIT_VALID, VGM_BIT_VALID );
472}

static __inline__
void make_aligned_word_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_writable");
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid (0s). */
   sm->abits[sm_off >> 3] &= ~mask;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;
   UChar   mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   ((UInt*)(sm->vbyte))[sm_off >> 2] = VGM_WORD_INVALID;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid (1s). */
   sm->abits[sm_off >> 3] |= mask;
   VGP_POPCC(VgpESPAdj);
}

/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_doubleword_writable(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_writable");
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_doubleword_noaccess(Addr a)
{
   SecMap* sm;
   UInt    sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
   sm     = primary_map[PM_IDX(a)];
   sm_off = SM_OFF(a);
   sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 0] = VGM_WORD_INVALID;
   ((UInt*)(sm->vbyte))[(sm_off >> 2) + 1] = VGM_WORD_INVALID;
   VGP_POPCC(VgpESPAdj);
}

/* The %esp update handling functions */
ESP_UPDATE_HANDLERS ( make_aligned_word_writable,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_writable,
                      make_aligned_doubleword_noaccess,
                      mc_make_writable,
                      mc_make_noaccess
                    );

/* Block-copy permissions (needed for implementing realloc()). */
static void mc_copy_address_range_state ( Addr src, Addr dst, SizeT len )
{
   SizeT i;

   DEBUG("mc_copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit  = get_abit ( src+i );
      UChar vbyte = get_vbyte ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
      set_vbyte ( dst+i, vbyte );
   }
}

/*------------------------------------------------------------*/
/*--- Checking memory                                      ---*/
/*------------------------------------------------------------*/

/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

/* Returns True if [a .. a+len) is not addressible.  Otherwise,
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
   indicate the lowest failing address.  Functions below are
   similar. */
static Bool mc_check_noaccess ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

static Bool mc_check_writable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

typedef enum {
   MC_Ok = 5, MC_AddrErr = 6, MC_ValueErr = 7
} MC_ReadResult;

static MC_ReadResult mc_check_readable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;
   UChar abit;
   UChar vbyte;

   PROF_EVENT(44);
   DEBUG("mc_check_readable\n");
   for (i = 0; i < len; i++) {
      abit  = get_abit(a);
      vbyte = get_vbyte(a);
      PROF_EVENT(45);
      // Report addressability errors in preference to definedness errors
      // by checking the A bits first.
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return MC_ValueErr;
      }
      a++;
   }
   return MC_Ok;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static MC_ReadResult mc_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UChar abit;
   UChar vbyte;
   PROF_EVENT(46);
   DEBUG("mc_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(47);
      abit  = get_abit(a);
      vbyte = get_vbyte(a);
      // As in mc_check_readable(), check A bits first
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return MC_AddrErr;
      }
      if (vbyte != VGM_BYTE_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return MC_ValueErr;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return MC_Ok;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

static
void mc_check_is_writable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = mc_check_writable ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    /*isUnaddr*/True, s );
         break;

      case Vg_CorePThread:
      case Vg_CoreSignal:
         MAC_(record_core_mem_error)( tid, /*isUnaddr*/True, s );
         break;

      default:
         VG_(tool_panic)("mc_check_is_writable: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable ( CorePart part, ThreadId tid, Char* s,
                            Addr base, SizeT size )
{
   Addr bad_addr;
   MC_ReadResult res;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   res = mc_check_readable ( base, size, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );

      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False,
                                    isUnaddr, s );
         break;

      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, isUnaddr, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(tool_panic)("mc_check_is_readable: unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void mc_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   MC_ReadResult res;
   Addr bad_addr = 0;   // initialise to shut gcc up
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   tl_assert(part == Vg_CoreSysCall);
   res = mc_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (MC_Ok != res) {
      Bool isUnaddr = ( MC_AddrErr == res ? True : False );
      MAC_(record_param_error) ( tid, bad_addr, /*isReg*/False, isUnaddr, s );
   }

   VGP_POPCC(VgpCheckMem);
}


static
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
         a,(ULong)len,rr,ww,xx);
   mc_make_readable(a, len);
}

static
void mc_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
{
   if (is_inited) {
      mc_make_readable(a, len);
   } else {
      mc_make_writable(a, len);
   }
}

static
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
{
   mc_make_readable(a, len);
}

static
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
{
   mc_make_readable(a, len);
}

/*------------------------------------------------------------*/
/*--- Register event handlers                              ---*/
/*------------------------------------------------------------*/

// When a reg is written, mark the corresponding shadow reg bytes as valid.
static void mc_post_reg_write(CorePart part, ThreadId tid, OffT offset,
                              SizeT size)
{
   UChar area[size];
   VG_(memset)(area, VGM_BYTE_VALID, size);
   VG_(set_shadow_regs_area)( tid, offset, size, area );
}

static void mc_post_reg_write_clientcall(ThreadId tid, OffT offset, SizeT size,
                                         Addr f)
{
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
}

static void mc_pre_reg_read(CorePart part, ThreadId tid, Char* s, OffT offset,
                            SizeT size)
{
   UWord mask;
   UWord sh_reg_contents;

   // XXX: the only one at the moment
   tl_assert(Vg_CoreSysCall == part);

   switch (size) {
      case 4: mask = 0xffffffff; break;
      case 2: mask = 0xffff;     break;
      case 1: mask = 0xff;       break;
      default: VG_(tool_panic)("Unhandled size in mc_pre_reg_read");
   }

   VG_(get_shadow_regs_area)( tid, offset, size, (UChar*)&sh_reg_contents );
   if ( VGM_WORD_VALID != (mask & sh_reg_contents) )
      MAC_(record_param_error) ( tid, 0, /*isReg*/True, /*isUnaddr*/False, s );
}

/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.       ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}


static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}


/* Read/write 1/2/4/8 sized V bytes, and emit an address error if
   needed. */

/* MC_(helperc_{LD,ST}V{1,2,4,8}) handle the common case fast.
   Under all other circumstances, they defer to the relevant _SLOWLY
   function, which can handle all situations.
*/

/* ------------------------ Size = 8 ------------------------ */

REGPARM(1)
ULong MC_(helperc_LOADV8) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V8_SLOWLY(a);
#  else
   if (IS_8_ALIGNED(a)) {
      UInt    sec_no = shiftRight16(a) & 0xFFFF;
      SecMap* sm     = primary_map[sec_no];
      UInt    a_off  = (SM_OFF(a)) >> 3;
      UChar   abits  = sm->abits[a_off];
      if (abits == VGM_BYTE_VALID) {
         /* a is 8-aligned, mapped, and addressible. */
         UInt v_off = SM_OFF(a);
         /* LITTLE-ENDIAN */
         UInt vLo = ((UInt*)(sm->vbyte))[ (v_off >> 2) ];
         UInt vHi = ((UInt*)(sm->vbyte))[ (v_off >> 2) + 1 ];
         return ( ((ULong)vHi) << 32 ) | ((ULong)vLo);
      } else {
         return mc_rd_V8_SLOWLY(a);
      }
   }
   else
   if (IS_4_ALIGNED(a)) {
      /* LITTLE-ENDIAN */
      UInt vLo = MC_(helperc_LOADV4)(a+0);
      UInt vHi = MC_(helperc_LOADV4)(a+4);
      return ( ((ULong)vHi) << 32 ) | ((ULong)vLo);
   }
   else
      return mc_rd_V8_SLOWLY(a);
#  endif
}

REGPARM(1)
void MC_(helperc_STOREV8) ( Addr a, ULong vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V8_SLOWLY(a, vbytes);
#  else
   if (IS_8_ALIGNED(a)) {
      UInt    sec_no = shiftRight16(a) & 0xFFFF;
      SecMap* sm     = primary_map[sec_no];
      UInt    a_off  = (SM_OFF(a)) >> 3;
      if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
         /* a is 8-aligned, mapped, and addressible. */
         UInt v_off = SM_OFF(a);
         UInt vHi = (UInt)(vbytes >> 32);
         UInt vLo = (UInt)vbytes;
         /* LITTLE-ENDIAN */
         ((UInt*)(sm->vbyte))[ (v_off >> 2) ]     = vLo;
         ((UInt*)(sm->vbyte))[ (v_off >> 2) + 1 ] = vHi;
      } else {
         mc_wr_V8_SLOWLY(a, vbytes);
      }
      return;
   }
   else
   if (IS_4_ALIGNED(a)) {
      UInt vHi = (UInt)(vbytes >> 32);
      UInt vLo = (UInt)vbytes;
      /* LITTLE-ENDIAN */
      MC_(helperc_STOREV4)(a+0, vLo);
      MC_(helperc_STOREV4)(a+4, vHi);
      return;
   }
   else
      mc_wr_V8_SLOWLY(a, vbytes);
#  endif
}

/* ------------------------ Size = 4 ------------------------ */

REGPARM(1)
UInt MC_(helperc_LOADV4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V4_SLOWLY(a);
#  else
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (SM_OFF(a)) >> 3;
   UChar   abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = SM_OFF(a);
      return ((UInt*)(sm->vbyte))[ v_off >> 2 ];
   } else {
      /* Slow but general case. */
      return mc_rd_V4_SLOWLY(a);
   }
#  endif
}

REGPARM(2)
void MC_(helperc_STOREV4) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V4_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = rotateRight16(a) & 0x3FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (SM_OFF(a)) >> 3;
   UChar   abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(61);
   if (!IS_DISTINGUISHED_SM(sm) && abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible. */
      UInt v_off = SM_OFF(a);
      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = vbytes;
   } else {
      /* Slow but general case. */
      mc_wr_V4_SLOWLY(a, vbytes);
   }
#  endif
}

/* ------------------------ Size = 2 ------------------------ */

REGPARM(1)
UInt MC_(helperc_LOADV2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V2_SLOWLY(a);
#  else
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (SM_OFF(a)) >> 3;
   PROF_EVENT(62);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = SM_OFF(a);
      return 0xFFFF0000
             |
             (UInt)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
   } else {
      /* Slow but general case. */
      return mc_rd_V2_SLOWLY(a);
   }
#  endif
}

REGPARM(2)
void MC_(helperc_STOREV2) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V2_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = rotateRight16(a) & 0x1FFFF;
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (SM_OFF(a)) >> 3;
   PROF_EVENT(63);
   if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = SM_OFF(a);
      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = vbytes & 0x0000FFFF;
   } else {
      /* Slow but general case. */
      mc_wr_V2_SLOWLY(a, vbytes);
   }
#  endif
}

/* ------------------------ Size = 1 ------------------------ */

REGPARM(1)
UInt MC_(helperc_LOADV1) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return mc_rd_V1_SLOWLY(a);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (SM_OFF(a)) >> 3;
   PROF_EVENT(64);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = SM_OFF(a);
      return 0xFFFFFF00
             |
             (UInt)( ((UChar*)(sm->vbyte))[ v_off ] );
   } else {
      /* Slow but general case. */
      return mc_rd_V1_SLOWLY(a);
   }
#  endif
}

REGPARM(2)
void MC_(helperc_STOREV1) ( Addr a, UInt vbytes )
{
#  ifdef VG_DEBUG_MEMORY
   mc_wr_V1_SLOWLY(a, vbytes);
#  else
   UInt    sec_no = shiftRight16(a);
   SecMap* sm     = primary_map[sec_no];
   UInt    a_off  = (SM_OFF(a)) >> 3;
   PROF_EVENT(65);
   if (!IS_DISTINGUISHED_SM(sm) && sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      UInt v_off = SM_OFF(a);
      ((UChar*)(sm->vbyte))[ v_off ] = vbytes & 0x000000FF;
   } else {
      /* Slow but general case. */
      mc_wr_V1_SLOWLY(a, vbytes);
   }
#  endif
}


/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- VG_(helperc_{LD,ST}V{1,2,4,8}) can't manage.         ---*/
/*------------------------------------------------------------*/

/* ------------------------ Size = 8 ------------------------ */

static ULong mc_rd_V8_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok, a4ok, a5ok, a6ok, a7ok;
   UInt vb0,  vb1,  vb2,  vb3,  vb4,  vb5,  vb6,  vb7;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 8 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;
   a4ok = get_abit(a+4) == VGM_BIT_VALID;
   a5ok = get_abit(a+5) == VGM_BIT_VALID;
   a6ok = get_abit(a+6) == VGM_BIT_VALID;
   a7ok = get_abit(a+7) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);
   vb4 = (UInt)get_vbyte(a+4);
   vb5 = (UInt)get_vbyte(a+5);
   vb6 = (UInt)get_vbyte(a+6);
   vb7 = (UInt)get_vbyte(a+7);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory
   */
   if (a0ok && a1ok && a2ok && a3ok && a4ok && a5ok && a6ok && a7ok) {
      ULong vw = VGM_WORD64_INVALID;
      vw <<= 8; vw |= vb7;
      vw <<= 8; vw |= vb6;
      vw <<= 8; vw |= vb5;
      vw <<= 8; vw |= vb4;
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which obscures the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 7) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok && !a4ok && !a5ok && !a6ok && !a7ok)) {
      MAC_(record_address_error)( VG_(get_running_tid)(), a, 8, False );
      return VGM_WORD64_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 8-aligned.
      If not, Case 2 will have applied.
   */
   tl_assert(MAC_(clo_partial_loads_ok));
   {
      ULong vw = VGM_WORD64_INVALID;
      vw <<= 8; vw |= (a7ok ? vb7 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a6ok ? vb6 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a5ok ? vb5 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a4ok ? vb4 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}

static void mc_wr_V8_SLOWLY ( Addr a, ULong vbytes )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(71);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+4) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+5) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+6) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+7) != VGM_BIT_VALID) aerr = True;

   /* Store the V bytes, remembering to do it little-endian-ly. */
   set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+3, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+4, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+5, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+6, vbytes & 0x000000FF ); vbytes >>= 8;
   set_vbyte( a+7, vbytes & 0x000000FF );

   /* If an address error has happened, report it. */
   if (aerr)
      MAC_(record_address_error)( VG_(get_running_tid)(), a, 8, True );
}

/* ------------------------ Size = 4 ------------------------ */

static UInt mc_rd_V4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;
   UInt vb0, vb1, vb2, vb3;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Also get the validity bytes for the address. */
   vb0 = (UInt)get_vbyte(a+0);
   vb1 = (UInt)get_vbyte(a+1);
   vb2 = (UInt)get_vbyte(a+2);
   vb3 = (UInt)get_vbyte(a+3);

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
      - return V bytes as read from memory
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= vb3;
      vw <<= 8; vw |= vb2;
      vw <<= 8; vw |= vb1;
      vw <<= 8; vw |= vb0;
      return vw;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      - return V word indicating validity.
      This sounds strange, but if we make loads from invalid addresses
      give invalid data, we also risk producing a number of confusing
      undefined-value errors later, which obscures the fact that the
      error arose in the first place from an invalid address.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MAC_(record_address_error)( VG_(get_running_tid)(), a, 4, False );
      return (VGM_BYTE_VALID << 24) | (VGM_BYTE_VALID << 16)
             | (VGM_BYTE_VALID << 8) | VGM_BYTE_VALID;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      - returned V word is invalid where the address is invalid,
        and contains V bytes from memory otherwise.
      Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   tl_assert(MAC_(clo_partial_loads_ok));
   {
      UInt vw = VGM_WORD_INVALID;
      vw <<= 8; vw |= (a3ok ? vb3 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a2ok ? vb2 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a1ok ? vb1 : VGM_BYTE_INVALID);
      vw <<= 8; vw |= (a0ok ? vb0 : VGM_BYTE_INVALID);
      return vw;
   }
}

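/* Worked example of Case 3 above (illustrative): a 4-aligned load
   whose last two bytes fall just past the end of an addressable block
   sees a0ok,a1ok == True and a2ok,a3ok == False.  With
   MAC_(clo_partial_loads_ok) set (the --partial-loads-ok option) no
   address error is reported; instead bytes 2 and 3 of the returned V
   word are VGM_BYTE_INVALID, so an error surfaces only if those bytes
   are actually used later. */
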
njn5c004e42002-11-18 11:04:50 +00001284static void mc_wr_V4_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001285{
1286 /* Check the address for validity. */
1287 Bool aerr = False;
1288 PROF_EVENT(71);
1289
1290 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1291 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1292 if (get_abit(a+2) != VGM_BIT_VALID) aerr = True;
1293 if (get_abit(a+3) != VGM_BIT_VALID) aerr = True;
1294
1295 /* Store the V bytes, remembering to do it little-endian-ly. */
1296 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1297 set_vbyte( a+1, vbytes & 0x000000FF ); vbytes >>= 8;
1298 set_vbyte( a+2, vbytes & 0x000000FF ); vbytes >>= 8;
1299 set_vbyte( a+3, vbytes & 0x000000FF );
1300
1301 /* If an address error has happened, report it. */
1302 if (aerr)
njnb8dca862005-03-14 02:42:44 +00001303 MAC_(record_address_error)( VG_(get_running_tid)(), a, 4, True );
njn25e49d8e72002-09-23 09:36:25 +00001304}
1305
sewardj95448072004-11-22 20:19:51 +00001306/* ------------------------ Size = 2 ------------------------ */
1307
njn5c004e42002-11-18 11:04:50 +00001308static UInt mc_rd_V2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001309{
1310 /* Check the address for validity. */
1311 UInt vw = VGM_WORD_INVALID;
1312 Bool aerr = False;
1313 PROF_EVENT(72);
1314
1315 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1316 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1317
1318 /* Fetch the V bytes, remembering to do it little-endian-ly. */
1319 vw <<= 8; vw |= (UInt)get_vbyte(a+1);
1320 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1321
1322 /* If an address error has happened, report it. */
1323 if (aerr) {
njnb8dca862005-03-14 02:42:44 +00001324 MAC_(record_address_error)( VG_(get_running_tid)(), a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +00001325 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1326 | (VGM_BYTE_VALID << 8) | (VGM_BYTE_VALID);
1327 }
1328 return vw;
1329}
1330
njn5c004e42002-11-18 11:04:50 +00001331static void mc_wr_V2_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001332{
1333 /* Check the address for validity. */
1334 Bool aerr = False;
1335 PROF_EVENT(73);
1336
1337 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1338 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1339
1340 /* Store the V bytes, remembering to do it little-endian-ly. */
1341 set_vbyte( a+0, vbytes & 0x000000FF ); vbytes >>= 8;
1342 set_vbyte( a+1, vbytes & 0x000000FF );
1343
1344 /* If an address error has happened, report it. */
1345 if (aerr)
njnb8dca862005-03-14 02:42:44 +00001346 MAC_(record_address_error)( VG_(get_running_tid)(), a, 2, True );
njn25e49d8e72002-09-23 09:36:25 +00001347}
1348
sewardj95448072004-11-22 20:19:51 +00001349/* ------------------------ Size = 1 ------------------------ */
1350
njn5c004e42002-11-18 11:04:50 +00001351static UInt mc_rd_V1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +00001352{
1353 /* Check the address for validity. */
1354 UInt vw = VGM_WORD_INVALID;
1355 Bool aerr = False;
1356 PROF_EVENT(74);
1357
1358 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1359
1360 /* Fetch the V byte. */
1361 vw <<= 8; vw |= (UInt)get_vbyte(a+0);
1362
1363 /* If an address error has happened, report it. */
1364 if (aerr) {
njnb8dca862005-03-14 02:42:44 +00001365 MAC_(record_address_error)( VG_(get_running_tid)(), a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001366 vw = (VGM_BYTE_INVALID << 24) | (VGM_BYTE_INVALID << 16)
1367 | (VGM_BYTE_INVALID << 8) | (VGM_BYTE_VALID);
1368 }
1369 return vw;
1370}
1371
njn5c004e42002-11-18 11:04:50 +00001372static void mc_wr_V1_SLOWLY ( Addr a, UInt vbytes )
njn25e49d8e72002-09-23 09:36:25 +00001373{
1374 /* Check the address for validity. */
1375 Bool aerr = False;
1376 PROF_EVENT(75);
1377 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1378
1379 /* Store the V bytes, remembering to do it little-endian-ly. */
1380 set_vbyte( a+0, vbytes & 0x000000FF );
1381
1382 /* If an address error has happened, report it. */
1383 if (aerr)
njnb8dca862005-03-14 02:42:44 +00001384 MAC_(record_address_error)( VG_(get_running_tid)(), a, 1, True );
njn25e49d8e72002-09-23 09:36:25 +00001385}
1386
1387
1388/* ---------------------------------------------------------------------
1389 Called from generated code, or from the assembly helpers.
1390 Handlers for value check failures.
1391 ------------------------------------------------------------------ */
1392
njn5c004e42002-11-18 11:04:50 +00001393void MC_(helperc_value_check0_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001394{
njnb8dca862005-03-14 02:42:44 +00001395 MC_(record_value_error) ( VG_(get_running_tid)(), 0 );
njn25e49d8e72002-09-23 09:36:25 +00001396}
1397
njn5c004e42002-11-18 11:04:50 +00001398void MC_(helperc_value_check1_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001399{
njnb8dca862005-03-14 02:42:44 +00001400 MC_(record_value_error) ( VG_(get_running_tid)(), 1 );
njn25e49d8e72002-09-23 09:36:25 +00001401}
1402
njn5c004e42002-11-18 11:04:50 +00001403void MC_(helperc_value_check2_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001404{
njnb8dca862005-03-14 02:42:44 +00001405 MC_(record_value_error) ( VG_(get_running_tid)(), 2 );
njn25e49d8e72002-09-23 09:36:25 +00001406}
1407
njn5c004e42002-11-18 11:04:50 +00001408void MC_(helperc_value_check4_fail) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001409{
njnb8dca862005-03-14 02:42:44 +00001410 MC_(record_value_error) ( VG_(get_running_tid)(), 4 );
njn25e49d8e72002-09-23 09:36:25 +00001411}
1412
sewardj95448072004-11-22 20:19:51 +00001413REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
1414{
njnb8dca862005-03-14 02:42:44 +00001415 MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz );
sewardj95448072004-11-22 20:19:51 +00001416}
1417
njn25e49d8e72002-09-23 09:36:25 +00001418
njn25e49d8e72002-09-23 09:36:25 +00001419/*------------------------------------------------------------*/
sewardjee070842003-07-05 17:53:55 +00001420/*--- Metadata get/set functions, for client requests. ---*/
1421/*------------------------------------------------------------*/
1422
1423/* Copy V bits between 'dataV' and 'vbitsV', in the direction given by
1424   'setting'.  Returns: 1 == OK, 2 == alignment error, 3 == addressing error. */
nethercote8b76fe52004-11-08 19:20:09 +00001425static Int mc_get_or_set_vbits_for_client (
njn72718642003-07-24 08:45:32 +00001426 ThreadId tid,
sewardjee070842003-07-05 17:53:55 +00001427 Addr dataV,
1428 Addr vbitsV,
nethercote451eae92004-11-02 13:06:32 +00001429 SizeT size,
sewardjee070842003-07-05 17:53:55 +00001430 Bool setting /* True <=> set vbits, False <=> get vbits */
1431)
1432{
1433 Bool addressibleD = True;
1434 Bool addressibleV = True;
1435 UInt* data = (UInt*)dataV;
1436 UInt* vbits = (UInt*)vbitsV;
nethercote451eae92004-11-02 13:06:32 +00001437 SizeT szW = size / 4; /* sigh */
1438 SizeT i;
sewardjaf48a602003-07-06 00:54:47 +00001439 UInt* dataP = NULL; /* bogus init to keep gcc happy */
1440 UInt* vbitsP = NULL; /* ditto */
sewardjee070842003-07-05 17:53:55 +00001441
1442 /* Check alignment of args. */
njnedfa0f62004-11-30 18:08:05 +00001443 if (!(IS_4_ALIGNED(data) && IS_4_ALIGNED(vbits)))
sewardjee070842003-07-05 17:53:55 +00001444 return 2;
1445 if ((size & 3) != 0)
1446 return 2;
1447
1448   /* Check that both arrays are addressable. */
1449 for (i = 0; i < szW; i++) {
sewardjaf48a602003-07-06 00:54:47 +00001450 dataP = &data[i];
1451 vbitsP = &vbits[i];
sewardjee070842003-07-05 17:53:55 +00001452 if (get_abits4_ALIGNED((Addr)dataP) != VGM_NIBBLE_VALID) {
1453 addressibleD = False;
1454 break;
1455 }
1456 if (get_abits4_ALIGNED((Addr)vbitsP) != VGM_NIBBLE_VALID) {
1457 addressibleV = False;
1458 break;
1459 }
1460 }
1461 if (!addressibleD) {
njn72718642003-07-24 08:45:32 +00001462 MAC_(record_address_error)( tid, (Addr)dataP, 4,
sewardjee070842003-07-05 17:53:55 +00001463 setting ? True : False );
1464 return 3;
1465 }
1466 if (!addressibleV) {
njn72718642003-07-24 08:45:32 +00001467 MAC_(record_address_error)( tid, (Addr)vbitsP, 4,
sewardjee070842003-07-05 17:53:55 +00001468 setting ? False : True );
1469 return 3;
1470 }
1471
1472 /* Do the copy */
1473 if (setting) {
1474 /* setting */
1475 for (i = 0; i < szW; i++) {
1476 if (get_vbytes4_ALIGNED( (Addr)&vbits[i] ) != VGM_WORD_VALID)
njn72718642003-07-24 08:45:32 +00001477 MC_(record_value_error)(tid, 4);
sewardjee070842003-07-05 17:53:55 +00001478 set_vbytes4_ALIGNED( (Addr)&data[i], vbits[i] );
1479 }
1480 } else {
1481 /* getting */
1482 for (i = 0; i < szW; i++) {
1483 vbits[i] = get_vbytes4_ALIGNED( (Addr)&data[i] );
1484 set_vbytes4_ALIGNED( (Addr)&vbits[i], VGM_WORD_VALID );
1485 }
1486 }
1487
1488 return 1;
1489}
1490
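/* Illustrative client-side usage (this is client code, not part of this
   file; it assumes the VALGRIND_GET_VBITS / VALGRIND_SET_VBITS macros
   from memcheck.h, which reach the function above via the GET_VBITS /
   SET_VBITS requests handled further down). */
#if 0
#include "memcheck.h"

void example_save_and_restore_vbits ( void )
{
   unsigned int data[2] = { 0, 0 };
   unsigned int vbits[2];
   /* Return code as documented above: 1 == OK, 2 == alignment error,
      3 == addressing error. */
   int res = VALGRIND_GET_VBITS(data, vbits, sizeof(data));
   if (res == 1)
      VALGRIND_SET_VBITS(data, vbits, sizeof(data));
}
#endif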
1491
1492/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001493/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1494/*------------------------------------------------------------*/
1495
sewardja4495682002-10-21 07:29:59 +00001496/* For the memory leak detector, say whether an entire 64k chunk of
1497 address space is possibly in use, or not. If in doubt return
1498 True.
njn25e49d8e72002-09-23 09:36:25 +00001499*/
sewardja4495682002-10-21 07:29:59 +00001500static
1501Bool mc_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001502{
njnb8dca862005-03-14 02:42:44 +00001503 tl_assert(chunk_number >= 0 && chunk_number < PRIMARY_SIZE);
1504 if (primary_map[chunk_number] == DSM_NOTADDR) {
sewardja4495682002-10-21 07:29:59 +00001505 /* Definitely not in use. */
1506 return False;
1507 } else {
1508 return True;
njn25e49d8e72002-09-23 09:36:25 +00001509 }
1510}
1511
1512
sewardja4495682002-10-21 07:29:59 +00001513/* For the memory leak detector, say whether or not a given word
1514 address is to be regarded as valid. */
1515static
1516Bool mc_is_valid_address ( Addr a )
1517{
1518 UInt vbytes;
1519 UChar abits;
njnedfa0f62004-11-30 18:08:05 +00001520 tl_assert(IS_4_ALIGNED(a));
sewardja4495682002-10-21 07:29:59 +00001521 abits = get_abits4_ALIGNED(a);
1522 vbytes = get_vbytes4_ALIGNED(a);
1523 if (abits == VGM_NIBBLE_VALID && vbytes == VGM_WORD_VALID) {
1524 return True;
1525 } else {
1526 return False;
1527 }
1528}
1529
1530
nethercote996901a2004-08-03 13:29:09 +00001531/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00001532 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00001533 tool. */
njnb8dca862005-03-14 02:42:44 +00001534static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
njn25e49d8e72002-09-23 09:36:25 +00001535{
sewardj2a99cf62004-11-24 10:44:19 +00001536 MAC_(do_detect_memory_leaks) (
njnb8dca862005-03-14 02:42:44 +00001537 tid, mode, mc_is_valid_64k_chunk, mc_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001538}
1539
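/* Illustrative client-side trigger (client code, not part of this file;
   it assumes memcheck.h's VALGRIND_DO_LEAK_CHECK macro, which arrives
   here via the DO_LEAK_CHECK request handled further down). */
#if 0
#include "memcheck.h"

void example_leak_check_now ( void )
{
   VALGRIND_DO_LEAK_CHECK;   /* run a leak check at this point in the run */
}
#endif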
1540
1541/* ---------------------------------------------------------------------
1542 Sanity check machinery (permanently engaged).
1543 ------------------------------------------------------------------ */
1544
njn26f02512004-11-22 18:33:15 +00001545Bool TL_(cheap_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001546{
jseward9800fd32004-01-04 23:08:04 +00001547 /* nothing useful we can rapidly check */
1548 return True;
njn25e49d8e72002-09-23 09:36:25 +00001549}
1550
njn26f02512004-11-22 18:33:15 +00001551Bool TL_(expensive_sanity_check) ( void )
njn25e49d8e72002-09-23 09:36:25 +00001552{
1553 Int i;
1554
njnb8dca862005-03-14 02:42:44 +00001555   /* Make sure nobody changed the distinguished secondaries.
1556      They're in read-only memory, so that would be hard.
1557 */
1558#if 0
njn25e49d8e72002-09-23 09:36:25 +00001559 for (i = 0; i < 8192; i++)
1560 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1561 return False;
1562
1563 for (i = 0; i < 65536; i++)
1564 if (distinguished_secondary_map.vbyte[i] != VGM_BYTE_INVALID)
1565 return False;
njnb8dca862005-03-14 02:42:44 +00001566#endif
njn25e49d8e72002-09-23 09:36:25 +00001567
1568 /* Make sure that the upper 3/4 of the primary map hasn't
1569 been messed with. */
njnb8dca862005-03-14 02:42:44 +00001570 for (i = PRIMARY_SIZE; i < PRIMARY_SIZE*4; i++)
1571 if (primary_map[i] != DSM_NOTADDR)
njn25e49d8e72002-09-23 09:36:25 +00001572 return False;
1573
1574 return True;
1575}
1576
njn25e49d8e72002-09-23 09:36:25 +00001577/*------------------------------------------------------------*/
njnd3040452003-05-19 15:04:06 +00001578/*--- Command line args ---*/
njn25e49d8e72002-09-23 09:36:25 +00001579/*------------------------------------------------------------*/
1580
njn43c799e2003-04-08 00:08:52 +00001581Bool MC_(clo_avoid_strlen_errors) = True;
njn43c799e2003-04-08 00:08:52 +00001582
njn26f02512004-11-22 18:33:15 +00001583Bool TL_(process_cmd_line_option)(Char* arg)
njn25e49d8e72002-09-23 09:36:25 +00001584{
nethercote27fec902004-06-16 21:26:32 +00001585 VG_BOOL_CLO("--avoid-strlen-errors", MC_(clo_avoid_strlen_errors))
njn25e49d8e72002-09-23 09:36:25 +00001586 else
njn43c799e2003-04-08 00:08:52 +00001587 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001588
1589 return True;
njn25e49d8e72002-09-23 09:36:25 +00001590}
1591
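/* Example invocation (illustrative): the boolean option above is given
   on the command line as, e.g.

      valgrind --tool=memcheck --avoid-strlen-errors=no ./myprog

   and anything unrecognised here falls through to the common MAC_(...)
   option handler. */
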
njn26f02512004-11-22 18:33:15 +00001592void TL_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001593{
njn3e884182003-04-15 13:03:23 +00001594 MAC_(print_common_usage)();
1595 VG_(printf)(
1596" --avoid-strlen-errors=no|yes suppress errs from inlined strlen [yes]\n"
1597 );
1598}
1599
njn26f02512004-11-22 18:33:15 +00001600void TL_(print_debug_usage)(void)
njn3e884182003-04-15 13:03:23 +00001601{
1602 MAC_(print_common_debug_usage)();
1603 VG_(printf)(
sewardj8ec2cfc2002-10-13 00:57:26 +00001604" --cleanup=no|yes improve after instrumentation? [yes]\n"
njn3e884182003-04-15 13:03:23 +00001605 );
njn25e49d8e72002-09-23 09:36:25 +00001606}
1607
nethercote8b76fe52004-11-08 19:20:09 +00001608/*------------------------------------------------------------*/
1609/*--- Client requests ---*/
1610/*------------------------------------------------------------*/
1611
1612/* Client block management:
1613
1614 This is managed as an expanding array of client block descriptors.
1615 Indices of live descriptors are issued to the client, so it can ask
1616 to free them later. Therefore we cannot slide live entries down
1617 over dead ones. Instead we must use free/inuse flags and scan for
1618 an empty slot at allocation time. This in turn means allocation is
1619 relatively expensive, so we hope this does not happen too often.
nethercote8b76fe52004-11-08 19:20:09 +00001620
sewardjedc75ab2005-03-15 23:30:32 +00001621 An unused block has start == size == 0
1622*/
nethercote8b76fe52004-11-08 19:20:09 +00001623
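/* Illustrative client-side usage (client code, not part of this file;
   it assumes memcheck.h's VALGRIND_CREATE_BLOCK / VALGRIND_DISCARD
   macros, which are serviced by the descriptor array managed below). */
#if 0
#include "memcheck.h"

void example_describe_buffer ( void* buf, unsigned long nbytes )
{
   int blk = VALGRIND_CREATE_BLOCK(buf, nbytes, "my I/O buffer");
   /* ... errors touching 'buf' can now be described using that text ... */
   VALGRIND_DISCARD(blk);
}
#endif
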
1624typedef
1625 struct {
1626 Addr start;
1627 SizeT size;
1628 ExeContext* where;
sewardjedc75ab2005-03-15 23:30:32 +00001629 Char* desc;
nethercote8b76fe52004-11-08 19:20:09 +00001630 }
1631 CGenBlock;
1632
1633/* This subsystem is self-initialising. */
1634static UInt vg_cgb_size = 0;
1635static UInt vg_cgb_used = 0;
1636static CGenBlock* vg_cgbs = NULL;
1637
1638/* Stats for this subsystem. */
1639static UInt vg_cgb_used_MAX = 0; /* Max in use. */
1640static UInt vg_cgb_allocs = 0; /* Number of allocs. */
1641static UInt vg_cgb_discards = 0; /* Number of discards. */
1642static UInt vg_cgb_search = 0; /* Number of searches. */
1643
1644
1645static
1646Int vg_alloc_client_block ( void )
1647{
1648 UInt i, sz_new;
1649 CGenBlock* cgbs_new;
1650
1651 vg_cgb_allocs++;
1652
1653 for (i = 0; i < vg_cgb_used; i++) {
1654 vg_cgb_search++;
sewardjedc75ab2005-03-15 23:30:32 +00001655 if (vg_cgbs[i].start == 0 && vg_cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00001656 return i;
1657 }
1658
1659 /* Not found. Try to allocate one at the end. */
1660 if (vg_cgb_used < vg_cgb_size) {
1661 vg_cgb_used++;
1662 return vg_cgb_used-1;
1663 }
1664
1665 /* Ok, we have to allocate a new one. */
njnca82cc02004-11-22 17:18:48 +00001666 tl_assert(vg_cgb_used == vg_cgb_size);
nethercote8b76fe52004-11-08 19:20:09 +00001667 sz_new = (vg_cgbs == NULL) ? 10 : (2 * vg_cgb_size);
1668
1669 cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
1670 for (i = 0; i < vg_cgb_used; i++)
1671 cgbs_new[i] = vg_cgbs[i];
1672
1673 if (vg_cgbs != NULL)
1674 VG_(free)( vg_cgbs );
1675 vg_cgbs = cgbs_new;
1676
1677 vg_cgb_size = sz_new;
1678 vg_cgb_used++;
1679 if (vg_cgb_used > vg_cgb_used_MAX)
1680 vg_cgb_used_MAX = vg_cgb_used;
1681 return vg_cgb_used-1;
1682}
1683
1684
1685static void show_client_block_stats ( void )
1686{
1687 VG_(message)(Vg_DebugMsg,
1688 "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
1689 vg_cgb_allocs, vg_cgb_discards, vg_cgb_used_MAX, vg_cgb_search
1690 );
1691}
1692
1693static Bool find_addr(VgHashNode* sh_ch, void* ap)
1694{
1695 MAC_Chunk *m = (MAC_Chunk*)sh_ch;
1696 Addr a = *(Addr*)ap;
1697
1698 return VG_(addr_is_in_block)(a, m->data, m->size);
1699}
1700
1701static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
1702{
1703 UInt i;
1704 /* VG_(printf)("try to identify %d\n", a); */
1705
1706 /* Perhaps it's a general block ? */
1707 for (i = 0; i < vg_cgb_used; i++) {
sewardjedc75ab2005-03-15 23:30:32 +00001708 if (vg_cgbs[i].start == 0 && vg_cgbs[i].size == 0)
nethercote8b76fe52004-11-08 19:20:09 +00001709 continue;
1710 if (VG_(addr_is_in_block)(a, vg_cgbs[i].start, vg_cgbs[i].size)) {
1711 MAC_Mempool **d, *mp;
1712
1713 /* OK - maybe it's a mempool, too? */
1714 mp = (MAC_Mempool*)VG_(HT_get_node)(MAC_(mempool_list),
1715 (UWord)vg_cgbs[i].start,
1716 (void*)&d);
1717 if(mp != NULL) {
1718 if(mp->chunks != NULL) {
1719 MAC_Chunk *mc;
1720
1721 mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
1722 if(mc != NULL) {
1723 ai->akind = UserG;
1724 ai->blksize = mc->size;
1725 ai->rwoffset = (Int)(a) - (Int)mc->data;
1726 ai->lastchange = mc->where;
1727 return True;
1728 }
1729 }
1730 ai->akind = Mempool;
1731 ai->blksize = vg_cgbs[i].size;
1732 ai->rwoffset = (Int)(a) - (Int)(vg_cgbs[i].start);
1733 ai->lastchange = vg_cgbs[i].where;
1734 return True;
1735 }
1736 ai->akind = UserG;
1737 ai->blksize = vg_cgbs[i].size;
1738 ai->rwoffset = (Int)(a) - (Int)(vg_cgbs[i].start);
1739 ai->lastchange = vg_cgbs[i].where;
sewardjedc75ab2005-03-15 23:30:32 +00001740 ai->desc = vg_cgbs[i].desc;
nethercote8b76fe52004-11-08 19:20:09 +00001741 return True;
1742 }
1743 }
1744 return False;
1745}
1746
njn26f02512004-11-22 18:33:15 +00001747Bool TL_(handle_client_request) ( ThreadId tid, UWord* arg, UWord* ret )
nethercote8b76fe52004-11-08 19:20:09 +00001748{
1749 Int i;
1750 Bool ok;
1751 Addr bad_addr;
1752
njnfc26ff92004-11-22 19:12:49 +00001753 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
nethercote8b76fe52004-11-08 19:20:09 +00001754 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
1755 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
1756 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
1757 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
1758 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
1759 && VG_USERREQ__MEMPOOL_FREE != arg[0])
1760 return False;
1761
1762 switch (arg[0]) {
1763 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
1764 ok = mc_check_writable ( arg[1], arg[2], &bad_addr );
1765 if (!ok)
1766 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/True,
1767 /*isUnaddr*/True );
1768 *ret = ok ? (UWord)NULL : bad_addr;
1769 break;
1770
1771 case VG_USERREQ__CHECK_READABLE: { /* check readable */
1772 MC_ReadResult res;
1773 res = mc_check_readable ( arg[1], arg[2], &bad_addr );
1774 if (MC_AddrErr == res)
1775 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
1776 /*isUnaddr*/True );
1777 else if (MC_ValueErr == res)
1778 MC_(record_user_error) ( tid, bad_addr, /*isWrite*/False,
1779 /*isUnaddr*/False );
1780 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
1781 break;
1782 }
1783
1784 case VG_USERREQ__DO_LEAK_CHECK:
njnb8dca862005-03-14 02:42:44 +00001785 mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
nethercote8b76fe52004-11-08 19:20:09 +00001786 *ret = 0; /* return value is meaningless */
1787 break;
1788
1789 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
nethercote8b76fe52004-11-08 19:20:09 +00001790 mc_make_noaccess ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00001791 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00001792 break;
1793
1794 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
nethercote8b76fe52004-11-08 19:20:09 +00001795 mc_make_writable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00001796 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00001797 break;
1798
1799 case VG_USERREQ__MAKE_READABLE: /* make readable */
nethercote8b76fe52004-11-08 19:20:09 +00001800 mc_make_readable ( arg[1], arg[2] );
sewardjedc75ab2005-03-15 23:30:32 +00001801 *ret = -1;
nethercote8b76fe52004-11-08 19:20:09 +00001802 break;
1803
sewardjedc75ab2005-03-15 23:30:32 +00001804 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
1805 if (arg[1] != 0 && arg[2] != 0) {
1806 i = vg_alloc_client_block();
1807 /* VG_(printf)("allocated %d %p\n", i, vg_cgbs); */
1808 vg_cgbs[i].start = arg[1];
1809 vg_cgbs[i].size = arg[2];
1810 vg_cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
njnd01fef72005-03-25 23:35:48 +00001811 vg_cgbs[i].where = VG_(record_ExeContext) ( tid );
sewardjedc75ab2005-03-15 23:30:32 +00001812
1813 *ret = i;
1814 } else
1815 *ret = -1;
1816 break;
1817
nethercote8b76fe52004-11-08 19:20:09 +00001818 case VG_USERREQ__DISCARD: /* discard */
1819 if (vg_cgbs == NULL
sewardjedc75ab2005-03-15 23:30:32 +00001820 || arg[2] >= vg_cgb_used ||
1821 (vg_cgbs[arg[2]].start == 0 && vg_cgbs[arg[2]].size == 0)) {
1822 *ret = 1;
1823 } else {
1824 tl_assert(arg[2] >= 0 && arg[2] < vg_cgb_used);
1825 vg_cgbs[arg[2]].start = vg_cgbs[arg[2]].size = 0;
1826 VG_(free)(vg_cgbs[arg[2]].desc);
1827 vg_cgb_discards++;
1828 *ret = 0;
1829 }
nethercote8b76fe52004-11-08 19:20:09 +00001830 break;
1831
1832 case VG_USERREQ__GET_VBITS:
1833 /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
1834 error. */
1835 /* VG_(printf)("get_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
1836 *ret = mc_get_or_set_vbits_for_client
1837 ( tid, arg[1], arg[2], arg[3], False /* get them */ );
1838 break;
1839
1840 case VG_USERREQ__SET_VBITS:
1841 /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
1842 error. */
1843 /* VG_(printf)("set_vbits %p %p %d\n", arg[1], arg[2], arg[3] ); */
1844 *ret = mc_get_or_set_vbits_for_client
1845 ( tid, arg[1], arg[2], arg[3], True /* set them */ );
1846 break;
1847
1848 default:
1849 if (MAC_(handle_common_client_requests)(tid, arg, ret )) {
1850 return True;
1851 } else {
1852 VG_(message)(Vg_UserMsg,
1853 "Warning: unknown memcheck client request code %llx",
1854 (ULong)arg[0]);
1855 return False;
1856 }
1857 }
1858 return True;
1859}
njn25e49d8e72002-09-23 09:36:25 +00001860
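/* Illustrative client-side usage of the A/V-bit requests handled above
   (client code, not part of this file; it assumes the memcheck.h macro
   names of this era: VALGRIND_MAKE_NOACCESS, VALGRIND_MAKE_READABLE and
   VALGRIND_CHECK_WRITABLE). */
#if 0
#include "memcheck.h"

void example_poison_and_check ( char* p, unsigned long n )
{
   VALGRIND_MAKE_NOACCESS(p, n);   /* any access to p[0..n-1] is now an error */
   /* ... later, hand the region back ... */
   VALGRIND_MAKE_READABLE(p, n);   /* addressable again, contents defined */
   if (VALGRIND_CHECK_WRITABLE(p, n) != 0) {
      /* per the CHECK_WRITABLE case above, a nonzero result is the
         first offending address */
   }
}
#endif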
sewardjedc75ab2005-03-15 23:30:32 +00001861
njn25e49d8e72002-09-23 09:36:25 +00001862/*------------------------------------------------------------*/
1863/*--- Setup ---*/
1864/*------------------------------------------------------------*/
1865
njn26f02512004-11-22 18:33:15 +00001866void TL_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001867{
njn810086f2002-11-14 12:42:47 +00001868 VG_(details_name) ("Memcheck");
1869 VG_(details_version) (NULL);
nethercote262eedf2003-11-13 17:57:18 +00001870 VG_(details_description) ("a memory error detector");
njn810086f2002-11-14 12:42:47 +00001871 VG_(details_copyright_author)(
njn53612422005-03-12 16:22:54 +00001872 "Copyright (C) 2002-2005, and GNU GPL'd, by Julian Seward et al.");
nethercote421281e2003-11-20 16:20:55 +00001873 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9ebf9fd2004-11-28 16:56:51 +00001874 VG_(details_avg_translation_sizeB) ( 370 );
njn25e49d8e72002-09-23 09:36:25 +00001875
njn810086f2002-11-14 12:42:47 +00001876 VG_(needs_core_errors) ();
njn95ec8702004-11-22 16:46:13 +00001877 VG_(needs_tool_errors) ();
njn810086f2002-11-14 12:42:47 +00001878 VG_(needs_libc_freeres) ();
njn810086f2002-11-14 12:42:47 +00001879 VG_(needs_command_line_options)();
1880 VG_(needs_client_requests) ();
njn810086f2002-11-14 12:42:47 +00001881 VG_(needs_sanity_checks) ();
fitzhardinge98abfc72003-12-16 02:05:15 +00001882 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00001883
njn3e884182003-04-15 13:03:23 +00001884 MAC_( new_mem_heap) = & mc_new_mem_heap;
nethercote8b76fe52004-11-08 19:20:09 +00001885 MAC_( ban_mem_heap) = & mc_make_noaccess;
njn3e884182003-04-15 13:03:23 +00001886 MAC_(copy_mem_heap) = & mc_copy_address_range_state;
nethercote8b76fe52004-11-08 19:20:09 +00001887 MAC_( die_mem_heap) = & mc_make_noaccess;
1888 MAC_(check_noaccess) = & mc_check_noaccess;
njn3e884182003-04-15 13:03:23 +00001889
fitzhardinge98abfc72003-12-16 02:05:15 +00001890 VG_(init_new_mem_startup) ( & mc_new_mem_startup );
nethercote8b76fe52004-11-08 19:20:09 +00001891 VG_(init_new_mem_stack_signal) ( & mc_make_writable );
1892 VG_(init_new_mem_brk) ( & mc_make_writable );
njnb8dca862005-03-14 02:42:44 +00001893 VG_(init_new_mem_mmap) ( & mc_new_mem_mmap );
njn25e49d8e72002-09-23 09:36:25 +00001894
fitzhardinge98abfc72003-12-16 02:05:15 +00001895 VG_(init_copy_mem_remap) ( & mc_copy_address_range_state );
njnb8dca862005-03-14 02:42:44 +00001896 //VG_(init_change_mem_mprotect) ( & mc_set_perms );
njn3e884182003-04-15 13:03:23 +00001897
nethercote8b76fe52004-11-08 19:20:09 +00001898 VG_(init_die_mem_stack_signal) ( & mc_make_noaccess );
1899 VG_(init_die_mem_brk) ( & mc_make_noaccess );
1900 VG_(init_die_mem_munmap) ( & mc_make_noaccess );
njn3e884182003-04-15 13:03:23 +00001901
fitzhardinge98abfc72003-12-16 02:05:15 +00001902 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
1903 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
1904 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
1905 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
1906 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
1907 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001908
fitzhardinge98abfc72003-12-16 02:05:15 +00001909 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
1910 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
1911 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
1912 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
1913 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
1914 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001915
nethercote8b76fe52004-11-08 19:20:09 +00001916 VG_(init_ban_mem_stack) ( & mc_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00001917
fitzhardinge98abfc72003-12-16 02:05:15 +00001918 VG_(init_pre_mem_read) ( & mc_check_is_readable );
1919 VG_(init_pre_mem_read_asciiz) ( & mc_check_is_readable_asciiz );
1920 VG_(init_pre_mem_write) ( & mc_check_is_writable );
njncf45fd42004-11-24 16:30:22 +00001921 VG_(init_post_mem_write) ( & mc_post_mem_write );
nethercote8b76fe52004-11-08 19:20:09 +00001922
1923 VG_(init_pre_reg_read) ( & mc_pre_reg_read );
njn25e49d8e72002-09-23 09:36:25 +00001924
njncf45fd42004-11-24 16:30:22 +00001925 VG_(init_post_reg_write) ( & mc_post_reg_write );
fitzhardinge98abfc72003-12-16 02:05:15 +00001926 VG_(init_post_reg_write_clientcall_return) ( & mc_post_reg_write_clientcall );
njnd3040452003-05-19 15:04:06 +00001927
njn25e49d8e72002-09-23 09:36:25 +00001928 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1929 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njn9b007f62003-04-07 14:40:25 +00001930 VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00001931
njn43c799e2003-04-08 00:08:52 +00001932 /* Additional block description for VG_(describe_addr)() */
nethercote8b76fe52004-11-08 19:20:09 +00001933 MAC_(describe_addr_supp) = client_perm_maybe_describe;
njn43c799e2003-04-08 00:08:52 +00001934
njnd04b7c62002-10-03 14:05:52 +00001935 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00001936 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00001937}
1938
njn26f02512004-11-22 18:33:15 +00001939void TL_(post_clo_init) ( void )
njn5c004e42002-11-18 11:04:50 +00001940{
1941}
1942
njn26f02512004-11-22 18:33:15 +00001943void TL_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00001944{
nethercote8b76fe52004-11-08 19:20:09 +00001945 MAC_(common_fini)( mc_detect_memory_leaks );
njn3e884182003-04-15 13:03:23 +00001946
njn5c004e42002-11-18 11:04:50 +00001947 if (0) {
1948 VG_(message)(Vg_DebugMsg,
1949 "------ Valgrind's client block stats follow ---------------" );
nethercote8b76fe52004-11-08 19:20:09 +00001950 show_client_block_stats();
njn5c004e42002-11-18 11:04:50 +00001951 }
njn25e49d8e72002-09-23 09:36:25 +00001952}
1953
njn26f02512004-11-22 18:33:15 +00001954VG_DETERMINE_INTERFACE_VERSION(TL_(pre_clo_init), 9./8)
fitzhardinge98abfc72003-12-16 02:05:15 +00001955
njn25e49d8e72002-09-23 09:36:25 +00001956/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001957/*--- end mc_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001958/*--------------------------------------------------------------------*/