blob: 5b3cc6bd61bf4b04cb620d67440cf4cc8b8b7cbe [file] [log] [blame]
njn25e49d8e72002-09-23 09:36:25 +00001
2/*--------------------------------------------------------------------*/
3/*--- The AddrCheck skin: like MemCheck, but only does address ---*/
4/*--- checking. No definedness checking. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of AddrCheck, a lightweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn5c004e42002-11-18 11:04:50 +000033#include "mc_common.h"
sewardjd8033d92002-12-08 22:16:58 +000034#include "memcheck.h"
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn9b007f62003-04-07 14:40:25 +000037#include "mc_common.c"
38
39
40
njn27f1a382002-11-08 15:48:16 +000041VG_DETERMINE_INTERFACE_VERSION
42
njn25e49d8e72002-09-23 09:36:25 +000043/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000044/*--- Comparing and printing errors ---*/
45/*------------------------------------------------------------*/
46
njn810086f2002-11-14 12:42:47 +000047void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) )
njn25e49d8e72002-09-23 09:36:25 +000048{
njn5c004e42002-11-18 11:04:50 +000049 MemCheckError* err_extra = VG_(get_error_extra)(err);
njn25e49d8e72002-09-23 09:36:25 +000050
njn810086f2002-11-14 12:42:47 +000051 switch (VG_(get_error_kind)(err)) {
njn25e49d8e72002-09-23 09:36:25 +000052 case CoreMemErr:
53 if (err_extra->isWrite) {
54 VG_(message)(Vg_UserMsg,
sewardj5de6ee02002-12-14 23:11:35 +000055 "%s contains unaddressable byte(s)",
56 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +000057 } else {
58 VG_(message)(Vg_UserMsg,
sewardj5de6ee02002-12-14 23:11:35 +000059 "%s contains unaddressable byte(s)",
60 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +000061 }
62 pp_ExeContext();
63 break;
64
65 case AddrErr:
66 switch (err_extra->axskind) {
67 case ReadAxs:
68 case WriteAxs:
69 /* These two aren't actually differentiated ever. */
70 VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
71 err_extra->size );
72 break;
73 case ExecAxs:
74 VG_(message)(Vg_UserMsg, "Jump to the invalid address "
75 "stated on the next line");
76 break;
77 default:
njn5c004e42002-11-18 11:04:50 +000078 VG_(skin_panic)("SK_(pp_SkinError)(axskind)");
njn25e49d8e72002-09-23 09:36:25 +000079 }
80 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +000081 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000082 break;
83
84 case FreeErr:
85 VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]");
86 /* fall through */
87 case FreeMismatchErr:
njn810086f2002-11-14 12:42:47 +000088 if (VG_(get_error_kind)(err) == FreeMismatchErr)
njn25e49d8e72002-09-23 09:36:25 +000089 VG_(message)(Vg_UserMsg,
90 "Mismatched free() / delete / delete []");
91 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +000092 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000093 break;
94
95 case ParamErr:
96 if (err_extra->isWrite) {
97 VG_(message)(Vg_UserMsg,
njn810086f2002-11-14 12:42:47 +000098 "Syscall param %s contains unaddressable byte(s)",
99 VG_(get_error_string)(err) );
njn25e49d8e72002-09-23 09:36:25 +0000100 } else {
101 VG_(message)(Vg_UserMsg,
102 "Syscall param %s contains uninitialised or "
103 "unaddressable byte(s)",
njn810086f2002-11-14 12:42:47 +0000104 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +0000105 }
106 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +0000107 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +0000108 break;
109
110 case UserErr:
111 if (err_extra->isWrite) {
112 VG_(message)(Vg_UserMsg,
113 "Unaddressable byte(s) found during client check request");
114 } else {
115 VG_(message)(Vg_UserMsg,
116 "Uninitialised or "
117 "unaddressable byte(s) found during client check request");
118 }
119 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +0000120 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +0000121 break;
122
123 default:
njn810086f2002-11-14 12:42:47 +0000124 VG_(printf)("Error:\n unknown AddrCheck error code %d\n",
125 VG_(get_error_kind)(err));
njne427a662002-10-02 11:08:25 +0000126 VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
njn25e49d8e72002-09-23 09:36:25 +0000127 }
128}
129
130/*------------------------------------------------------------*/
131/*--- Recording errors ---*/
132/*------------------------------------------------------------*/
133
134/* Describe an address as best you can, for error messages,
135 putting the result in ai. */
136
njn5c004e42002-11-18 11:04:50 +0000137static void describe_addr ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +0000138{
139 ShadowChunk* sc;
140 ThreadId tid;
141
142 /* Nested functions, yeah. Need the lexical scoping of 'a'. */
143
144 /* Closure for searching thread stacks */
145 Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
146 {
147 return (stack_min <= a && a <= stack_max);
148 }
149 /* Closure for searching malloc'd and free'd lists */
150 Bool addr_is_in_block(ShadowChunk *sh_ch)
151 {
njn810086f2002-11-14 12:42:47 +0000152 return VG_(addr_is_in_block) ( a, VG_(get_sc_data)(sh_ch),
153 VG_(get_sc_size)(sh_ch) );
njn25e49d8e72002-09-23 09:36:25 +0000154 }
155 /* Perhaps it's on a thread's stack? */
156 tid = VG_(any_matching_thread_stack)(addr_is_in_bounds);
157 if (tid != VG_INVALID_THREADID) {
158 ai->akind = Stack;
159 ai->stack_tid = tid;
160 return;
161 }
162 /* Search for a recently freed block which might bracket it. */
njn5c004e42002-11-18 11:04:50 +0000163 sc = MC_(any_matching_freed_ShadowChunks)(addr_is_in_block);
njn25e49d8e72002-09-23 09:36:25 +0000164 if (NULL != sc) {
165 ai->akind = Freed;
njn810086f2002-11-14 12:42:47 +0000166 ai->blksize = VG_(get_sc_size)(sc);
167 ai->rwoffset = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
168 ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
njn25e49d8e72002-09-23 09:36:25 +0000169 return;
170 }
171 /* Search for a currently malloc'd block which might bracket it. */
172 sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
173 if (NULL != sc) {
174 ai->akind = Mallocd;
njn810086f2002-11-14 12:42:47 +0000175 ai->blksize = VG_(get_sc_size)(sc);
176 ai->rwoffset = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
177 ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
njn25e49d8e72002-09-23 09:36:25 +0000178 return;
179 }
180 /* Clueless ... */
181 ai->akind = Unknown;
182 return;
183}
184
185
njn810086f2002-11-14 12:42:47 +0000186/* Creates a copy of the `extra' part, updates the copy with address info if
187 necessary, and returns the copy. */
188void* SK_(dup_extra_and_update)(Error* err)
njn25e49d8e72002-09-23 09:36:25 +0000189{
njnb3f7c092003-02-17 10:09:19 +0000190 MemCheckError* extra;
191 MemCheckError* new_extra = NULL;
njn25e49d8e72002-09-23 09:36:25 +0000192
njnb3f7c092003-02-17 10:09:19 +0000193 extra = ((MemCheckError*)VG_(get_error_extra)(err));
194 if (extra != NULL) {
195 new_extra = VG_(malloc)(sizeof(MemCheckError));
196 *new_extra = *extra;
197 if (new_extra->addrinfo.akind == Undescribed)
198 describe_addr ( VG_(get_error_address)(err), &(new_extra->addrinfo) );
199 }
njn25e49d8e72002-09-23 09:36:25 +0000200
njnb3f7c092003-02-17 10:09:19 +0000201
njn25e49d8e72002-09-23 09:36:25 +0000202
njn810086f2002-11-14 12:42:47 +0000203 return new_extra;
njn25e49d8e72002-09-23 09:36:25 +0000204}
205
njn25e49d8e72002-09-23 09:36:25 +0000206/*------------------------------------------------------------*/
207/*--- Suppressions ---*/
208/*------------------------------------------------------------*/
209
210#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
211 && VG_(strcmp)((s1),(s2))==0)
212
njn810086f2002-11-14 12:42:47 +0000213Bool SK_(recognised_suppression) ( Char* name, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +0000214{
njn810086f2002-11-14 12:42:47 +0000215 SuppKind skind;
216
217 if (STREQ(name, "Param")) skind = ParamSupp;
218 else if (STREQ(name, "CoreMem")) skind = CoreMemSupp;
219 else if (STREQ(name, "Addr1")) skind = Addr1Supp;
220 else if (STREQ(name, "Addr2")) skind = Addr2Supp;
221 else if (STREQ(name, "Addr4")) skind = Addr4Supp;
222 else if (STREQ(name, "Addr8")) skind = Addr8Supp;
223 else if (STREQ(name, "Free")) skind = FreeSupp;
sewardja75cd5a2002-12-28 12:36:55 +0000224 else if (STREQ(name, "Leak")) skind = LeakSupp;
njn25e49d8e72002-09-23 09:36:25 +0000225 else
226 return False;
227
njn810086f2002-11-14 12:42:47 +0000228 VG_(set_supp_kind)(su, skind);
njn25e49d8e72002-09-23 09:36:25 +0000229 return True;
230}
231
njn25e49d8e72002-09-23 09:36:25 +0000232# undef STREQ
233
234
njn5c004e42002-11-18 11:04:50 +0000235#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
236
njn25e49d8e72002-09-23 09:36:25 +0000237/*------------------------------------------------------------*/
238/*--- Low-level support for memory checking. ---*/
239/*------------------------------------------------------------*/
240
241/* All reads and writes are checked against a memory map, which
242 records the state of all memory in the process. The memory map is
243 organised like this:
244
245 The top 16 bits of an address are used to index into a top-level
246 map table, containing 65536 entries. Each entry is a pointer to a
   second-level map, which records the accessibility and validity
248 permissions for the 65536 bytes indexed by the lower 16 bits of the
249 address. Each byte is represented by one bit, indicating
250 accessibility. So each second-level map contains 8192 bytes. This
251 two-level arrangement conveniently divides the 4G address space
252 into 64k lumps, each size 64k bytes.
253
254 All entries in the primary (top-level) map must point to a valid
255 secondary (second-level) map. Since most of the 4G of address
256 space will not be in use -- ie, not mapped at all -- there is a
257 distinguished secondary map, which indicates `not addressible and
258 not valid' writeable for all bytes. Entries in the primary map for
259 which the entire 64k is not in use at all point at this
260 distinguished map.
261
262 [...] lots of stuff deleted due to out of date-ness
263
264 As a final optimisation, the alignment and address checks for
265 4-byte loads and stores are combined in a neat way. The primary
266 map is extended to have 262144 entries (2^18), rather than 2^16.
267 The top 3/4 of these entries are permanently set to the
268 distinguished secondary map. For a 4-byte load/store, the
269 top-level map is indexed not with (addr >> 16) but instead f(addr),
270 where
271
272 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
273 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
274 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
275
276 ie the lowest two bits are placed above the 16 high address bits.
277 If either of these two bits are nonzero, the address is misaligned;
278 this will select a secondary map from the upper 3/4 of the primary
279 map. Because this is always the distinguished secondary map, a
280 (bogus) address check failure will result. The failure handling
281 code can then figure out whether this is a genuine addr check
282 failure or whether it is a possibly-legitimate access at a
283 misaligned address. */
284
285
286/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000287/*--- Function declarations. ---*/
288/*------------------------------------------------------------*/
289
njn5c004e42002-11-18 11:04:50 +0000290static void ac_ACCESS4_SLOWLY ( Addr a );
291static void ac_ACCESS2_SLOWLY ( Addr a );
292static void ac_ACCESS1_SLOWLY ( Addr a );
293static void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000294
295/*------------------------------------------------------------*/
296/*--- Data defns. ---*/
297/*------------------------------------------------------------*/
298
299typedef
300 struct {
301 UChar abits[8192];
302 }
303 AcSecMap;
304
305static AcSecMap* primary_map[ /*65536*/ 262144 ];
306static AcSecMap distinguished_secondary_map;
307
njn25e49d8e72002-09-23 09:36:25 +0000308static void init_shadow_memory ( void )
309{
310 Int i;
311
312 for (i = 0; i < 8192; i++) /* Invalid address */
313 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
314
315 /* These entries gradually get overwritten as the used address
316 space expands. */
317 for (i = 0; i < 65536; i++)
318 primary_map[i] = &distinguished_secondary_map;
319
320 /* These ones should never change; it's a bug in Valgrind if they do. */
321 for (i = 65536; i < 262144; i++)
322 primary_map[i] = &distinguished_secondary_map;
323}
324
njn25e49d8e72002-09-23 09:36:25 +0000325/*------------------------------------------------------------*/
326/*--- Basic bitmap management, reading and writing. ---*/
327/*------------------------------------------------------------*/
328
329/* Allocate and initialise a secondary map. */
330
331static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
332 Char* caller )
333{
334 AcSecMap* map;
335 UInt i;
336 PROF_EVENT(10);
337
338 /* Mark all bytes as invalid access and invalid value. */
339
340 /* It just happens that a AcSecMap occupies exactly 18 pages --
341 although this isn't important, so the following assert is
342 spurious. */
njne427a662002-10-02 11:08:25 +0000343 sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000344 map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );
345
346 for (i = 0; i < 8192; i++)
347 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
348
349 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
350 return map;
351}
352
353
354/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
355
356static __inline__ UChar get_abit ( Addr a )
357{
358 AcSecMap* sm = primary_map[a >> 16];
359 UInt sm_off = a & 0xFFFF;
360 PROF_EVENT(20);
361# if 0
362 if (IS_DISTINGUISHED_SM(sm))
363 VG_(message)(Vg_DebugMsg,
364 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
365# endif
366 return BITARR_TEST(sm->abits, sm_off)
367 ? VGM_BIT_INVALID : VGM_BIT_VALID;
368}
369
370static __inline__ void set_abit ( Addr a, UChar abit )
371{
372 AcSecMap* sm;
373 UInt sm_off;
374 PROF_EVENT(22);
375 ENSURE_MAPPABLE(a, "set_abit");
376 sm = primary_map[a >> 16];
377 sm_off = a & 0xFFFF;
378 if (abit)
379 BITARR_SET(sm->abits, sm_off);
380 else
381 BITARR_CLEAR(sm->abits, sm_off);
382}
383
384
385/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
386
387static __inline__ UChar get_abits4_ALIGNED ( Addr a )
388{
389 AcSecMap* sm;
390 UInt sm_off;
391 UChar abits8;
392 PROF_EVENT(24);
393# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000394 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000395# endif
396 sm = primary_map[a >> 16];
397 sm_off = a & 0xFFFF;
398 abits8 = sm->abits[sm_off >> 3];
399 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
400 abits8 &= 0x0F;
401 return abits8;
402}
403
404
405
406/*------------------------------------------------------------*/
407/*--- Setting permissions over address ranges. ---*/
408/*------------------------------------------------------------*/
409
sewardj5de6ee02002-12-14 23:11:35 +0000410static __inline__
411void set_address_range_perms ( Addr a, UInt len,
412 UInt example_a_bit )
njn25e49d8e72002-09-23 09:36:25 +0000413{
414 UChar abyte8;
415 UInt sm_off;
416 AcSecMap* sm;
417
418 PROF_EVENT(30);
419
420 if (len == 0)
421 return;
422
423 if (len > 100 * 1000 * 1000) {
424 VG_(message)(Vg_UserMsg,
425 "Warning: set address range perms: "
426 "large range %u, a %d",
427 len, example_a_bit );
428 }
429
430 VGP_PUSHCC(VgpSetMem);
431
432 /* Requests to change permissions of huge address ranges may
433 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
434 far all legitimate requests have fallen beneath that size. */
435 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000436 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000437
438 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000439 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000440 || example_a_bit == VGM_BIT_INVALID);
441
442 /* In order that we can charge through the address space at 8
443 bytes/main-loop iteration, make up some perms. */
444 abyte8 = (example_a_bit << 7)
445 | (example_a_bit << 6)
446 | (example_a_bit << 5)
447 | (example_a_bit << 4)
448 | (example_a_bit << 3)
449 | (example_a_bit << 2)
450 | (example_a_bit << 1)
451 | (example_a_bit << 0);
452
453# ifdef VG_DEBUG_MEMORY
454 /* Do it ... */
455 while (True) {
456 PROF_EVENT(31);
457 if (len == 0) break;
458 set_abit ( a, example_a_bit );
459 set_vbyte ( a, vbyte );
460 a++;
461 len--;
462 }
463
464# else
465 /* Slowly do parts preceding 8-byte alignment. */
466 while (True) {
467 PROF_EVENT(31);
468 if (len == 0) break;
469 if ((a % 8) == 0) break;
470 set_abit ( a, example_a_bit );
471 a++;
472 len--;
473 }
474
475 if (len == 0) {
476 VGP_POPCC(VgpSetMem);
477 return;
478 }
njne427a662002-10-02 11:08:25 +0000479 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000480
481 /* Once aligned, go fast. */
482 while (True) {
483 PROF_EVENT(32);
484 if (len < 8) break;
485 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
486 sm = primary_map[a >> 16];
487 sm_off = a & 0xFFFF;
488 sm->abits[sm_off >> 3] = abyte8;
489 a += 8;
490 len -= 8;
491 }
492
493 if (len == 0) {
494 VGP_POPCC(VgpSetMem);
495 return;
496 }
njne427a662002-10-02 11:08:25 +0000497 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000498
499 /* Finish the upper fragment. */
500 while (True) {
501 PROF_EVENT(33);
502 if (len == 0) break;
503 set_abit ( a, example_a_bit );
504 a++;
505 len--;
506 }
507# endif
508
509 /* Check that zero page and highest page have not been written to
510 -- this could happen with buggy syscall wrappers. Today
511 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000512 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000513 VGP_POPCC(VgpSetMem);
514}
515
516/* Set permissions for address ranges ... */
517
njn5c004e42002-11-18 11:04:50 +0000518static void ac_make_noaccess ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000519{
520 PROF_EVENT(35);
njn5c004e42002-11-18 11:04:50 +0000521 DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000522 set_address_range_perms ( a, len, VGM_BIT_INVALID );
523}
524
njn5c004e42002-11-18 11:04:50 +0000525static void ac_make_accessible ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000526{
njn5c004e42002-11-18 11:04:50 +0000527 PROF_EVENT(38);
528 DEBUG("ac_make_accessible(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000529 set_address_range_perms ( a, len, VGM_BIT_VALID );
530}
531
njn9b007f62003-04-07 14:40:25 +0000532static __inline__
533void make_aligned_word_noaccess(Addr a)
534{
535 AcSecMap* sm;
536 UInt sm_off;
537 UChar mask;
538
539 VGP_PUSHCC(VgpESPAdj);
540 ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
541 sm = primary_map[a >> 16];
542 sm_off = a & 0xFFFF;
543 mask = 0x0F;
544 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
545 /* mask now contains 1s where we wish to make address bits invalid (1s). */
546 sm->abits[sm_off >> 3] |= mask;
547 VGP_POPCC(VgpESPAdj);
548}
549
550static __inline__
551void make_aligned_word_accessible(Addr a)
552{
553 AcSecMap* sm;
554 UInt sm_off;
555 UChar mask;
556
557 VGP_PUSHCC(VgpESPAdj);
558 ENSURE_MAPPABLE(a, "make_aligned_word_accessible");
559 sm = primary_map[a >> 16];
560 sm_off = a & 0xFFFF;
561 mask = 0x0F;
562 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
563 /* mask now contains 1s where we wish to make address bits
564 invalid (0s). */
565 sm->abits[sm_off >> 3] &= ~mask;
566 VGP_POPCC(VgpESPAdj);
567}
568
569/* Nb: by "aligned" here we mean 8-byte aligned */
570static __inline__
571void make_aligned_doubleword_accessible(Addr a)
572{
573 AcSecMap* sm;
574 UInt sm_off;
575
576 VGP_PUSHCC(VgpESPAdj);
577 ENSURE_MAPPABLE(a, "make_aligned_doubleword_accessible");
578 sm = primary_map[a >> 16];
579 sm_off = a & 0xFFFF;
580 sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
581 VGP_POPCC(VgpESPAdj);
582}
583
584static __inline__
585void make_aligned_doubleword_noaccess(Addr a)
586{
587 AcSecMap* sm;
588 UInt sm_off;
589
590 VGP_PUSHCC(VgpESPAdj);
591 ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
592 sm = primary_map[a >> 16];
593 sm_off = a & 0xFFFF;
594 sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
595 VGP_POPCC(VgpESPAdj);
596}
597
/* Instantiate the %esp update handlers, parameterised by the routines
   which adjust addressibility as the stack grows and shrinks. */
ESP_UPDATE_HANDLERS ( make_aligned_word_accessible,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_accessible,
                      make_aligned_doubleword_noaccess,
                      ac_make_accessible,
                      ac_make_noaccess
                    );
606
607
njn25e49d8e72002-09-23 09:36:25 +0000608/* Block-copy permissions (needed for implementing realloc()). */
609
njn5c004e42002-11-18 11:04:50 +0000610static void ac_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000611{
612 UInt i;
613
njn5c004e42002-11-18 11:04:50 +0000614 DEBUG("ac_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000615
616 PROF_EVENT(40);
617 for (i = 0; i < len; i++) {
618 UChar abit = get_abit ( src+i );
619 PROF_EVENT(41);
620 set_abit ( dst+i, abit );
621 }
622}
623
624
625/* Check permissions for address range. If inadequate permissions
626 exist, *bad_addr is set to the offending address, so the caller can
627 know what it is. */
628
njn5c004e42002-11-18 11:04:50 +0000629static __inline__
630Bool ac_check_accessible ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000631{
632 UInt i;
633 UChar abit;
njn5c004e42002-11-18 11:04:50 +0000634 PROF_EVENT(48);
njn25e49d8e72002-09-23 09:36:25 +0000635 for (i = 0; i < len; i++) {
njn5c004e42002-11-18 11:04:50 +0000636 PROF_EVENT(49);
njn25e49d8e72002-09-23 09:36:25 +0000637 abit = get_abit(a);
638 if (abit == VGM_BIT_INVALID) {
639 if (bad_addr != NULL) *bad_addr = a;
640 return False;
641 }
642 a++;
643 }
644 return True;
645}
646
njn25e49d8e72002-09-23 09:36:25 +0000647/* Check a zero-terminated ascii string. Tricky -- don't want to
648 examine the actual bytes, to find the end, until we're sure it is
649 safe to do so. */
650
njn5c004e42002-11-18 11:04:50 +0000651static __inline__
652Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000653{
654 UChar abit;
655 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000656 DEBUG("ac_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000657 while (True) {
658 PROF_EVENT(47);
659 abit = get_abit(a);
660 if (abit != VGM_BIT_VALID) {
661 if (bad_addr != NULL) *bad_addr = a;
662 return False;
663 }
664 /* Ok, a is safe to read. */
665 if (* ((UChar*)a) == 0) return True;
666 a++;
667 }
668}
669
670
671/*------------------------------------------------------------*/
672/*--- Memory event handlers ---*/
673/*------------------------------------------------------------*/
674
njn5c004e42002-11-18 11:04:50 +0000675static __inline__
676void ac_check_is_accessible ( CorePart part, ThreadState* tst,
677 Char* s, Addr base, UInt size, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000678{
679 Bool ok;
680 Addr bad_addr;
681
682 VGP_PUSHCC(VgpCheckMem);
683
njn5c004e42002-11-18 11:04:50 +0000684 ok = ac_check_accessible ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000685 if (!ok) {
686 switch (part) {
687 case Vg_CoreSysCall:
njn5c004e42002-11-18 11:04:50 +0000688 MC_(record_param_error) ( tst, bad_addr, isWrite, s );
njn25e49d8e72002-09-23 09:36:25 +0000689 break;
690
njn25e49d8e72002-09-23 09:36:25 +0000691 case Vg_CoreSignal:
njn5c004e42002-11-18 11:04:50 +0000692 sk_assert(isWrite); /* Should only happen with isWrite case */
693 /* fall through */
njn25e49d8e72002-09-23 09:36:25 +0000694 case Vg_CorePThread:
njn5c004e42002-11-18 11:04:50 +0000695 MC_(record_core_mem_error)( tst, isWrite, s );
njn25e49d8e72002-09-23 09:36:25 +0000696 break;
697
698 /* If we're being asked to jump to a silly address, record an error
699 message before potentially crashing the entire system. */
700 case Vg_CoreTranslate:
njn5c004e42002-11-18 11:04:50 +0000701 sk_assert(!isWrite); /* Should only happen with !isWrite case */
702 MC_(record_jump_error)( tst, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000703 break;
704
705 default:
njn5c004e42002-11-18 11:04:50 +0000706 VG_(skin_panic)("ac_check_is_accessible: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000707 }
708 }
njn5c004e42002-11-18 11:04:50 +0000709
njn25e49d8e72002-09-23 09:36:25 +0000710 VGP_POPCC(VgpCheckMem);
711}
712
713static
njn5c004e42002-11-18 11:04:50 +0000714void ac_check_is_writable ( CorePart part, ThreadState* tst,
715 Char* s, Addr base, UInt size )
716{
717 ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/True );
718}
719
720static
721void ac_check_is_readable ( CorePart part, ThreadState* tst,
722 Char* s, Addr base, UInt size )
723{
724 ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/False );
725}
726
727static
728void ac_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
729 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000730{
731 Bool ok = True;
732 Addr bad_addr;
733 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
734
735 VGP_PUSHCC(VgpCheckMem);
736
njne427a662002-10-02 11:08:25 +0000737 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000738 ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000739 if (!ok) {
njn5c004e42002-11-18 11:04:50 +0000740 MC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000741 }
742
743 VGP_POPCC(VgpCheckMem);
744}
745
746static
njn5c004e42002-11-18 11:04:50 +0000747void ac_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +0000748{
njn1f3a9092002-10-04 09:22:30 +0000749 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +0000750 DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
njn5c004e42002-11-18 11:04:50 +0000751 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000752}
753
754static
njn5c004e42002-11-18 11:04:50 +0000755void ac_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000756{
njn5c004e42002-11-18 11:04:50 +0000757 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000758}
759
760static
njn5c004e42002-11-18 11:04:50 +0000761void ac_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +0000762 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000763{
njn5c004e42002-11-18 11:04:50 +0000764 DEBUG("ac_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
sewardj40f8ebe2002-10-23 21:46:13 +0000765 a, len, rr, ww, xx);
njn25e49d8e72002-09-23 09:36:25 +0000766 if (rr || ww || xx) {
njn5c004e42002-11-18 11:04:50 +0000767 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000768 } else {
njn5c004e42002-11-18 11:04:50 +0000769 ac_make_noaccess(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000770 }
771}
772
773
774/*------------------------------------------------------------*/
775/*--- Functions called directly from generated code. ---*/
776/*------------------------------------------------------------*/
777
778static __inline__ UInt rotateRight16 ( UInt x )
779{
780 /* Amazingly, gcc turns this into a single rotate insn. */
781 return (x >> 16) | (x << 16);
782}
783
njn25e49d8e72002-09-23 09:36:25 +0000784static __inline__ UInt shiftRight16 ( UInt x )
785{
786 return x >> 16;
787}
788
789
790/* Read/write 1/2/4 sized V bytes, and emit an address error if
791 needed. */
792
njn5c004e42002-11-18 11:04:50 +0000793/* ac_helperc_ACCESS{1,2,4} handle the common case fast.
njn25e49d8e72002-09-23 09:36:25 +0000794 Under all other circumstances, it defers to the relevant _SLOWLY
795 function, which can handle all situations.
796*/
797__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000798static void ac_helperc_ACCESS4 ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000799{
800# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000801 return ac_ACCESS4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000802# else
803 UInt sec_no = rotateRight16(a) & 0x3FFFF;
804 AcSecMap* sm = primary_map[sec_no];
805 UInt a_off = (a & 0xFFFF) >> 3;
806 UChar abits = sm->abits[a_off];
807 abits >>= (a & 4);
808 abits &= 15;
njn5c004e42002-11-18 11:04:50 +0000809 PROF_EVENT(66);
njn25e49d8e72002-09-23 09:36:25 +0000810 if (abits == VGM_NIBBLE_VALID) {
811 /* Handle common case quickly: a is suitably aligned, is mapped,
812 and is addressible. So just return. */
813 return;
814 } else {
815 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000816 ac_ACCESS4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000817 }
818# endif
819}
820
821__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000822static void ac_helperc_ACCESS2 ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000823{
824# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000825 return ac_ACCESS2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000826# else
827 UInt sec_no = rotateRight16(a) & 0x1FFFF;
828 AcSecMap* sm = primary_map[sec_no];
829 UInt a_off = (a & 0xFFFF) >> 3;
njn5c004e42002-11-18 11:04:50 +0000830 PROF_EVENT(67);
njn25e49d8e72002-09-23 09:36:25 +0000831 if (sm->abits[a_off] == VGM_BYTE_VALID) {
832 /* Handle common case quickly. */
833 return;
834 } else {
835 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000836 ac_ACCESS2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000837 }
838# endif
839}
840
841__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000842static void ac_helperc_ACCESS1 ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000843{
844# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000845 return ac_ACCESS1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000846# else
847 UInt sec_no = shiftRight16(a);
848 AcSecMap* sm = primary_map[sec_no];
849 UInt a_off = (a & 0xFFFF) >> 3;
njn5c004e42002-11-18 11:04:50 +0000850 PROF_EVENT(68);
njn25e49d8e72002-09-23 09:36:25 +0000851 if (sm->abits[a_off] == VGM_BYTE_VALID) {
852 /* Handle common case quickly. */
853 return;
854 } else {
855 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000856 ac_ACCESS1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000857 }
858# endif
859}
860
861
862/*------------------------------------------------------------*/
863/*--- Fallback functions to handle cases that the above ---*/
864/*--- VG_(helperc_ACCESS{1,2,4}) can't manage. ---*/
865/*------------------------------------------------------------*/
866
/* General (unaligned and/or partially-mapped) case for a 4-byte
   access.  Tests each byte's A bit independently and distinguishes
   three outcomes: fully valid, invalid (report error), or partially
   valid under --partial-loads-ok (silently allowed). */
static void ac_ACCESS4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;

   PROF_EVENT(76);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      return;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
      An error is reported unless ALL of the following hold: partial
      loads are allowed, the access is 4-aligned, and at least one of
      the four bytes is valid.
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MC_(record_address_error)( a, 4, False );
      return;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MC_(clo_partial_loads_ok));
   {
      /* Deliberately empty: addrcheck tracks no definedness bits, so
         unlike memcheck there is nothing to mark for the invalid
         bytes here. */
      return;
   }
}
911
njn5c004e42002-11-18 11:04:50 +0000912static void ac_ACCESS2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000913{
914 /* Check the address for validity. */
915 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000916 PROF_EVENT(77);
njn25e49d8e72002-09-23 09:36:25 +0000917
918 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
919 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
920
921 /* If an address error has happened, report it. */
922 if (aerr) {
njn5c004e42002-11-18 11:04:50 +0000923 MC_(record_address_error)( a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +0000924 }
925}
926
njn5c004e42002-11-18 11:04:50 +0000927static void ac_ACCESS1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000928{
929 /* Check the address for validity. */
930 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000931 PROF_EVENT(78);
njn25e49d8e72002-09-23 09:36:25 +0000932
933 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
934
935 /* If an address error has happened, report it. */
936 if (aerr) {
njn5c004e42002-11-18 11:04:50 +0000937 MC_(record_address_error)( a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +0000938 }
939}
940
941
942/* ---------------------------------------------------------------------
943 FPU load and store checks, called from generated code.
944 ------------------------------------------------------------------ */
945
/* Generated-code helper for FPU loads/stores of 'size' bytes at
   'addr'.  regparm(2): both arguments arrive in registers. */
__attribute__ ((regparm(2)))
static void ac_fpu_ACCESS_check ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      (NOTE: the "value error" wording is inherited from memcheck;
      addrcheck keeps no definedness state, so only the
      addressibility half applies in this file.)

      Try to be reasonably fast on the common case; wimp out and defer
      to ac_fpu_ACCESS_check_SLOWLY for everything else.  */

   AcSecMap* sm;
   UInt      sm_off, a_off;
   Addr      addr4;

   PROF_EVENT(90);

#  ifdef VG_DEBUG_MEMORY
   ac_fpu_ACCESS_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(91);
      /* Properly aligned.  One abits byte covers the aligned 8-byte
         group containing the whole access. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      return;
     slow4:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(92);
      /* Properly aligned.  Do it in two halves, since the two 4-byte
         halves may live in different secondary maps. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      /* Both halves properly aligned and addressible. */
      return;
     slow8:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(93);
      ac_fpu_ACCESS_check_SLOWLY ( addr, 2 );
      return;
   }

   /* 10 == x87 long double; 28 and 108 presumably match the
      FSTENV/FLDENV and FSAVE/FRSTOR image sizes -- TODO confirm. */
   if (size == 10 || size == 28 || size == 108) {
      PROF_EVENT(94);
      ac_fpu_ACCESS_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
#  endif
}
1024
1025
1026/* ---------------------------------------------------------------------
1027 Slow, general cases for FPU access checks.
1028 ------------------------------------------------------------------ */
1029
njn5c004e42002-11-18 11:04:50 +00001030void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001031{
1032 Int i;
1033 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +00001034 PROF_EVENT(100);
njn25e49d8e72002-09-23 09:36:25 +00001035 for (i = 0; i < size; i++) {
njn5c004e42002-11-18 11:04:50 +00001036 PROF_EVENT(101);
njn25e49d8e72002-09-23 09:36:25 +00001037 if (get_abit(addr+i) != VGM_BIT_VALID)
1038 aerr = True;
1039 }
1040
1041 if (aerr) {
njn5c004e42002-11-18 11:04:50 +00001042 MC_(record_address_error)( addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001043 }
1044}
1045
1046
1047/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001048/*--- Our instrumenter ---*/
1049/*------------------------------------------------------------*/
1050
/* UCode instrumenter: pass over cb_in, inserting a CCALL to the
   appropriate address-check helper immediately BEFORE every
   memory-referencing UInstr, then copying the original instruction.
   orig_addr (the guest address of the block) is unused here. */
UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
{
/* Use this rather than eg. -1 because it's a UInt. */
/* NOTE(review): INVALID_DATA_SIZE appears to be unused in this
   function (t_size is initialised with INVALID_TEMPREG below), and it
   is never #undef'd -- candidate for removal; confirm no other use. */
#define INVALID_DATA_SIZE 999999

   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_addr, t_size;   /* TempRegs holding address / size args */

   cb = VG_(setup_UCodeBlock)(cb_in);

   for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {

      t_addr = t_size = INVALID_TEMPREG;
      u_in = VG_(get_instr)(cb_in, i);

      switch (u_in->opcode) {
         /* Non-memory markers: nothing to instrument, and not copied
            through either. */
         case NOP:  case LOCK:  case CALLM_E:  case CALLM_S:
            break;

         /* For memory-ref instrs, copy the data_addr into a temporary to be
          * passed to the helper at the end of the instruction.
          */
         case LOAD:
            t_addr = u_in->val1;   /* LOAD: address is in val1 */
            goto do_LOAD_or_STORE;
         case STORE:  t_addr = u_in->val2;   /* STORE: address is in val2 */
            goto do_LOAD_or_STORE;
         do_LOAD_or_STORE:
            /* One-argument C call to the size-specific checker. */
            uInstr1(cb, CCALL, 0, TempReg, t_addr);
            switch (u_in->size) {
               case 4: uCCall(cb, (Addr) & ac_helperc_ACCESS4, 1, 1, False );
                  break;
               case 2: uCCall(cb, (Addr) & ac_helperc_ACCESS2, 1, 1, False );
                  break;
               case 1: uCCall(cb, (Addr) & ac_helperc_ACCESS1, 1, 1, False );
                  break;
               default:
                  VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* FPU memory refs carry a run-time-variable size, so pass
            (addr, size) to the generic FPU checker. */
         case FPU_R:
         case FPU_W:
            t_addr = u_in->val2;
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* MMX memory refs: same treatment as FPU, but only 4- or
            8-byte accesses are expected. */
         case MMX2_MemRd:
         case MMX2_MemWr:
            sk_assert(u_in->size == 4 || u_in->size == 8);
            t_addr = u_in->val2;
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Everything else passes through unchanged. */
         default:
            VG_(copy_UInstr)(cb, u_in);
            break;
      }
   }

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}
1127
1128
njn25e49d8e72002-09-23 09:36:25 +00001129/*------------------------------------------------------------*/
1130/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1131/*------------------------------------------------------------*/
1132
sewardja4495682002-10-21 07:29:59 +00001133/* For the memory leak detector, say whether an entire 64k chunk of
1134 address space is possibly in use, or not. If in doubt return
1135 True.
njn25e49d8e72002-09-23 09:36:25 +00001136*/
sewardja4495682002-10-21 07:29:59 +00001137static
1138Bool ac_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001139{
sewardja4495682002-10-21 07:29:59 +00001140 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1141 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1142 /* Definitely not in use. */
1143 return False;
1144 } else {
1145 return True;
njn25e49d8e72002-09-23 09:36:25 +00001146 }
1147}
1148
1149
sewardja4495682002-10-21 07:29:59 +00001150/* For the memory leak detector, say whether or not a given word
1151 address is to be regarded as valid. */
1152static
1153Bool ac_is_valid_address ( Addr a )
1154{
1155 UChar abits;
1156 sk_assert(IS_ALIGNED4_ADDR(a));
1157 abits = get_abits4_ALIGNED(a);
1158 if (abits == VGM_NIBBLE_VALID) {
1159 return True;
1160 } else {
1161 return False;
1162 }
1163}
1164
1165
1166/* Leak detector for this skin. We don't actually do anything, merely
1167 run the generic leak detector with suitable parameters for this
1168 skin. */
static void ac_detect_memory_leaks ( void )
{
   /* Delegate to the core's generic leak detector, parameterised with
      addrcheck's notions of chunk and word validity plus the options
      shared with memcheck.  The arguments are positional; keep them
      in the order VG_(generic_detect_memory_leaks) expects. */
   VG_(generic_detect_memory_leaks) (
      ac_is_valid_64k_chunk,       /* is a 64KB chunk possibly in use? */
      ac_is_valid_address,         /* is a word addressible? */
      MC_(get_where),              /* where was a block allocated? */
      MC_(clo_leak_resolution),    /* backtrace-merge depth */
      MC_(clo_show_reachable),     /* report still-reachable blocks? */
      (UInt)LeakSupp               /* suppression kind for leak errors */
   );
}
1180
1181
1182/* ---------------------------------------------------------------------
1183 Sanity check machinery (permanently engaged).
1184 ------------------------------------------------------------------ */
1185
1186/* Check that nobody has spuriously claimed that the first or last 16
1187 pages (64 KB) of address space have become accessible. Failure of
1188 the following do not per se indicate an internal consistency
1189 problem, but they are so likely to that we really want to know
1190 about it if so. */
1191
1192Bool SK_(cheap_sanity_check) ( void )
1193{
sewardjd5815ec2003-04-06 12:23:27 +00001194 if (IS_DISTINGUISHED_SM(primary_map[0])
1195 /* kludge: kernel drops a page up at top of address range for
1196 magic "optimized syscalls", so we can no longer check the
1197 highest page */
1198 /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
1199 )
njn25e49d8e72002-09-23 09:36:25 +00001200 return True;
1201 else
1202 return False;
1203}
1204
1205Bool SK_(expensive_sanity_check) ( void )
1206{
1207 Int i;
1208
1209 /* Make sure nobody changed the distinguished secondary. */
1210 for (i = 0; i < 8192; i++)
1211 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1212 return False;
1213
1214 /* Make sure that the upper 3/4 of the primary map hasn't
1215 been messed with. */
1216 for (i = 65536; i < 262144; i++)
1217 if (primary_map[i] != & distinguished_secondary_map)
1218 return False;
1219
1220 return True;
1221}
1222
sewardjd8033d92002-12-08 22:16:58 +00001223/*
1224 Client requests
1225 */
/* Handle 'MC'-namespace client requests.  Addrcheck honours only the
   leak-check request; the memcheck definedness requests are ignored
   with a (rate-limited) warning.  Returns True iff the request was
   fully handled here.  tst is unused. */
Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt *ret )
{
/* Warn (at most 'moans' times per run) that a memcheck-only request
   is being ignored. */
#define IGNORE(what) \
   do { \
      if (moans-- > 0) { \
         VG_(message)(Vg_UserMsg, \
            "Warning: Addrcheck: ignoring `%s' request.", what); \
         VG_(message)(Vg_UserMsg, \
            " To honour this request, rerun with --skin=memcheck."); \
      } \
   } while (0)

   UInt* arg = arg_block;
   static Int moans = 3;   /* remaining IGNORE warnings this run */

   /* Overload memcheck client reqs */
   if (!VG_IS_SKIN_USERREQ('M','C',arg[0]))
      return False;

   switch (arg[0]) {
      case VG_USERREQ__DO_LEAK_CHECK:
         ac_detect_memory_leaks();
         *ret = 0; /* return value is meaningless */
         break;

      /* Ignore these */
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         IGNORE("VALGRIND_CHECK_WRITABLE");
         return False;
      case VG_USERREQ__CHECK_READABLE: /* check readable */
         IGNORE("VALGRIND_CHECK_READABLE");
         return False;
      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         IGNORE("VALGRIND_MAKE_NOACCESS");
         return False;
      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         IGNORE("VALGRIND_MAKE_WRITABLE");
         return False;
      case VG_USERREQ__MAKE_READABLE: /* make readable */
         IGNORE("VALGRIND_MAKE_READABLE");
         return False;
      case VG_USERREQ__DISCARD: /* discard */
         /* NOTE(review): the client macro for VG_USERREQ__DISCARD is
            presumably VALGRIND_DISCARD, not "VALGRIND_CHECK_DISCARD"
            as printed here -- confirm against memcheck.h and fix the
            warning text if so. */
         IGNORE("VALGRIND_CHECK_DISCARD");
         return False;

      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown addrcheck client request code %d",
                      arg[0]);
         return False;
   }
   return True;

#undef IGNORE
}
1281
njn25e49d8e72002-09-23 09:36:25 +00001282/*------------------------------------------------------------*/
1283/*--- Setup ---*/
1284/*------------------------------------------------------------*/
1285
njn25e49d8e72002-09-23 09:36:25 +00001286Bool SK_(process_cmd_line_option)(Char* arg)
1287{
njn5c004e42002-11-18 11:04:50 +00001288 return MC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001289}
1290
/* Return the static usage text for the options accepted by
   SK_(process_cmd_line_option) (all shared with memcheck). */
Char* SK_(usage)(void)
{
   return
" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
" --leak-check=no|yes search for memory leaks at exit? [no]\n"
" --leak-resolution=low|med|high\n"
" amount of bt merging in leak check [low]\n"
" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
"\n"
" --cleanup=no|yes improve after instrumentation? [yes]\n";
}
1304
1305
1306/*------------------------------------------------------------*/
1307/*--- Setup ---*/
1308/*------------------------------------------------------------*/
1309
/* Register addrcheck's identity, core requirements, memory-event
   callbacks and compact helpers with the core, then initialise the
   shadow memory.  Runs before command-line options are processed. */
void SK_(pre_clo_init)(void)
{
   /* Identity / banner details. */
   VG_(details_name)            ("Addrcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a fine-grained address checker");
   VG_(details_copyright_author)(
      "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.");
   VG_(details_bug_reports_to)  ("jseward@acm.org");
   VG_(details_avg_translation_sizeB) ( 135 );

   /* Core facilities this skin needs. */
   VG_(needs_core_errors)         ();
   VG_(needs_skin_errors)         ();
   VG_(needs_libc_freeres)        ();
   VG_(needs_sizeof_shadow_block) ( 1 );
   VG_(needs_command_line_options)();
   VG_(needs_client_requests)     ();
   VG_(needs_syscall_wrapper)     ();
   VG_(needs_alternative_free)    ();
   VG_(needs_sanity_checks)       ();

   /* Memory becoming usable: mark it addressible. */
   VG_(track_new_mem_startup)      ( & ac_new_mem_startup );
   VG_(track_new_mem_heap)         ( & ac_new_mem_heap );
   VG_(track_new_mem_stack_signal) ( & ac_make_accessible );
   VG_(track_new_mem_brk)          ( & ac_make_accessible );
   VG_(track_new_mem_mmap)         ( & ac_set_perms );

   /* Fast-path stack growth handlers (shared with memcheck). */
   VG_(track_new_mem_stack_4)      ( & MC_(new_mem_stack_4) );
   VG_(track_new_mem_stack_8)      ( & MC_(new_mem_stack_8) );
   VG_(track_new_mem_stack_12)     ( & MC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16)     ( & MC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32)     ( & MC_(new_mem_stack_32) );
   VG_(track_new_mem_stack)        ( & MC_(new_mem_stack) );

   /* Memory moving or changing protection: copy/update A bits. */
   VG_(track_copy_mem_heap)        ( & ac_copy_address_range_state );
   VG_(track_copy_mem_remap)       ( & ac_copy_address_range_state );
   VG_(track_change_mem_mprotect)  ( & ac_set_perms );

   /* Red zones around heap blocks and below the stack. */
   VG_(track_ban_mem_heap)         ( & ac_make_noaccess );
   VG_(track_ban_mem_stack)        ( & ac_make_noaccess );

   /* Memory becoming unusable: mark it unaddressible. */
   VG_(track_die_mem_heap)         ( & ac_make_noaccess );
   VG_(track_die_mem_stack_signal) ( & ac_make_noaccess );
   VG_(track_die_mem_brk)          ( & ac_make_noaccess );
   VG_(track_die_mem_munmap)       ( & ac_make_noaccess );

   /* Fast-path stack shrink handlers (shared with memcheck). */
   VG_(track_die_mem_stack_4)      ( & MC_(die_mem_stack_4)  );
   VG_(track_die_mem_stack_8)      ( & MC_(die_mem_stack_8)  );
   VG_(track_die_mem_stack_12)     ( & MC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16)     ( & MC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32)     ( & MC_(die_mem_stack_32) );
   VG_(track_die_mem_stack)        ( & MC_(die_mem_stack)    );

   /* Allocator misuse (shared error recorders). */
   VG_(track_bad_free)             ( & MC_(record_free_error) );
   VG_(track_mismatched_free)      ( & MC_(record_freemismatch_error) );

   /* Pre/post checks around core-initiated reads and writes. */
   VG_(track_pre_mem_read)         ( & ac_check_is_readable );
   VG_(track_pre_mem_read_asciiz)  ( & ac_check_is_readable_asciiz );
   VG_(track_pre_mem_write)        ( & ac_check_is_writable );
   VG_(track_post_mem_write)       ( & ac_make_accessible );

   /* Helpers called from generated code (see SK_(instrument)). */
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS4);
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS2);
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS1);
   VG_(register_compact_helper)((Addr) & ac_fpu_ACCESS_check);

   /* Profiling event labels. */
   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VGP_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   init_shadow_memory();
   MC_(init_prof_mem)();
}
1382
void SK_(post_clo_init) ( void )
{
   /* Addrcheck has nothing to do once command-line options have been
      processed. */
}
1386
/* Final-tidy hook: print allocator stats, at default verbosity nag
   about diagnostics the user did not enable, run the leak check if
   requested, and dump shadow-memory profiling counters. */
void SK_(fini) ( void )
{
   VG_(print_malloc_stats)();

   if (VG_(clo_verbosity) == 1) {
      if (!MC_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
             "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   /* Leak check runs last so its report follows the stats above. */
   if (MC_(clo_leak_check)) ac_detect_memory_leaks();

   MC_(done_prof_mem)();
}
1403
1404/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001405/*--- end ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001406/*--------------------------------------------------------------------*/