blob: eb8a31d3e59b28f274ceb44e0e6b7b5a60650d6e [file] [log] [blame]
njn25e49d8e72002-09-23 09:36:25 +00001
2/*--------------------------------------------------------------------*/
3/*--- The AddrCheck skin: like MemCheck, but only does address ---*/
4/*--- checking. No definedness checking. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of AddrCheck, a lightweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn5c004e42002-11-18 11:04:50 +000033#include "mc_common.h"
sewardjd8033d92002-12-08 22:16:58 +000034#include "memcheck.h"
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn27f1a382002-11-08 15:48:16 +000037VG_DETERMINE_INTERFACE_VERSION
38
njn25e49d8e72002-09-23 09:36:25 +000039/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000040/*--- Comparing and printing errors ---*/
41/*------------------------------------------------------------*/
42
njn810086f2002-11-14 12:42:47 +000043void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) )
njn25e49d8e72002-09-23 09:36:25 +000044{
njn5c004e42002-11-18 11:04:50 +000045 MemCheckError* err_extra = VG_(get_error_extra)(err);
njn25e49d8e72002-09-23 09:36:25 +000046
njn810086f2002-11-14 12:42:47 +000047 switch (VG_(get_error_kind)(err)) {
njn25e49d8e72002-09-23 09:36:25 +000048 case CoreMemErr:
49 if (err_extra->isWrite) {
50 VG_(message)(Vg_UserMsg,
sewardj5de6ee02002-12-14 23:11:35 +000051 "%s contains unaddressable byte(s)",
52 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +000053 } else {
54 VG_(message)(Vg_UserMsg,
sewardj5de6ee02002-12-14 23:11:35 +000055 "%s contains unaddressable byte(s)",
56 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +000057 }
58 pp_ExeContext();
59 break;
60
61 case AddrErr:
62 switch (err_extra->axskind) {
63 case ReadAxs:
64 case WriteAxs:
65 /* These two aren't actually differentiated ever. */
66 VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
67 err_extra->size );
68 break;
69 case ExecAxs:
70 VG_(message)(Vg_UserMsg, "Jump to the invalid address "
71 "stated on the next line");
72 break;
73 default:
njn5c004e42002-11-18 11:04:50 +000074 VG_(skin_panic)("SK_(pp_SkinError)(axskind)");
njn25e49d8e72002-09-23 09:36:25 +000075 }
76 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +000077 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000078 break;
79
80 case FreeErr:
81 VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]");
82 /* fall through */
83 case FreeMismatchErr:
njn810086f2002-11-14 12:42:47 +000084 if (VG_(get_error_kind)(err) == FreeMismatchErr)
njn25e49d8e72002-09-23 09:36:25 +000085 VG_(message)(Vg_UserMsg,
86 "Mismatched free() / delete / delete []");
87 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +000088 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000089 break;
90
91 case ParamErr:
92 if (err_extra->isWrite) {
93 VG_(message)(Vg_UserMsg,
njn810086f2002-11-14 12:42:47 +000094 "Syscall param %s contains unaddressable byte(s)",
95 VG_(get_error_string)(err) );
njn25e49d8e72002-09-23 09:36:25 +000096 } else {
97 VG_(message)(Vg_UserMsg,
98 "Syscall param %s contains uninitialised or "
99 "unaddressable byte(s)",
njn810086f2002-11-14 12:42:47 +0000100 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +0000101 }
102 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +0000103 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +0000104 break;
105
106 case UserErr:
107 if (err_extra->isWrite) {
108 VG_(message)(Vg_UserMsg,
109 "Unaddressable byte(s) found during client check request");
110 } else {
111 VG_(message)(Vg_UserMsg,
112 "Uninitialised or "
113 "unaddressable byte(s) found during client check request");
114 }
115 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +0000116 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +0000117 break;
118
119 default:
njn810086f2002-11-14 12:42:47 +0000120 VG_(printf)("Error:\n unknown AddrCheck error code %d\n",
121 VG_(get_error_kind)(err));
njne427a662002-10-02 11:08:25 +0000122 VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
njn25e49d8e72002-09-23 09:36:25 +0000123 }
124}
125
126/*------------------------------------------------------------*/
127/*--- Recording errors ---*/
128/*------------------------------------------------------------*/
129
130/* Describe an address as best you can, for error messages,
131 putting the result in ai. */
132
static void describe_addr ( Addr a, AddrInfo* ai )
{
   ShadowChunk* sc;
   ThreadId     tid;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'.
      (GCC extension: these closures capture `a' from the enclosing
      stack frame.) */

   /* Closure for searching thread stacks */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(ShadowChunk *sh_ch)
   {
      return VG_(addr_is_in_block) ( a, VG_(get_sc_data)(sh_ch),
                                        VG_(get_sc_size)(sh_ch) );
   }

   /* The checks below run in priority order: stack, then recently-freed
      blocks, then live heap blocks; the first match wins. */

   /* Perhaps it's on a thread's stack? */
   tid = VG_(any_matching_thread_stack)(addr_is_in_bounds);
   if (tid != VG_INVALID_THREADID) {
      ai->akind     = Stack;
      ai->stack_tid = tid;
      return;
   }
   /* Search for a recently freed block which might bracket it. */
   sc = MC_(any_matching_freed_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Freed;
      ai->blksize    = VG_(get_sc_size)(sc);
      ai->rwoffset   = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
      ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
      return;
   }
   /* Search for a currently malloc'd block which might bracket it. */
   sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Mallocd;
      ai->blksize    = VG_(get_sc_size)(sc);
      ai->rwoffset   = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
      ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
      return;
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}
180
181
njn810086f2002-11-14 12:42:47 +0000182/* Creates a copy of the `extra' part, updates the copy with address info if
183 necessary, and returns the copy. */
184void* SK_(dup_extra_and_update)(Error* err)
njn25e49d8e72002-09-23 09:36:25 +0000185{
njnb3f7c092003-02-17 10:09:19 +0000186 MemCheckError* extra;
187 MemCheckError* new_extra = NULL;
njn25e49d8e72002-09-23 09:36:25 +0000188
njnb3f7c092003-02-17 10:09:19 +0000189 extra = ((MemCheckError*)VG_(get_error_extra)(err));
190 if (extra != NULL) {
191 new_extra = VG_(malloc)(sizeof(MemCheckError));
192 *new_extra = *extra;
193 if (new_extra->addrinfo.akind == Undescribed)
194 describe_addr ( VG_(get_error_address)(err), &(new_extra->addrinfo) );
195 }
njn25e49d8e72002-09-23 09:36:25 +0000196
njnb3f7c092003-02-17 10:09:19 +0000197
njn25e49d8e72002-09-23 09:36:25 +0000198
njn810086f2002-11-14 12:42:47 +0000199 return new_extra;
njn25e49d8e72002-09-23 09:36:25 +0000200}
201
njn25e49d8e72002-09-23 09:36:25 +0000202/*------------------------------------------------------------*/
203/*--- Suppressions ---*/
204/*------------------------------------------------------------*/
205
206#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
207 && VG_(strcmp)((s1),(s2))==0)
208
njn810086f2002-11-14 12:42:47 +0000209Bool SK_(recognised_suppression) ( Char* name, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +0000210{
njn810086f2002-11-14 12:42:47 +0000211 SuppKind skind;
212
213 if (STREQ(name, "Param")) skind = ParamSupp;
214 else if (STREQ(name, "CoreMem")) skind = CoreMemSupp;
215 else if (STREQ(name, "Addr1")) skind = Addr1Supp;
216 else if (STREQ(name, "Addr2")) skind = Addr2Supp;
217 else if (STREQ(name, "Addr4")) skind = Addr4Supp;
218 else if (STREQ(name, "Addr8")) skind = Addr8Supp;
219 else if (STREQ(name, "Free")) skind = FreeSupp;
sewardja75cd5a2002-12-28 12:36:55 +0000220 else if (STREQ(name, "Leak")) skind = LeakSupp;
njn25e49d8e72002-09-23 09:36:25 +0000221 else
222 return False;
223
njn810086f2002-11-14 12:42:47 +0000224 VG_(set_supp_kind)(su, skind);
njn25e49d8e72002-09-23 09:36:25 +0000225 return True;
226}
227
njn25e49d8e72002-09-23 09:36:25 +0000228# undef STREQ
229
230
njn25e49d8e72002-09-23 09:36:25 +0000231/*------------------------------------------------------------*/
232/*--- Profiling events ---*/
233/*------------------------------------------------------------*/
234
235typedef
236 enum {
237 VgpCheckMem = VgpFini+1,
238 VgpSetMem
239 }
240 VgpSkinCC;
241
njn5c004e42002-11-18 11:04:50 +0000242#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
243
njn25e49d8e72002-09-23 09:36:25 +0000244/*------------------------------------------------------------*/
245/*--- Low-level support for memory checking. ---*/
246/*------------------------------------------------------------*/
247
248/* All reads and writes are checked against a memory map, which
249 records the state of all memory in the process. The memory map is
250 organised like this:
251
252 The top 16 bits of an address are used to index into a top-level
253 map table, containing 65536 entries. Each entry is a pointer to a
254 second-level map, which records the accessibility and validity
255 permissions for the 65536 bytes indexed by the lower 16 bits of the
256 address. Each byte is represented by one bit, indicating
257 accessibility. So each second-level map contains 8192 bytes. This
258 two-level arrangement conveniently divides the 4G address space
259 into 64k lumps, each size 64k bytes.
260
261 All entries in the primary (top-level) map must point to a valid
262 secondary (second-level) map. Since most of the 4G of address
263 space will not be in use -- ie, not mapped at all -- there is a
264 distinguished secondary map, which indicates `not addressable and
265 not valid' writeable for all bytes. Entries in the primary map for
266 which the entire 64k is not in use at all point at this
267 distinguished map.
268
269 [...] lots of stuff deleted due to out of date-ness
270
271 As a final optimisation, the alignment and address checks for
272 4-byte loads and stores are combined in a neat way. The primary
273 map is extended to have 262144 entries (2^18), rather than 2^16.
274 The top 3/4 of these entries are permanently set to the
275 distinguished secondary map. For a 4-byte load/store, the
276 top-level map is indexed not with (addr >> 16) but instead f(addr),
277 where
278
279 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
280 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
281 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
282
283 ie the lowest two bits are placed above the 16 high address bits.
284 If either of these two bits are nonzero, the address is misaligned;
285 this will select a secondary map from the upper 3/4 of the primary
286 map. Because this is always the distinguished secondary map, a
287 (bogus) address check failure will result. The failure handling
288 code can then figure out whether this is a genuine addr check
289 failure or whether it is a possibly-legitimate access at a
290 misaligned address. */
291
292
293/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000294/*--- Function declarations. ---*/
295/*------------------------------------------------------------*/
296
njn5c004e42002-11-18 11:04:50 +0000297static void ac_ACCESS4_SLOWLY ( Addr a );
298static void ac_ACCESS2_SLOWLY ( Addr a );
299static void ac_ACCESS1_SLOWLY ( Addr a );
300static void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000301
302/*------------------------------------------------------------*/
303/*--- Data defns. ---*/
304/*------------------------------------------------------------*/
305
306typedef
307 struct {
308 UChar abits[8192];
309 }
310 AcSecMap;
311
312static AcSecMap* primary_map[ /*65536*/ 262144 ];
313static AcSecMap distinguished_secondary_map;
314
njn25e49d8e72002-09-23 09:36:25 +0000315static void init_shadow_memory ( void )
316{
317 Int i;
318
319 for (i = 0; i < 8192; i++) /* Invalid address */
320 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
321
322 /* These entries gradually get overwritten as the used address
323 space expands. */
324 for (i = 0; i < 65536; i++)
325 primary_map[i] = &distinguished_secondary_map;
326
327 /* These ones should never change; it's a bug in Valgrind if they do. */
328 for (i = 65536; i < 262144; i++)
329 primary_map[i] = &distinguished_secondary_map;
330}
331
njn25e49d8e72002-09-23 09:36:25 +0000332/*------------------------------------------------------------*/
333/*--- Basic bitmap management, reading and writing. ---*/
334/*------------------------------------------------------------*/
335
336/* Allocate and initialise a secondary map. */
337
/* Allocate a fresh secondary map (via the core's mmap wrapper) and mark
   every byte in it as an invalid address. */
static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
                                       Char* caller )
{
   AcSecMap* map;
   UInt      i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access and invalid value. */

   /* It just happens that a AcSecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. */
   sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}
359
360
361/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
362
363static __inline__ UChar get_abit ( Addr a )
364{
365 AcSecMap* sm = primary_map[a >> 16];
366 UInt sm_off = a & 0xFFFF;
367 PROF_EVENT(20);
368# if 0
369 if (IS_DISTINGUISHED_SM(sm))
370 VG_(message)(Vg_DebugMsg,
371 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
372# endif
373 return BITARR_TEST(sm->abits, sm_off)
374 ? VGM_BIT_INVALID : VGM_BIT_VALID;
375}
376
377static __inline__ void set_abit ( Addr a, UChar abit )
378{
379 AcSecMap* sm;
380 UInt sm_off;
381 PROF_EVENT(22);
382 ENSURE_MAPPABLE(a, "set_abit");
383 sm = primary_map[a >> 16];
384 sm_off = a & 0xFFFF;
385 if (abit)
386 BITARR_SET(sm->abits, sm_off);
387 else
388 BITARR_CLEAR(sm->abits, sm_off);
389}
390
391
392/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
393
/* Return the 4 addressability bits for the 4-aligned word containing
   `a', packed into the low nibble of the result. */
static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];      /* 8 bits covering an 8-byte group */
   abits8 >>= (a & 4 /* 100b */);        /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;                       /* keep the nibble for this word */
   return abits8;
}
410
411
412
413/*------------------------------------------------------------*/
414/*--- Setting permissions over address ranges. ---*/
415/*------------------------------------------------------------*/
416
/* Set the addressability bit of every byte in [a, a+len) to
   `example_a_bit' (VGM_BIT_VALID or VGM_BIT_INVALID).  Works one byte
   at a time until `a' is 8-byte aligned, then writes a whole abits byte
   (covering 8 addresses) per iteration, then finishes the tail slowly. */
static __inline__
void set_address_range_perms ( Addr a, UInt len,
                               UInt example_a_bit )
{
   UChar     abyte8;
   UInt      sm_off;
   AcSecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d",
                   len, example_a_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms: the bit replicated
      into all 8 positions of an abits byte. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   /* NOTE(review): this debug-only path still references set_vbyte and
      `vbyte', neither of which exists in AddrCheck (looks copied from
      MemCheck) -- it seems it cannot compile with VG_DEBUG_MEMORY
      defined; confirm. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      set_vbyte ( a, vbyte );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast: one whole abits byte per iteration. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}
522
523/* Set permissions for address ranges ... */
524
/* Mark every byte in [a, a+len) as unaddressable. */
static void ac_make_noaccess ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID );
}
531
/* Mark every byte in [a, a+len) as addressable. */
static void ac_make_accessible ( Addr a, UInt len )
{
   PROF_EVENT(38);
   DEBUG("ac_make_accessible(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID );
}
538
539/* Block-copy permissions (needed for implementing realloc()). */
540
njn5c004e42002-11-18 11:04:50 +0000541static void ac_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000542{
543 UInt i;
544
njn5c004e42002-11-18 11:04:50 +0000545 DEBUG("ac_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000546
547 PROF_EVENT(40);
548 for (i = 0; i < len; i++) {
549 UChar abit = get_abit ( src+i );
550 PROF_EVENT(41);
551 set_abit ( dst+i, abit );
552 }
553}
554
555
556/* Check permissions for address range. If inadequate permissions
557 exist, *bad_addr is set to the offending address, so the caller can
558 know what it is. */
559
njn5c004e42002-11-18 11:04:50 +0000560static __inline__
561Bool ac_check_accessible ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000562{
563 UInt i;
564 UChar abit;
njn5c004e42002-11-18 11:04:50 +0000565 PROF_EVENT(48);
njn25e49d8e72002-09-23 09:36:25 +0000566 for (i = 0; i < len; i++) {
njn5c004e42002-11-18 11:04:50 +0000567 PROF_EVENT(49);
njn25e49d8e72002-09-23 09:36:25 +0000568 abit = get_abit(a);
569 if (abit == VGM_BIT_INVALID) {
570 if (bad_addr != NULL) *bad_addr = a;
571 return False;
572 }
573 a++;
574 }
575 return True;
576}
577
njn25e49d8e72002-09-23 09:36:25 +0000578/* Check a zero-terminated ascii string. Tricky -- don't want to
579 examine the actual bytes, to find the end, until we're sure it is
580 safe to do so. */
581
njn5c004e42002-11-18 11:04:50 +0000582static __inline__
583Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000584{
585 UChar abit;
586 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000587 DEBUG("ac_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000588 while (True) {
589 PROF_EVENT(47);
590 abit = get_abit(a);
591 if (abit != VGM_BIT_VALID) {
592 if (bad_addr != NULL) *bad_addr = a;
593 return False;
594 }
595 /* Ok, a is safe to read. */
596 if (* ((UChar*)a) == 0) return True;
597 a++;
598 }
599}
600
601
602/*------------------------------------------------------------*/
603/*--- Memory event handlers ---*/
604/*------------------------------------------------------------*/
605
606/* Setting permissions for aligned words. This supports fast stack
607 operations. */
608
/* Fast word-granularity version of ac_make_noaccess, for 4-aligned
   `a' and `len' (supports fast stack operations). */
static void ac_make_noaccess_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(50);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "ac_make_noaccess_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask   = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         invalid (1s). */
      sm->abits[sm_off >> 3] |= mask;
   }
   VGP_POPCC(VgpSetMem);
}
636
/* Fast word-granularity version of ac_make_accessible, for 4-aligned
   `a' and `len' (supports fast stack operations). */
static void ac_make_writable_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(51);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "ac_make_writable_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask   = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         valid (0s).  (The original comment said "invalid" here, but
         clearing an abit marks the byte addressable -- cf. set_abit.) */
      sm->abits[sm_off >> 3] &= ~mask;
   }
   VGP_POPCC(VgpSetMem);
}
664
665
/* Check that [base, base+size) is fully addressable; on failure, record
   an error of the kind appropriate to which part of the core made the
   request.  `s' names the checked object in the error message. */
static __inline__
void ac_check_is_accessible ( CorePart part, ThreadState* tst,
                              Char* s, Addr base, UInt size, Bool isWrite )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   ok = ac_check_accessible ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MC_(record_param_error) ( tst, bad_addr, isWrite, s );
         break;

      case Vg_CoreSignal:
         sk_assert(isWrite);    /* Should only happen with isWrite case */
         /* fall through */
      case Vg_CorePThread:
         MC_(record_core_mem_error)( tst, isWrite, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         sk_assert(!isWrite);   /* Should only happen with !isWrite case */
         MC_(record_jump_error)( tst, bad_addr );
         break;

      default:
         VG_(skin_panic)("ac_check_is_accessible: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}
703
/* Record an error if [base, base+size) is not addressable for writing. */
static
void ac_check_is_writable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/True );
}
710
/* Record an error if [base, base+size) is not addressable for reading. */
static
void ac_check_is_readable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/False );
}
717
/* Check that the NUL-terminated string at `str' is fully addressable;
   only ever invoked for syscall parameters (asserted below). */
static
void ac_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
                                   Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (!ok) {
      MC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}
736
/* Handler for memory already mapped at startup. */
static
void ac_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   ac_make_accessible(a, len);
}
744
/* New heap memory: mark it addressable.  `is_inited' is irrelevant here
   since AddrCheck tracks addressability only, not definedness. */
static
void ac_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   ac_make_accessible(a, len);
}
750
751static
njn5c004e42002-11-18 11:04:50 +0000752void ac_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +0000753 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000754{
njn5c004e42002-11-18 11:04:50 +0000755 DEBUG("ac_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
sewardj40f8ebe2002-10-23 21:46:13 +0000756 a, len, rr, ww, xx);
njn25e49d8e72002-09-23 09:36:25 +0000757 if (rr || ww || xx) {
njn5c004e42002-11-18 11:04:50 +0000758 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000759 } else {
njn5c004e42002-11-18 11:04:50 +0000760 ac_make_noaccess(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000761 }
762}
763
764
765/*------------------------------------------------------------*/
766/*--- Functions called directly from generated code. ---*/
767/*------------------------------------------------------------*/
768
769static __inline__ UInt rotateRight16 ( UInt x )
770{
771 /* Amazingly, gcc turns this into a single rotate insn. */
772 return (x >> 16) | (x << 16);
773}
774
/* Top 16 bits of an address, i.e. its primary-map index for byte-sized
   accesses. */
static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}
779
780
781/* Read/write 1/2/4 sized V bytes, and emit an address error if
782 needed. */
783
njn5c004e42002-11-18 11:04:50 +0000784/* ac_helperc_ACCESS{1,2,4} handle the common case fast.
njn25e49d8e72002-09-23 09:36:25 +0000785 Under all other circumstances, it defers to the relevant _SLOWLY
786 function, which can handle all situations.
787*/
__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS4 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS4_SLOWLY(a);
#  else
   /* rotateRight16 places the low two address bits above the 16 index
      bits, so a misaligned `a' selects one of the always-invalid upper
      3/4 entries of primary_map (see the big comment earlier). */
   UInt      sec_no = rotateRight16(a) & 0x3FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   UChar     abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(66);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressable.  So just return. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS4_SLOWLY(a);
   }
#  endif
}
811
__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS2 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS2_SLOWLY(a);
#  else
   /* Mask is 0x1FFFF: only address bit 0 is rotated into the index, so
      odd (2-misaligned) addresses land in the invalid upper half of
      primary_map. */
   UInt      sec_no = rotateRight16(a) & 0x1FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(67);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly: the whole 8-byte group containing
         `a' is addressable. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS2_SLOWLY(a);
   }
#  endif
}
831
__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS1 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS1_SLOWLY(a);
#  else
   /* Byte accesses cannot be misaligned, so a plain shift suffices for
      the primary-map index. */
   UInt      sec_no = shiftRight16(a);
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(68);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly: the whole 8-byte group containing
         `a' is addressable. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS1_SLOWLY(a);
   }
#  endif
}
851
852
/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- ac_helperc_ACCESS{1,2,4} can't manage.               ---*/
/*------------------------------------------------------------*/
857
njn5c004e42002-11-18 11:04:50 +0000858static void ac_ACCESS4_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000859{
860 Bool a0ok, a1ok, a2ok, a3ok;
861
njn5c004e42002-11-18 11:04:50 +0000862 PROF_EVENT(76);
njn25e49d8e72002-09-23 09:36:25 +0000863
864 /* First establish independently the addressibility of the 4 bytes
865 involved. */
866 a0ok = get_abit(a+0) == VGM_BIT_VALID;
867 a1ok = get_abit(a+1) == VGM_BIT_VALID;
868 a2ok = get_abit(a+2) == VGM_BIT_VALID;
869 a3ok = get_abit(a+3) == VGM_BIT_VALID;
870
871 /* Now distinguish 3 cases */
872
873 /* Case 1: the address is completely valid, so:
874 - no addressing error
875 */
876 if (a0ok && a1ok && a2ok && a3ok) {
877 return;
878 }
879
880 /* Case 2: the address is completely invalid.
881 - emit addressing error
882 */
883 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn5c004e42002-11-18 11:04:50 +0000884 if (!MC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000885 || ((a & 3) != 0)
886 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
njn5c004e42002-11-18 11:04:50 +0000887 MC_(record_address_error)( a, 4, False );
njn25e49d8e72002-09-23 09:36:25 +0000888 return;
889 }
890
891 /* Case 3: the address is partially valid.
892 - no addressing error
njn5c004e42002-11-18 11:04:50 +0000893 Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +0000894 (which is the default), and the address is 4-aligned.
895 If not, Case 2 will have applied.
896 */
njn5c004e42002-11-18 11:04:50 +0000897 sk_assert(MC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +0000898 {
899 return;
900 }
901}
902
njn5c004e42002-11-18 11:04:50 +0000903static void ac_ACCESS2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000904{
905 /* Check the address for validity. */
906 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000907 PROF_EVENT(77);
njn25e49d8e72002-09-23 09:36:25 +0000908
909 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
910 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
911
912 /* If an address error has happened, report it. */
913 if (aerr) {
njn5c004e42002-11-18 11:04:50 +0000914 MC_(record_address_error)( a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +0000915 }
916}
917
njn5c004e42002-11-18 11:04:50 +0000918static void ac_ACCESS1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000919{
920 /* Check the address for validity. */
921 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000922 PROF_EVENT(78);
njn25e49d8e72002-09-23 09:36:25 +0000923
924 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
925
926 /* If an address error has happened, report it. */
927 if (aerr) {
njn5c004e42002-11-18 11:04:50 +0000928 MC_(record_address_error)( a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +0000929 }
930}
931
932
933/* ---------------------------------------------------------------------
934 FPU load and store checks, called from generated code.
935 ------------------------------------------------------------------ */
936
937__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000938static void ac_fpu_ACCESS_check ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +0000939{
940 /* Ensure the read area is both addressible and valid (ie,
941 readable). If there's an address error, don't report a value
942 error too; but if there isn't an address error, check for a
943 value error.
944
945 Try to be reasonably fast on the common case; wimp out and defer
njn5c004e42002-11-18 11:04:50 +0000946 to ac_fpu_ACCESS_check_SLOWLY for everything else. */
njn25e49d8e72002-09-23 09:36:25 +0000947
948 AcSecMap* sm;
949 UInt sm_off, a_off;
950 Addr addr4;
951
njn5c004e42002-11-18 11:04:50 +0000952 PROF_EVENT(90);
njn25e49d8e72002-09-23 09:36:25 +0000953
954# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000955 ac_fpu_ACCESS_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +0000956# else
957
958 if (size == 4) {
959 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
njn5c004e42002-11-18 11:04:50 +0000960 PROF_EVENT(91);
njn25e49d8e72002-09-23 09:36:25 +0000961 /* Properly aligned. */
962 sm = primary_map[addr >> 16];
963 sm_off = addr & 0xFFFF;
964 a_off = sm_off >> 3;
965 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
966 /* Properly aligned and addressible. */
967 return;
968 slow4:
njn5c004e42002-11-18 11:04:50 +0000969 ac_fpu_ACCESS_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +0000970 return;
971 }
972
973 if (size == 8) {
974 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
njn5c004e42002-11-18 11:04:50 +0000975 PROF_EVENT(92);
njn25e49d8e72002-09-23 09:36:25 +0000976 /* Properly aligned. Do it in two halves. */
977 addr4 = addr + 4;
978 /* First half. */
979 sm = primary_map[addr >> 16];
980 sm_off = addr & 0xFFFF;
981 a_off = sm_off >> 3;
982 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
983 /* First half properly aligned and addressible. */
984 /* Second half. */
985 sm = primary_map[addr4 >> 16];
986 sm_off = addr4 & 0xFFFF;
987 a_off = sm_off >> 3;
988 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
989 /* Second half properly aligned and addressible. */
990 /* Both halves properly aligned and addressible. */
991 return;
992 slow8:
njn5c004e42002-11-18 11:04:50 +0000993 ac_fpu_ACCESS_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +0000994 return;
995 }
996
997 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
998 cases go quickly. */
999 if (size == 2) {
njn5c004e42002-11-18 11:04:50 +00001000 PROF_EVENT(93);
1001 ac_fpu_ACCESS_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +00001002 return;
1003 }
1004
njn5c004e42002-11-18 11:04:50 +00001005 if (size == 10 || size == 28 || size == 108) {
1006 PROF_EVENT(94);
1007 ac_fpu_ACCESS_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001008 return;
1009 }
1010
1011 VG_(printf)("size is %d\n", size);
njne427a662002-10-02 11:08:25 +00001012 VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001013# endif
1014}
1015
1016
1017/* ---------------------------------------------------------------------
1018 Slow, general cases for FPU access checks.
1019 ------------------------------------------------------------------ */
1020
njn5c004e42002-11-18 11:04:50 +00001021void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001022{
1023 Int i;
1024 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +00001025 PROF_EVENT(100);
njn25e49d8e72002-09-23 09:36:25 +00001026 for (i = 0; i < size; i++) {
njn5c004e42002-11-18 11:04:50 +00001027 PROF_EVENT(101);
njn25e49d8e72002-09-23 09:36:25 +00001028 if (get_abit(addr+i) != VGM_BIT_VALID)
1029 aerr = True;
1030 }
1031
1032 if (aerr) {
njn5c004e42002-11-18 11:04:50 +00001033 MC_(record_address_error)( addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001034 }
1035}
1036
1037
1038/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001039/*--- Our instrumenter ---*/
1040/*------------------------------------------------------------*/
1041
njn25e49d8e72002-09-23 09:36:25 +00001042UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
1043{
1044/* Use this rather than eg. -1 because it's a UInt. */
1045#define INVALID_DATA_SIZE 999999
1046
1047 UCodeBlock* cb;
1048 Int i;
1049 UInstr* u_in;
1050 Int t_addr, t_size;
1051
njn810086f2002-11-14 12:42:47 +00001052 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001053
njn810086f2002-11-14 12:42:47 +00001054 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
njn25e49d8e72002-09-23 09:36:25 +00001055
1056 t_addr = t_size = INVALID_TEMPREG;
njn810086f2002-11-14 12:42:47 +00001057 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +00001058
1059 switch (u_in->opcode) {
sewardj7a5ebcf2002-11-13 22:42:13 +00001060 case NOP: case LOCK: case CALLM_E: case CALLM_S:
njn25e49d8e72002-09-23 09:36:25 +00001061 break;
1062
1063 /* For memory-ref instrs, copy the data_addr into a temporary to be
1064 * passed to the cachesim_* helper at the end of the instruction.
1065 */
1066 case LOAD:
1067 t_addr = u_in->val1;
1068 goto do_LOAD_or_STORE;
1069 case STORE: t_addr = u_in->val2;
1070 goto do_LOAD_or_STORE;
1071 do_LOAD_or_STORE:
1072 uInstr1(cb, CCALL, 0, TempReg, t_addr);
1073 switch (u_in->size) {
njn5c004e42002-11-18 11:04:50 +00001074 case 4: uCCall(cb, (Addr) & ac_helperc_ACCESS4, 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001075 break;
njn5c004e42002-11-18 11:04:50 +00001076 case 2: uCCall(cb, (Addr) & ac_helperc_ACCESS2, 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001077 break;
njn5c004e42002-11-18 11:04:50 +00001078 case 1: uCCall(cb, (Addr) & ac_helperc_ACCESS1, 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001079 break;
1080 default:
njne427a662002-10-02 11:08:25 +00001081 VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
njn25e49d8e72002-09-23 09:36:25 +00001082 }
njn4ba5a792002-09-30 10:23:54 +00001083 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001084 break;
1085
1086 case FPU_R:
1087 case FPU_W:
1088 t_addr = u_in->val2;
1089 t_size = newTemp(cb);
1090 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1091 uLiteral(cb, u_in->size);
1092 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
njn5c004e42002-11-18 11:04:50 +00001093 uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
njn4ba5a792002-09-30 10:23:54 +00001094 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001095 break;
1096
sewardj3d7c9c82003-03-26 21:08:13 +00001097 case MMX2_MemRd:
1098 case MMX2_MemWr:
sewardjd7971012003-04-04 00:21:58 +00001099 sk_assert(u_in->size == 4 || u_in->size == 8);
sewardj3d7c9c82003-03-26 21:08:13 +00001100 t_addr = u_in->val2;
1101 t_size = newTemp(cb);
1102 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
sewardjd7971012003-04-04 00:21:58 +00001103 uLiteral(cb, u_in->size);
sewardj3d7c9c82003-03-26 21:08:13 +00001104 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
1105 uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
1106 VG_(copy_UInstr)(cb, u_in);
1107 break;
1108
njn25e49d8e72002-09-23 09:36:25 +00001109 default:
njn4ba5a792002-09-30 10:23:54 +00001110 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001111 break;
1112 }
1113 }
1114
njn4ba5a792002-09-30 10:23:54 +00001115 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001116 return cb;
1117}
1118
1119
njn25e49d8e72002-09-23 09:36:25 +00001120/*------------------------------------------------------------*/
1121/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1122/*------------------------------------------------------------*/
1123
sewardja4495682002-10-21 07:29:59 +00001124/* For the memory leak detector, say whether an entire 64k chunk of
1125 address space is possibly in use, or not. If in doubt return
1126 True.
njn25e49d8e72002-09-23 09:36:25 +00001127*/
sewardja4495682002-10-21 07:29:59 +00001128static
1129Bool ac_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001130{
sewardja4495682002-10-21 07:29:59 +00001131 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1132 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1133 /* Definitely not in use. */
1134 return False;
1135 } else {
1136 return True;
njn25e49d8e72002-09-23 09:36:25 +00001137 }
1138}
1139
1140
sewardja4495682002-10-21 07:29:59 +00001141/* For the memory leak detector, say whether or not a given word
1142 address is to be regarded as valid. */
1143static
1144Bool ac_is_valid_address ( Addr a )
1145{
1146 UChar abits;
1147 sk_assert(IS_ALIGNED4_ADDR(a));
1148 abits = get_abits4_ALIGNED(a);
1149 if (abits == VGM_NIBBLE_VALID) {
1150 return True;
1151 } else {
1152 return False;
1153 }
1154}
1155
1156
1157/* Leak detector for this skin. We don't actually do anything, merely
1158 run the generic leak detector with suitable parameters for this
1159 skin. */
njn5c004e42002-11-18 11:04:50 +00001160static void ac_detect_memory_leaks ( void )
njn25e49d8e72002-09-23 09:36:25 +00001161{
sewardja4495682002-10-21 07:29:59 +00001162 VG_(generic_detect_memory_leaks) (
1163 ac_is_valid_64k_chunk,
1164 ac_is_valid_address,
njn5c004e42002-11-18 11:04:50 +00001165 MC_(get_where),
1166 MC_(clo_leak_resolution),
sewardj99aac972002-12-26 01:53:45 +00001167 MC_(clo_show_reachable),
1168 (UInt)LeakSupp
sewardja4495682002-10-21 07:29:59 +00001169 );
njn25e49d8e72002-09-23 09:36:25 +00001170}
1171
1172
1173/* ---------------------------------------------------------------------
1174 Sanity check machinery (permanently engaged).
1175 ------------------------------------------------------------------ */
1176
1177/* Check that nobody has spuriously claimed that the first or last 16
1178 pages (64 KB) of address space have become accessible. Failure of
1179 the following do not per se indicate an internal consistency
1180 problem, but they are so likely to that we really want to know
1181 about it if so. */
1182
1183Bool SK_(cheap_sanity_check) ( void )
1184{
sewardjd5815ec2003-04-06 12:23:27 +00001185 if (IS_DISTINGUISHED_SM(primary_map[0])
1186 /* kludge: kernel drops a page up at top of address range for
1187 magic "optimized syscalls", so we can no longer check the
1188 highest page */
1189 /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
1190 )
njn25e49d8e72002-09-23 09:36:25 +00001191 return True;
1192 else
1193 return False;
1194}
1195
1196Bool SK_(expensive_sanity_check) ( void )
1197{
1198 Int i;
1199
1200 /* Make sure nobody changed the distinguished secondary. */
1201 for (i = 0; i < 8192; i++)
1202 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1203 return False;
1204
1205 /* Make sure that the upper 3/4 of the primary map hasn't
1206 been messed with. */
1207 for (i = 65536; i < 262144; i++)
1208 if (primary_map[i] != & distinguished_secondary_map)
1209 return False;
1210
1211 return True;
1212}
1213
sewardjd8033d92002-12-08 22:16:58 +00001214/*
1215 Client requests
1216 */
/* Handle client requests in the MemCheck ('M','C') namespace.
   AddrCheck honours only DO_LEAK_CHECK; the definedness-related
   requests are ignored (with a capped number of warnings), since
   this skin tracks addressibility only.  Returns True iff the
   request was handled. */
Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt *ret )
{
/* Warn (at most 'moans' times per run) that a MemCheck-only request
   is being dropped. */
#define IGNORE(what)                                                    \
   do {                                                                 \
      if (moans-- > 0) {                                                \
         VG_(message)(Vg_UserMsg,                                       \
            "Warning: Addrcheck: ignoring `%s' request.", what);        \
         VG_(message)(Vg_UserMsg,                                       \
            " To honour this request, rerun with --skin=memcheck.");    \
      }                                                                 \
   } while (0)

   UInt* arg = arg_block;
   static Int moans = 3;   /* remaining ignore-warnings for this run */

   /* Overload memcheck client reqs */
   if (!VG_IS_SKIN_USERREQ('M','C',arg[0]))
      return False;

   switch (arg[0]) {
      case VG_USERREQ__DO_LEAK_CHECK:
         ac_detect_memory_leaks();
         *ret = 0; /* return value is meaningless */
         break;

      /* Ignore these */
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         IGNORE("VALGRIND_CHECK_WRITABLE");
         return False;
      case VG_USERREQ__CHECK_READABLE: /* check readable */
         IGNORE("VALGRIND_CHECK_READABLE");
         return False;
      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         IGNORE("VALGRIND_MAKE_NOACCESS");
         return False;
      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         IGNORE("VALGRIND_MAKE_WRITABLE");
         return False;
      case VG_USERREQ__MAKE_READABLE: /* make readable */
         IGNORE("VALGRIND_MAKE_READABLE");
         return False;
      case VG_USERREQ__DISCARD: /* discard */
         IGNORE("VALGRIND_CHECK_DISCARD");
         return False;

      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown addrcheck client request code %d",
                      arg[0]);
         return False;
   }
   return True;

#undef IGNORE
}
1272
njn25e49d8e72002-09-23 09:36:25 +00001273/*------------------------------------------------------------*/
1274/*--- Setup ---*/
1275/*------------------------------------------------------------*/
1276
njn25e49d8e72002-09-23 09:36:25 +00001277Bool SK_(process_cmd_line_option)(Char* arg)
1278{
njn5c004e42002-11-18 11:04:50 +00001279 return MC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001280}
1281
/* One-line summaries of the (MemCheck-shared) command-line options;
   the returned literal is printed verbatim by the core. */
Char* SK_(usage)(void)
{
   return
"    --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
"    --freelist-vol=<number>   volume of freed blocks queue [1000000]\n"
"    --leak-check=no|yes       search for memory leaks at exit? [no]\n"
"    --leak-resolution=low|med|high\n"
"                              amount of bt merging in leak check [low]\n"
"    --show-reachable=no|yes   show reachable blocks in leak check? [no]\n"
"    --workaround-gcc296-bugs=no|yes  self explanatory [no]\n"
"\n"
"    --cleanup=no|yes          improve after instrumentation? [yes]\n";
}
1295
1296
1297/*------------------------------------------------------------*/
1298/*--- Setup ---*/
1299/*------------------------------------------------------------*/
1300
njn810086f2002-11-14 12:42:47 +00001301void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001302{
njn810086f2002-11-14 12:42:47 +00001303 VG_(details_name) ("Addrcheck");
1304 VG_(details_version) (NULL);
1305 VG_(details_description) ("a fine-grained address checker");
1306 VG_(details_copyright_author)(
1307 "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.");
1308 VG_(details_bug_reports_to) ("jseward@acm.org");
sewardj78210aa2002-12-01 02:55:46 +00001309 VG_(details_avg_translation_sizeB) ( 135 );
njn25e49d8e72002-09-23 09:36:25 +00001310
njn810086f2002-11-14 12:42:47 +00001311 VG_(needs_core_errors) ();
1312 VG_(needs_skin_errors) ();
1313 VG_(needs_libc_freeres) ();
1314 VG_(needs_sizeof_shadow_block) ( 1 );
1315 VG_(needs_command_line_options)();
1316 VG_(needs_client_requests) ();
1317 VG_(needs_syscall_wrapper) ();
1318 VG_(needs_alternative_free) ();
1319 VG_(needs_sanity_checks) ();
njn25e49d8e72002-09-23 09:36:25 +00001320
njn5c004e42002-11-18 11:04:50 +00001321 VG_(track_new_mem_startup) ( & ac_new_mem_startup );
1322 VG_(track_new_mem_heap) ( & ac_new_mem_heap );
1323 VG_(track_new_mem_stack) ( & ac_make_accessible );
1324 VG_(track_new_mem_stack_aligned)( & ac_make_writable_aligned );
1325 VG_(track_new_mem_stack_signal) ( & ac_make_accessible );
1326 VG_(track_new_mem_brk) ( & ac_make_accessible );
1327 VG_(track_new_mem_mmap) ( & ac_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001328
njn5c004e42002-11-18 11:04:50 +00001329 VG_(track_copy_mem_heap) ( & ac_copy_address_range_state );
1330 VG_(track_copy_mem_remap) ( & ac_copy_address_range_state );
1331 VG_(track_change_mem_mprotect) ( & ac_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001332
njn5c004e42002-11-18 11:04:50 +00001333 VG_(track_ban_mem_heap) ( & ac_make_noaccess );
1334 VG_(track_ban_mem_stack) ( & ac_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00001335
njn5c004e42002-11-18 11:04:50 +00001336 VG_(track_die_mem_heap) ( & ac_make_noaccess );
1337 VG_(track_die_mem_stack) ( & ac_make_noaccess );
1338 VG_(track_die_mem_stack_aligned)( & ac_make_noaccess_aligned );
1339 VG_(track_die_mem_stack_signal) ( & ac_make_noaccess );
1340 VG_(track_die_mem_brk) ( & ac_make_noaccess );
1341 VG_(track_die_mem_munmap) ( & ac_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00001342
njn5c004e42002-11-18 11:04:50 +00001343 VG_(track_bad_free) ( & MC_(record_free_error) );
1344 VG_(track_mismatched_free) ( & MC_(record_freemismatch_error) );
njn25e49d8e72002-09-23 09:36:25 +00001345
njn5c004e42002-11-18 11:04:50 +00001346 VG_(track_pre_mem_read) ( & ac_check_is_readable );
1347 VG_(track_pre_mem_read_asciiz) ( & ac_check_is_readable_asciiz );
1348 VG_(track_pre_mem_write) ( & ac_check_is_writable );
1349 VG_(track_post_mem_write) ( & ac_make_accessible );
njn25e49d8e72002-09-23 09:36:25 +00001350
njn5c004e42002-11-18 11:04:50 +00001351 VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS4);
1352 VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS2);
1353 VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS1);
1354 VG_(register_compact_helper)((Addr) & ac_fpu_ACCESS_check);
njn25e49d8e72002-09-23 09:36:25 +00001355
1356 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1357 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njnd04b7c62002-10-03 14:05:52 +00001358
1359 init_shadow_memory();
njn5c004e42002-11-18 11:04:50 +00001360 MC_(init_prof_mem)();
1361}
1362
/* Nothing to do after command-line option processing. */
void SK_(post_clo_init) ( void )
{
}
1366
/* Final tidy-up at program exit: print allocator stats, suggest
   useful re-run flags at verbosity 1, run the leak check if
   requested, and dump memory-profiling counts. */
void SK_(fini) ( void )
{
   VG_(print_malloc_stats)();

   if (VG_(clo_verbosity) == 1) {
      if (!MC_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
             "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   if (MC_(clo_leak_check)) ac_detect_memory_leaks();

   MC_(done_prof_mem)();
}
1383
1384/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001385/*--- end ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001386/*--------------------------------------------------------------------*/