blob: 869d6e4b3d5fe0b5c7716244551253daa76b9ba1 [file] [log] [blame]
njn25e49d8e72002-09-23 09:36:25 +00001
2/*--------------------------------------------------------------------*/
3/*--- The AddrCheck skin: like MemCheck, but only does address ---*/
4/*--- checking. No definedness checking. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of AddrCheck, a lightweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn5c004e42002-11-18 11:04:50 +000033#include "mc_common.h"
sewardjd8033d92002-12-08 22:16:58 +000034#include "memcheck.h"
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn27f1a382002-11-08 15:48:16 +000037VG_DETERMINE_INTERFACE_VERSION
38
njn25e49d8e72002-09-23 09:36:25 +000039/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000040/*--- Comparing and printing errors ---*/
41/*------------------------------------------------------------*/
42
njn810086f2002-11-14 12:42:47 +000043void SK_(pp_SkinError) ( Error* err, void (*pp_ExeContext)(void) )
njn25e49d8e72002-09-23 09:36:25 +000044{
njn5c004e42002-11-18 11:04:50 +000045 MemCheckError* err_extra = VG_(get_error_extra)(err);
njn25e49d8e72002-09-23 09:36:25 +000046
njn810086f2002-11-14 12:42:47 +000047 switch (VG_(get_error_kind)(err)) {
njn25e49d8e72002-09-23 09:36:25 +000048 case CoreMemErr:
49 if (err_extra->isWrite) {
50 VG_(message)(Vg_UserMsg,
sewardj5de6ee02002-12-14 23:11:35 +000051 "%s contains unaddressable byte(s)",
52 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +000053 } else {
54 VG_(message)(Vg_UserMsg,
sewardj5de6ee02002-12-14 23:11:35 +000055 "%s contains unaddressable byte(s)",
56 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +000057 }
58 pp_ExeContext();
59 break;
60
61 case AddrErr:
62 switch (err_extra->axskind) {
63 case ReadAxs:
64 case WriteAxs:
65 /* These two aren't actually differentiated ever. */
66 VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
67 err_extra->size );
68 break;
69 case ExecAxs:
70 VG_(message)(Vg_UserMsg, "Jump to the invalid address "
71 "stated on the next line");
72 break;
73 default:
njn5c004e42002-11-18 11:04:50 +000074 VG_(skin_panic)("SK_(pp_SkinError)(axskind)");
njn25e49d8e72002-09-23 09:36:25 +000075 }
76 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +000077 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000078 break;
79
80 case FreeErr:
81 VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]");
82 /* fall through */
83 case FreeMismatchErr:
njn810086f2002-11-14 12:42:47 +000084 if (VG_(get_error_kind)(err) == FreeMismatchErr)
njn25e49d8e72002-09-23 09:36:25 +000085 VG_(message)(Vg_UserMsg,
86 "Mismatched free() / delete / delete []");
87 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +000088 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000089 break;
90
91 case ParamErr:
92 if (err_extra->isWrite) {
93 VG_(message)(Vg_UserMsg,
njn810086f2002-11-14 12:42:47 +000094 "Syscall param %s contains unaddressable byte(s)",
95 VG_(get_error_string)(err) );
njn25e49d8e72002-09-23 09:36:25 +000096 } else {
97 VG_(message)(Vg_UserMsg,
98 "Syscall param %s contains uninitialised or "
99 "unaddressable byte(s)",
njn810086f2002-11-14 12:42:47 +0000100 VG_(get_error_string)(err));
njn25e49d8e72002-09-23 09:36:25 +0000101 }
102 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +0000103 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +0000104 break;
105
106 case UserErr:
107 if (err_extra->isWrite) {
108 VG_(message)(Vg_UserMsg,
109 "Unaddressable byte(s) found during client check request");
110 } else {
111 VG_(message)(Vg_UserMsg,
112 "Uninitialised or "
113 "unaddressable byte(s) found during client check request");
114 }
115 pp_ExeContext();
njn5c004e42002-11-18 11:04:50 +0000116 MC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +0000117 break;
118
119 default:
njn810086f2002-11-14 12:42:47 +0000120 VG_(printf)("Error:\n unknown AddrCheck error code %d\n",
121 VG_(get_error_kind)(err));
njne427a662002-10-02 11:08:25 +0000122 VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
njn25e49d8e72002-09-23 09:36:25 +0000123 }
124}
125
126/*------------------------------------------------------------*/
127/*--- Recording errors ---*/
128/*------------------------------------------------------------*/
129
/* Describe an address as best you can, for error messages,
   putting the result in ai.  Tries, in order: thread stacks, the
   recently-freed list, then currently-malloc'd blocks; falls back to
   Unknown. */

static void describe_addr ( Addr a, AddrInfo* ai )
{
   ShadowChunk* sc;
   ThreadId     tid;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'.
      (GCC extension -- these closures capture 'a' from the enclosing
      frame.) */

   /* Closure for searching thread stacks */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(ShadowChunk *sh_ch)
   {
      return VG_(addr_is_in_block) ( a, VG_(get_sc_data)(sh_ch),
                                        VG_(get_sc_size)(sh_ch) );
   }
   /* Perhaps it's on a thread's stack? */
   tid = VG_(any_matching_thread_stack)(addr_is_in_bounds);
   if (tid != VG_INVALID_THREADID) {
      ai->akind     = Stack;
      ai->stack_tid = tid;
      return;
   }
   /* Search for a recently freed block which might bracket it. */
   sc = MC_(any_matching_freed_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Freed;
      ai->blksize    = VG_(get_sc_size)(sc);
      ai->rwoffset   = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
      /* extra word 0 of the shadow chunk holds the alloc/free context */
      ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
      return;
   }
   /* Search for a currently malloc'd block which might bracket it. */
   sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Mallocd;
      ai->blksize    = VG_(get_sc_size)(sc);
      ai->rwoffset   = (Int)(a) - (Int)(VG_(get_sc_data)(sc));
      ai->lastchange = (ExeContext*)( VG_(get_sc_extra)(sc, 0) );
      return;
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}
180
181
njn810086f2002-11-14 12:42:47 +0000182/* Creates a copy of the `extra' part, updates the copy with address info if
183 necessary, and returns the copy. */
184void* SK_(dup_extra_and_update)(Error* err)
njn25e49d8e72002-09-23 09:36:25 +0000185{
njn5c004e42002-11-18 11:04:50 +0000186 MemCheckError* new_extra;
njn25e49d8e72002-09-23 09:36:25 +0000187
njn5c004e42002-11-18 11:04:50 +0000188 new_extra = VG_(malloc)(sizeof(MemCheckError));
189 *new_extra = *((MemCheckError*)VG_(get_error_extra)(err));
njn25e49d8e72002-09-23 09:36:25 +0000190
njn810086f2002-11-14 12:42:47 +0000191 if (new_extra->addrinfo.akind == Undescribed)
192 describe_addr ( VG_(get_error_address)(err), &(new_extra->addrinfo) );
njn25e49d8e72002-09-23 09:36:25 +0000193
njn810086f2002-11-14 12:42:47 +0000194 return new_extra;
njn25e49d8e72002-09-23 09:36:25 +0000195}
196
njn25e49d8e72002-09-23 09:36:25 +0000197/*------------------------------------------------------------*/
198/*--- Suppressions ---*/
199/*------------------------------------------------------------*/
200
201#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
202 && VG_(strcmp)((s1),(s2))==0)
203
njn810086f2002-11-14 12:42:47 +0000204Bool SK_(recognised_suppression) ( Char* name, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +0000205{
njn810086f2002-11-14 12:42:47 +0000206 SuppKind skind;
207
208 if (STREQ(name, "Param")) skind = ParamSupp;
209 else if (STREQ(name, "CoreMem")) skind = CoreMemSupp;
210 else if (STREQ(name, "Addr1")) skind = Addr1Supp;
211 else if (STREQ(name, "Addr2")) skind = Addr2Supp;
212 else if (STREQ(name, "Addr4")) skind = Addr4Supp;
213 else if (STREQ(name, "Addr8")) skind = Addr8Supp;
214 else if (STREQ(name, "Free")) skind = FreeSupp;
sewardja75cd5a2002-12-28 12:36:55 +0000215 else if (STREQ(name, "Leak")) skind = LeakSupp;
njn25e49d8e72002-09-23 09:36:25 +0000216 else
217 return False;
218
njn810086f2002-11-14 12:42:47 +0000219 VG_(set_supp_kind)(su, skind);
njn25e49d8e72002-09-23 09:36:25 +0000220 return True;
221}
222
njn25e49d8e72002-09-23 09:36:25 +0000223# undef STREQ
224
225
njn25e49d8e72002-09-23 09:36:25 +0000226/*------------------------------------------------------------*/
227/*--- Profiling events ---*/
228/*------------------------------------------------------------*/
229
/* Profiling event categories for this skin, numbered on from the
   core's last one (VgpFini). */
typedef
   enum {
      VgpCheckMem = VgpFini+1,   /* checking addressability of a range */
      VgpSetMem                  /* setting addressability of a range  */
   }
   VgpSkinCC;

/* Debug printing; disabled by default -- the macro body is itself a
   comment, so DEBUG(...) expands to nothing. */
#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
238
njn25e49d8e72002-09-23 09:36:25 +0000239/*------------------------------------------------------------*/
240/*--- Low-level support for memory checking. ---*/
241/*------------------------------------------------------------*/
242
243/* All reads and writes are checked against a memory map, which
244 records the state of all memory in the process. The memory map is
245 organised like this:
246
247 The top 16 bits of an address are used to index into a top-level
248 map table, containing 65536 entries. Each entry is a pointer to a
   second-level map, which records the accessibility and validity
250 permissions for the 65536 bytes indexed by the lower 16 bits of the
251 address. Each byte is represented by one bit, indicating
252 accessibility. So each second-level map contains 8192 bytes. This
253 two-level arrangement conveniently divides the 4G address space
254 into 64k lumps, each size 64k bytes.
255
256 All entries in the primary (top-level) map must point to a valid
257 secondary (second-level) map. Since most of the 4G of address
258 space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressable and
   not valid' for all bytes.  Entries in the primary map for
261 which the entire 64k is not in use at all point at this
262 distinguished map.
263
264 [...] lots of stuff deleted due to out of date-ness
265
266 As a final optimisation, the alignment and address checks for
267 4-byte loads and stores are combined in a neat way. The primary
268 map is extended to have 262144 entries (2^18), rather than 2^16.
269 The top 3/4 of these entries are permanently set to the
270 distinguished secondary map. For a 4-byte load/store, the
271 top-level map is indexed not with (addr >> 16) but instead f(addr),
272 where
273
274 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
275 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
276 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
277
278 ie the lowest two bits are placed above the 16 high address bits.
279 If either of these two bits are nonzero, the address is misaligned;
280 this will select a secondary map from the upper 3/4 of the primary
281 map. Because this is always the distinguished secondary map, a
282 (bogus) address check failure will result. The failure handling
283 code can then figure out whether this is a genuine addr check
284 failure or whether it is a possibly-legitimate access at a
285 misaligned address. */
286
287
288/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000289/*--- Function declarations. ---*/
290/*------------------------------------------------------------*/
291
njn5c004e42002-11-18 11:04:50 +0000292static void ac_ACCESS4_SLOWLY ( Addr a );
293static void ac_ACCESS2_SLOWLY ( Addr a );
294static void ac_ACCESS1_SLOWLY ( Addr a );
295static void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000296
297/*------------------------------------------------------------*/
298/*--- Data defns. ---*/
299/*------------------------------------------------------------*/
300
/* Secondary map: one addressability bit per byte of a 64k chunk,
   hence 8192 bytes per map. */
typedef
   struct {
      UChar abits[8192];
   }
   AcSecMap;

/* Primary map: 2^18 entries rather than 2^16 -- see the big comment
   above for the misalignment trick that uses the extra 2 bits. */
static AcSecMap* primary_map[ /*65536*/ 262144 ];
/* Shared read-only secondary for "entirely inaccessible" chunks. */
static AcSecMap  distinguished_secondary_map;
309
njn25e49d8e72002-09-23 09:36:25 +0000310static void init_shadow_memory ( void )
311{
312 Int i;
313
314 for (i = 0; i < 8192; i++) /* Invalid address */
315 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
316
317 /* These entries gradually get overwritten as the used address
318 space expands. */
319 for (i = 0; i < 65536; i++)
320 primary_map[i] = &distinguished_secondary_map;
321
322 /* These ones should never change; it's a bug in Valgrind if they do. */
323 for (i = 65536; i < 262144; i++)
324 primary_map[i] = &distinguished_secondary_map;
325}
326
njn25e49d8e72002-09-23 09:36:25 +0000327/*------------------------------------------------------------*/
328/*--- Basic bitmap management, reading and writing. ---*/
329/*------------------------------------------------------------*/
330
/* Allocate and initialise a secondary map.  The caller string is only
   used for the (commented-out) debug print. */

static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
                                       Char* caller )
{
   AcSecMap* map;
   UInt      i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access and invalid value. */

   /* It just happens that a AcSecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. */
   sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}
354
355
/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

/* Return the addressability bit for the byte at a.  A *set* bit in
   the map means "invalid". */
static __inline__ UChar get_abit ( Addr a )
{
   AcSecMap* sm     = primary_map[a >> 16];
   UInt      sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
      if (IS_DISTINGUISHED_SM(sm))
         VG_(message)(Vg_DebugMsg,
                      "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}
371
372static __inline__ void set_abit ( Addr a, UChar abit )
373{
374 AcSecMap* sm;
375 UInt sm_off;
376 PROF_EVENT(22);
377 ENSURE_MAPPABLE(a, "set_abit");
378 sm = primary_map[a >> 16];
379 sm_off = a & 0xFFFF;
380 if (abit)
381 BITARR_SET(sm->abits, sm_off);
382 else
383 BITARR_CLEAR(sm->abits, sm_off);
384}
385
386
/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

/* Return the 4 addressability bits for the 4-aligned word at a, in
   the low nibble of the result. */
static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);    /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}
405
406
407
408/*------------------------------------------------------------*/
409/*--- Setting permissions over address ranges. ---*/
410/*------------------------------------------------------------*/
411
sewardj5de6ee02002-12-14 23:11:35 +0000412static __inline__
413void set_address_range_perms ( Addr a, UInt len,
414 UInt example_a_bit )
njn25e49d8e72002-09-23 09:36:25 +0000415{
416 UChar abyte8;
417 UInt sm_off;
418 AcSecMap* sm;
419
420 PROF_EVENT(30);
421
422 if (len == 0)
423 return;
424
425 if (len > 100 * 1000 * 1000) {
426 VG_(message)(Vg_UserMsg,
427 "Warning: set address range perms: "
428 "large range %u, a %d",
429 len, example_a_bit );
430 }
431
432 VGP_PUSHCC(VgpSetMem);
433
434 /* Requests to change permissions of huge address ranges may
435 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
436 far all legitimate requests have fallen beneath that size. */
437 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000438 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000439
440 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000441 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000442 || example_a_bit == VGM_BIT_INVALID);
443
444 /* In order that we can charge through the address space at 8
445 bytes/main-loop iteration, make up some perms. */
446 abyte8 = (example_a_bit << 7)
447 | (example_a_bit << 6)
448 | (example_a_bit << 5)
449 | (example_a_bit << 4)
450 | (example_a_bit << 3)
451 | (example_a_bit << 2)
452 | (example_a_bit << 1)
453 | (example_a_bit << 0);
454
455# ifdef VG_DEBUG_MEMORY
456 /* Do it ... */
457 while (True) {
458 PROF_EVENT(31);
459 if (len == 0) break;
460 set_abit ( a, example_a_bit );
461 set_vbyte ( a, vbyte );
462 a++;
463 len--;
464 }
465
466# else
467 /* Slowly do parts preceding 8-byte alignment. */
468 while (True) {
469 PROF_EVENT(31);
470 if (len == 0) break;
471 if ((a % 8) == 0) break;
472 set_abit ( a, example_a_bit );
473 a++;
474 len--;
475 }
476
477 if (len == 0) {
478 VGP_POPCC(VgpSetMem);
479 return;
480 }
njne427a662002-10-02 11:08:25 +0000481 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000482
483 /* Once aligned, go fast. */
484 while (True) {
485 PROF_EVENT(32);
486 if (len < 8) break;
487 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
488 sm = primary_map[a >> 16];
489 sm_off = a & 0xFFFF;
490 sm->abits[sm_off >> 3] = abyte8;
491 a += 8;
492 len -= 8;
493 }
494
495 if (len == 0) {
496 VGP_POPCC(VgpSetMem);
497 return;
498 }
njne427a662002-10-02 11:08:25 +0000499 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000500
501 /* Finish the upper fragment. */
502 while (True) {
503 PROF_EVENT(33);
504 if (len == 0) break;
505 set_abit ( a, example_a_bit );
506 a++;
507 len--;
508 }
509# endif
510
511 /* Check that zero page and highest page have not been written to
512 -- this could happen with buggy syscall wrappers. Today
513 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000514 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000515 VGP_POPCC(VgpSetMem);
516}
517
518/* Set permissions for address ranges ... */
519
/* Mark every byte in [a, a+len) as unaddressable. */
static void ac_make_noaccess ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID );
}
526
/* Mark every byte in [a, a+len) as addressable. */
static void ac_make_accessible ( Addr a, UInt len )
{
   PROF_EVENT(38);
   DEBUG("ac_make_accessible(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID );
}
533
534/* Block-copy permissions (needed for implementing realloc()). */
535
njn5c004e42002-11-18 11:04:50 +0000536static void ac_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000537{
538 UInt i;
539
njn5c004e42002-11-18 11:04:50 +0000540 DEBUG("ac_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000541
542 PROF_EVENT(40);
543 for (i = 0; i < len; i++) {
544 UChar abit = get_abit ( src+i );
545 PROF_EVENT(41);
546 set_abit ( dst+i, abit );
547 }
548}
549
550
551/* Check permissions for address range. If inadequate permissions
552 exist, *bad_addr is set to the offending address, so the caller can
553 know what it is. */
554
njn5c004e42002-11-18 11:04:50 +0000555static __inline__
556Bool ac_check_accessible ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000557{
558 UInt i;
559 UChar abit;
njn5c004e42002-11-18 11:04:50 +0000560 PROF_EVENT(48);
njn25e49d8e72002-09-23 09:36:25 +0000561 for (i = 0; i < len; i++) {
njn5c004e42002-11-18 11:04:50 +0000562 PROF_EVENT(49);
njn25e49d8e72002-09-23 09:36:25 +0000563 abit = get_abit(a);
564 if (abit == VGM_BIT_INVALID) {
565 if (bad_addr != NULL) *bad_addr = a;
566 return False;
567 }
568 a++;
569 }
570 return True;
571}
572
njn25e49d8e72002-09-23 09:36:25 +0000573/* Check a zero-terminated ascii string. Tricky -- don't want to
574 examine the actual bytes, to find the end, until we're sure it is
575 safe to do so. */
576
njn5c004e42002-11-18 11:04:50 +0000577static __inline__
578Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000579{
580 UChar abit;
581 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000582 DEBUG("ac_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000583 while (True) {
584 PROF_EVENT(47);
585 abit = get_abit(a);
586 if (abit != VGM_BIT_VALID) {
587 if (bad_addr != NULL) *bad_addr = a;
588 return False;
589 }
590 /* Ok, a is safe to read. */
591 if (* ((UChar*)a) == 0) return True;
592 a++;
593 }
594}
595
596
597/*------------------------------------------------------------*/
598/*--- Memory event handlers ---*/
599/*------------------------------------------------------------*/
600
601/* Setting permissions for aligned words. This supports fast stack
602 operations. */
603
/* Mark the 4-aligned range [a, a+len) unaddressable, one whole
   aligned word (nibble of the shadow byte) at a time. */
static void ac_make_noaccess_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(50);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "ac_make_noaccess_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);    /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         invalid (1s). */
      sm->abits[sm_off >> 3] |= mask;
   }
   VGP_POPCC(VgpSetMem);
}
631
/* Mark the 4-aligned range [a, a+len) addressable, one whole aligned
   word (nibble of the shadow byte) at a time. */
static void ac_make_writable_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(51);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "ac_make_writable_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);    /* a & 4 is either 0 or 4 */
      /* mask marks the nibble to change; clearing those bits makes
         the word's address bits valid (0s).  (The old comment said
         "invalid", which contradicts what &= ~mask does.) */
      sm->abits[sm_off >> 3] &= ~mask;
   }
   VGP_POPCC(VgpSetMem);
}
659
660
/* Check that [base, base+size) is fully addressable; if not, record
   an error whose kind depends on which part of the core noticed the
   problem. */
static __inline__
void ac_check_is_accessible ( CorePart part, ThreadState* tst,
                              Char* s, Addr base, UInt size, Bool isWrite )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   ok = ac_check_accessible ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MC_(record_param_error) ( tst, bad_addr, isWrite, s );
         break;

      case Vg_CoreSignal:
         sk_assert(isWrite);     /* Should only happen with isWrite case */
         /* fall through */
      case Vg_CorePThread:
         MC_(record_core_mem_error)( tst, isWrite, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         sk_assert(!isWrite);    /* Should only happen with !isWrite case */
         MC_(record_jump_error)( tst, bad_addr );
         break;

      default:
         VG_(skin_panic)("ac_check_is_accessible: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}
698
/* Convenience wrapper: check [base, base+size) for a write access. */
static
void ac_check_is_writable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/True );
}
705
/* Convenience wrapper: check [base, base+size) for a read access. */
static
void ac_check_is_readable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/False );
}
712
713static
714void ac_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
715 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000716{
717 Bool ok = True;
718 Addr bad_addr;
719 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
720
721 VGP_PUSHCC(VgpCheckMem);
722
njne427a662002-10-02 11:08:25 +0000723 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000724 ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000725 if (!ok) {
njn5c004e42002-11-18 11:04:50 +0000726 MC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000727 }
728
729 VGP_POPCC(VgpCheckMem);
730}
731
/* Handler for memory mappings already present at startup. */
static
void ac_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   ac_make_accessible(a, len);
}
739
/* Handler for new heap memory.  is_inited is irrelevant here: this
   skin tracks only addressability, not definedness. */
static
void ac_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   ac_make_accessible(a, len);
}
745
746static
njn5c004e42002-11-18 11:04:50 +0000747void ac_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +0000748 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000749{
njn5c004e42002-11-18 11:04:50 +0000750 DEBUG("ac_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
sewardj40f8ebe2002-10-23 21:46:13 +0000751 a, len, rr, ww, xx);
njn25e49d8e72002-09-23 09:36:25 +0000752 if (rr || ww || xx) {
njn5c004e42002-11-18 11:04:50 +0000753 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000754 } else {
njn5c004e42002-11-18 11:04:50 +0000755 ac_make_noaccess(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000756 }
757}
758
759
760/*------------------------------------------------------------*/
761/*--- Functions called directly from generated code. ---*/
762/*------------------------------------------------------------*/
763
/* Swap the two 16-bit halves of x.  Amazingly, gcc turns this into a
   single rotate insn. */
static __inline__ UInt rotateRight16 ( UInt x )
{
   UInt hi_half = x >> 16;
   UInt lo_half = x << 16;
   return hi_half | lo_half;
}
769
/* Extract the top 16 bits of x (the primary-map index for ordinary
   byte-granularity accesses). */
static __inline__ UInt shiftRight16 ( UInt x )
{
   return x / 65536u;   /* same as x >> 16 for unsigned x */
}
774
775
776/* Read/write 1/2/4 sized V bytes, and emit an address error if
777 needed. */
778
njn5c004e42002-11-18 11:04:50 +0000779/* ac_helperc_ACCESS{1,2,4} handle the common case fast.
njn25e49d8e72002-09-23 09:36:25 +0000780 Under all other circumstances, it defers to the relevant _SLOWLY
781 function, which can handle all situations.
782*/
/* Fast-path 4-byte access check.  Indexing with rotateRight16(a)
   places the low two address bits at primary-map index bits 16..17,
   so any misaligned access selects one of the upper (always
   distinguished, all-invalid) secondaries and drops into the slow
   path below. */
__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS4 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS4_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x3FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   UChar     abits  = sm->abits[a_off];
   abits >>= (a & 4);      /* select the nibble for this word */
   abits &= 15;
   PROF_EVENT(66);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible.  So just return. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS4_SLOWLY(a);
   }
#  endif
}
806
/* Fast-path 2-byte access check.  The 0x1FFFF mask keeps bit 16
   (which after rotateRight16 holds original address bit 0), so an
   odd address indexes a distinguished secondary and goes slow. */
__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS2 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS2_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x1FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(67);
   /* NOTE: the fast path requires the whole shadow byte (8 addresses)
      to be valid, not just the halfword's 2 bits. */
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS2_SLOWLY(a);
   }
#  endif
}
826
/* Fast-path 1-byte access check; no alignment concern, so a plain
   shift indexes the primary map. */
__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS1 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS1_SLOWLY(a);
#  else
   UInt      sec_no = shiftRight16(a);
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(68);
   /* NOTE: the fast path requires the whole shadow byte (8 addresses)
      to be valid, not just this byte's bit. */
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS1_SLOWLY(a);
   }
#  endif
}
846
847
/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- ac_helperc_ACCESS{1,2,4} can't manage.               ---*/
/*------------------------------------------------------------*/
852
njn5c004e42002-11-18 11:04:50 +0000853static void ac_ACCESS4_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000854{
855 Bool a0ok, a1ok, a2ok, a3ok;
856
njn5c004e42002-11-18 11:04:50 +0000857 PROF_EVENT(76);
njn25e49d8e72002-09-23 09:36:25 +0000858
859 /* First establish independently the addressibility of the 4 bytes
860 involved. */
861 a0ok = get_abit(a+0) == VGM_BIT_VALID;
862 a1ok = get_abit(a+1) == VGM_BIT_VALID;
863 a2ok = get_abit(a+2) == VGM_BIT_VALID;
864 a3ok = get_abit(a+3) == VGM_BIT_VALID;
865
866 /* Now distinguish 3 cases */
867
868 /* Case 1: the address is completely valid, so:
869 - no addressing error
870 */
871 if (a0ok && a1ok && a2ok && a3ok) {
872 return;
873 }
874
875 /* Case 2: the address is completely invalid.
876 - emit addressing error
877 */
878 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn5c004e42002-11-18 11:04:50 +0000879 if (!MC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000880 || ((a & 3) != 0)
881 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
njn5c004e42002-11-18 11:04:50 +0000882 MC_(record_address_error)( a, 4, False );
njn25e49d8e72002-09-23 09:36:25 +0000883 return;
884 }
885
886 /* Case 3: the address is partially valid.
887 - no addressing error
njn5c004e42002-11-18 11:04:50 +0000888 Case 3 is only allowed if MC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +0000889 (which is the default), and the address is 4-aligned.
890 If not, Case 2 will have applied.
891 */
njn5c004e42002-11-18 11:04:50 +0000892 sk_assert(MC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +0000893 {
894 return;
895 }
896}
897
njn5c004e42002-11-18 11:04:50 +0000898static void ac_ACCESS2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000899{
900 /* Check the address for validity. */
901 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000902 PROF_EVENT(77);
njn25e49d8e72002-09-23 09:36:25 +0000903
904 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
905 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
906
907 /* If an address error has happened, report it. */
908 if (aerr) {
njn5c004e42002-11-18 11:04:50 +0000909 MC_(record_address_error)( a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +0000910 }
911}
912
njn5c004e42002-11-18 11:04:50 +0000913static void ac_ACCESS1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000914{
915 /* Check the address for validity. */
916 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000917 PROF_EVENT(78);
njn25e49d8e72002-09-23 09:36:25 +0000918
919 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
920
921 /* If an address error has happened, report it. */
922 if (aerr) {
njn5c004e42002-11-18 11:04:50 +0000923 MC_(record_address_error)( a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +0000924 }
925}
926
927
928/* ---------------------------------------------------------------------
929 FPU load and store checks, called from generated code.
930 ------------------------------------------------------------------ */
931
932__attribute__ ((regparm(2)))
njn5c004e42002-11-18 11:04:50 +0000933static void ac_fpu_ACCESS_check ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +0000934{
935 /* Ensure the read area is both addressible and valid (ie,
936 readable). If there's an address error, don't report a value
937 error too; but if there isn't an address error, check for a
938 value error.
939
940 Try to be reasonably fast on the common case; wimp out and defer
njn5c004e42002-11-18 11:04:50 +0000941 to ac_fpu_ACCESS_check_SLOWLY for everything else. */
njn25e49d8e72002-09-23 09:36:25 +0000942
943 AcSecMap* sm;
944 UInt sm_off, a_off;
945 Addr addr4;
946
njn5c004e42002-11-18 11:04:50 +0000947 PROF_EVENT(90);
njn25e49d8e72002-09-23 09:36:25 +0000948
949# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000950 ac_fpu_ACCESS_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +0000951# else
952
953 if (size == 4) {
954 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
njn5c004e42002-11-18 11:04:50 +0000955 PROF_EVENT(91);
njn25e49d8e72002-09-23 09:36:25 +0000956 /* Properly aligned. */
957 sm = primary_map[addr >> 16];
958 sm_off = addr & 0xFFFF;
959 a_off = sm_off >> 3;
960 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
961 /* Properly aligned and addressible. */
962 return;
963 slow4:
njn5c004e42002-11-18 11:04:50 +0000964 ac_fpu_ACCESS_check_SLOWLY ( addr, 4 );
njn25e49d8e72002-09-23 09:36:25 +0000965 return;
966 }
967
968 if (size == 8) {
969 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
njn5c004e42002-11-18 11:04:50 +0000970 PROF_EVENT(92);
njn25e49d8e72002-09-23 09:36:25 +0000971 /* Properly aligned. Do it in two halves. */
972 addr4 = addr + 4;
973 /* First half. */
974 sm = primary_map[addr >> 16];
975 sm_off = addr & 0xFFFF;
976 a_off = sm_off >> 3;
977 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
978 /* First half properly aligned and addressible. */
979 /* Second half. */
980 sm = primary_map[addr4 >> 16];
981 sm_off = addr4 & 0xFFFF;
982 a_off = sm_off >> 3;
983 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
984 /* Second half properly aligned and addressible. */
985 /* Both halves properly aligned and addressible. */
986 return;
987 slow8:
njn5c004e42002-11-18 11:04:50 +0000988 ac_fpu_ACCESS_check_SLOWLY ( addr, 8 );
njn25e49d8e72002-09-23 09:36:25 +0000989 return;
990 }
991
992 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
993 cases go quickly. */
994 if (size == 2) {
njn5c004e42002-11-18 11:04:50 +0000995 PROF_EVENT(93);
996 ac_fpu_ACCESS_check_SLOWLY ( addr, 2 );
njn25e49d8e72002-09-23 09:36:25 +0000997 return;
998 }
999
njn5c004e42002-11-18 11:04:50 +00001000 if (size == 10 || size == 28 || size == 108) {
1001 PROF_EVENT(94);
1002 ac_fpu_ACCESS_check_SLOWLY ( addr, size );
njn25e49d8e72002-09-23 09:36:25 +00001003 return;
1004 }
1005
1006 VG_(printf)("size is %d\n", size);
njne427a662002-10-02 11:08:25 +00001007 VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
njn25e49d8e72002-09-23 09:36:25 +00001008# endif
1009}
1010
1011
1012/* ---------------------------------------------------------------------
1013 Slow, general cases for FPU access checks.
1014 ------------------------------------------------------------------ */
1015
njn5c004e42002-11-18 11:04:50 +00001016void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +00001017{
1018 Int i;
1019 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +00001020 PROF_EVENT(100);
njn25e49d8e72002-09-23 09:36:25 +00001021 for (i = 0; i < size; i++) {
njn5c004e42002-11-18 11:04:50 +00001022 PROF_EVENT(101);
njn25e49d8e72002-09-23 09:36:25 +00001023 if (get_abit(addr+i) != VGM_BIT_VALID)
1024 aerr = True;
1025 }
1026
1027 if (aerr) {
njn5c004e42002-11-18 11:04:50 +00001028 MC_(record_address_error)( addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +00001029 }
1030}
1031
1032
1033/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00001034/*--- Our instrumenter ---*/
1035/*------------------------------------------------------------*/
1036
njn25e49d8e72002-09-23 09:36:25 +00001037UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
1038{
1039/* Use this rather than eg. -1 because it's a UInt. */
1040#define INVALID_DATA_SIZE 999999
1041
1042 UCodeBlock* cb;
1043 Int i;
1044 UInstr* u_in;
1045 Int t_addr, t_size;
1046
njn810086f2002-11-14 12:42:47 +00001047 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001048
njn810086f2002-11-14 12:42:47 +00001049 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
njn25e49d8e72002-09-23 09:36:25 +00001050
1051 t_addr = t_size = INVALID_TEMPREG;
njn810086f2002-11-14 12:42:47 +00001052 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +00001053
1054 switch (u_in->opcode) {
sewardj7a5ebcf2002-11-13 22:42:13 +00001055 case NOP: case LOCK: case CALLM_E: case CALLM_S:
njn25e49d8e72002-09-23 09:36:25 +00001056 break;
1057
1058 /* For memory-ref instrs, copy the data_addr into a temporary to be
1059 * passed to the cachesim_* helper at the end of the instruction.
1060 */
1061 case LOAD:
1062 t_addr = u_in->val1;
1063 goto do_LOAD_or_STORE;
1064 case STORE: t_addr = u_in->val2;
1065 goto do_LOAD_or_STORE;
1066 do_LOAD_or_STORE:
1067 uInstr1(cb, CCALL, 0, TempReg, t_addr);
1068 switch (u_in->size) {
njn5c004e42002-11-18 11:04:50 +00001069 case 4: uCCall(cb, (Addr) & ac_helperc_ACCESS4, 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001070 break;
njn5c004e42002-11-18 11:04:50 +00001071 case 2: uCCall(cb, (Addr) & ac_helperc_ACCESS2, 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001072 break;
njn5c004e42002-11-18 11:04:50 +00001073 case 1: uCCall(cb, (Addr) & ac_helperc_ACCESS1, 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001074 break;
1075 default:
njne427a662002-10-02 11:08:25 +00001076 VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
njn25e49d8e72002-09-23 09:36:25 +00001077 }
njn4ba5a792002-09-30 10:23:54 +00001078 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001079 break;
1080
1081 case FPU_R:
1082 case FPU_W:
1083 t_addr = u_in->val2;
1084 t_size = newTemp(cb);
1085 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1086 uLiteral(cb, u_in->size);
1087 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
njn5c004e42002-11-18 11:04:50 +00001088 uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
njn4ba5a792002-09-30 10:23:54 +00001089 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001090 break;
1091
1092 default:
njn4ba5a792002-09-30 10:23:54 +00001093 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001094 break;
1095 }
1096 }
1097
njn4ba5a792002-09-30 10:23:54 +00001098 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001099 return cb;
1100}
1101
1102
njn25e49d8e72002-09-23 09:36:25 +00001103/*------------------------------------------------------------*/
1104/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1105/*------------------------------------------------------------*/
1106
sewardja4495682002-10-21 07:29:59 +00001107/* For the memory leak detector, say whether an entire 64k chunk of
1108 address space is possibly in use, or not. If in doubt return
1109 True.
njn25e49d8e72002-09-23 09:36:25 +00001110*/
sewardja4495682002-10-21 07:29:59 +00001111static
1112Bool ac_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001113{
sewardja4495682002-10-21 07:29:59 +00001114 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1115 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1116 /* Definitely not in use. */
1117 return False;
1118 } else {
1119 return True;
njn25e49d8e72002-09-23 09:36:25 +00001120 }
1121}
1122
1123
sewardja4495682002-10-21 07:29:59 +00001124/* For the memory leak detector, say whether or not a given word
1125 address is to be regarded as valid. */
1126static
1127Bool ac_is_valid_address ( Addr a )
1128{
1129 UChar abits;
1130 sk_assert(IS_ALIGNED4_ADDR(a));
1131 abits = get_abits4_ALIGNED(a);
1132 if (abits == VGM_NIBBLE_VALID) {
1133 return True;
1134 } else {
1135 return False;
1136 }
1137}
1138
1139
1140/* Leak detector for this skin. We don't actually do anything, merely
1141 run the generic leak detector with suitable parameters for this
1142 skin. */
njn5c004e42002-11-18 11:04:50 +00001143static void ac_detect_memory_leaks ( void )
njn25e49d8e72002-09-23 09:36:25 +00001144{
sewardja4495682002-10-21 07:29:59 +00001145 VG_(generic_detect_memory_leaks) (
1146 ac_is_valid_64k_chunk,
1147 ac_is_valid_address,
njn5c004e42002-11-18 11:04:50 +00001148 MC_(get_where),
1149 MC_(clo_leak_resolution),
sewardj99aac972002-12-26 01:53:45 +00001150 MC_(clo_show_reachable),
1151 (UInt)LeakSupp
sewardja4495682002-10-21 07:29:59 +00001152 );
njn25e49d8e72002-09-23 09:36:25 +00001153}
1154
1155
1156/* ---------------------------------------------------------------------
1157 Sanity check machinery (permanently engaged).
1158 ------------------------------------------------------------------ */
1159
1160/* Check that nobody has spuriously claimed that the first or last 16
1161 pages (64 KB) of address space have become accessible. Failure of
1162 the following do not per se indicate an internal consistency
1163 problem, but they are so likely to that we really want to know
1164 about it if so. */
1165
1166Bool SK_(cheap_sanity_check) ( void )
1167{
1168 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
1169 IS_DISTINGUISHED_SM(primary_map[65535]))
1170 return True;
1171 else
1172 return False;
1173}
1174
1175Bool SK_(expensive_sanity_check) ( void )
1176{
1177 Int i;
1178
1179 /* Make sure nobody changed the distinguished secondary. */
1180 for (i = 0; i < 8192; i++)
1181 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1182 return False;
1183
1184 /* Make sure that the upper 3/4 of the primary map hasn't
1185 been messed with. */
1186 for (i = 65536; i < 262144; i++)
1187 if (primary_map[i] != & distinguished_secondary_map)
1188 return False;
1189
1190 return True;
1191}
1192
sewardjd8033d92002-12-08 22:16:58 +00001193/*
1194 Client requests
1195 */
1196Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt *ret )
1197{
sewardjbf310d92002-12-28 13:09:57 +00001198#define IGNORE(what) \
1199 do { \
1200 if (moans-- > 0) { \
1201 VG_(message)(Vg_UserMsg, \
1202 "Warning: Addrcheck: ignoring `%s' request.", what); \
1203 VG_(message)(Vg_UserMsg, \
1204 " To honour this request, rerun with --skin=memcheck."); \
1205 } \
1206 } while (0)
1207
sewardjd8033d92002-12-08 22:16:58 +00001208 UInt* arg = arg_block;
sewardjbf310d92002-12-28 13:09:57 +00001209 static Int moans = 3;
sewardjd8033d92002-12-08 22:16:58 +00001210
1211 /* Overload memcheck client reqs */
1212 if (!VG_IS_SKIN_USERREQ('M','C',arg[0]))
1213 return False;
1214
1215 switch (arg[0]) {
1216 case VG_USERREQ__DO_LEAK_CHECK:
1217 ac_detect_memory_leaks();
1218 *ret = 0; /* return value is meaningless */
1219 break;
1220
sewardjbf310d92002-12-28 13:09:57 +00001221 /* Ignore these */
sewardjd8033d92002-12-08 22:16:58 +00001222 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
sewardjbf310d92002-12-28 13:09:57 +00001223 IGNORE("VALGRIND_CHECK_WRITABLE");
1224 return False;
sewardjd8033d92002-12-08 22:16:58 +00001225 case VG_USERREQ__CHECK_READABLE: /* check readable */
sewardjbf310d92002-12-28 13:09:57 +00001226 IGNORE("VALGRIND_CHECK_READABLE");
1227 return False;
sewardjd8033d92002-12-08 22:16:58 +00001228 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
sewardjbf310d92002-12-28 13:09:57 +00001229 IGNORE("VALGRIND_MAKE_NOACCESS");
1230 return False;
sewardjd8033d92002-12-08 22:16:58 +00001231 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
sewardjbf310d92002-12-28 13:09:57 +00001232 IGNORE("VALGRIND_MAKE_WRITABLE");
1233 return False;
sewardjd8033d92002-12-08 22:16:58 +00001234 case VG_USERREQ__MAKE_READABLE: /* make readable */
sewardjbf310d92002-12-28 13:09:57 +00001235 IGNORE("VALGRIND_MAKE_READABLE");
1236 return False;
sewardjd8033d92002-12-08 22:16:58 +00001237 case VG_USERREQ__DISCARD: /* discard */
sewardjbf310d92002-12-28 13:09:57 +00001238 IGNORE("VALGRIND_CHECK_DISCARD");
1239 return False;
sewardjd8033d92002-12-08 22:16:58 +00001240
1241 default:
1242 VG_(message)(Vg_UserMsg,
sewardja81709d2002-12-28 12:55:48 +00001243 "Warning: unknown addrcheck client request code %d",
sewardjd8033d92002-12-08 22:16:58 +00001244 arg[0]);
1245 return False;
1246 }
1247 return True;
sewardjbf310d92002-12-28 13:09:57 +00001248
1249#undef IGNORE
sewardjd8033d92002-12-08 22:16:58 +00001250}
1251
njn25e49d8e72002-09-23 09:36:25 +00001252/*------------------------------------------------------------*/
1253/*--- Setup ---*/
1254/*------------------------------------------------------------*/
1255
njn25e49d8e72002-09-23 09:36:25 +00001256Bool SK_(process_cmd_line_option)(Char* arg)
1257{
njn5c004e42002-11-18 11:04:50 +00001258 return MC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001259}
1260
1261Char* SK_(usage)(void)
1262{
1263 return
1264" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
1265" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
1266" --leak-check=no|yes search for memory leaks at exit? [no]\n"
1267" --leak-resolution=low|med|high\n"
1268" amount of bt merging in leak check [low]\n"
1269" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
1270" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
njn25e49d8e72002-09-23 09:36:25 +00001271"\n"
1272" --cleanup=no|yes improve after instrumentation? [yes]\n";
1273}
1274
1275
1276/*------------------------------------------------------------*/
1277/*--- Setup ---*/
1278/*------------------------------------------------------------*/
1279
njn810086f2002-11-14 12:42:47 +00001280void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001281{
njn810086f2002-11-14 12:42:47 +00001282 VG_(details_name) ("Addrcheck");
1283 VG_(details_version) (NULL);
1284 VG_(details_description) ("a fine-grained address checker");
1285 VG_(details_copyright_author)(
1286 "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.");
1287 VG_(details_bug_reports_to) ("jseward@acm.org");
sewardj78210aa2002-12-01 02:55:46 +00001288 VG_(details_avg_translation_sizeB) ( 135 );
njn25e49d8e72002-09-23 09:36:25 +00001289
njn810086f2002-11-14 12:42:47 +00001290 VG_(needs_core_errors) ();
1291 VG_(needs_skin_errors) ();
1292 VG_(needs_libc_freeres) ();
1293 VG_(needs_sizeof_shadow_block) ( 1 );
1294 VG_(needs_command_line_options)();
1295 VG_(needs_client_requests) ();
1296 VG_(needs_syscall_wrapper) ();
1297 VG_(needs_alternative_free) ();
1298 VG_(needs_sanity_checks) ();
njn25e49d8e72002-09-23 09:36:25 +00001299
njn5c004e42002-11-18 11:04:50 +00001300 VG_(track_new_mem_startup) ( & ac_new_mem_startup );
1301 VG_(track_new_mem_heap) ( & ac_new_mem_heap );
1302 VG_(track_new_mem_stack) ( & ac_make_accessible );
1303 VG_(track_new_mem_stack_aligned)( & ac_make_writable_aligned );
1304 VG_(track_new_mem_stack_signal) ( & ac_make_accessible );
1305 VG_(track_new_mem_brk) ( & ac_make_accessible );
1306 VG_(track_new_mem_mmap) ( & ac_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001307
njn5c004e42002-11-18 11:04:50 +00001308 VG_(track_copy_mem_heap) ( & ac_copy_address_range_state );
1309 VG_(track_copy_mem_remap) ( & ac_copy_address_range_state );
1310 VG_(track_change_mem_mprotect) ( & ac_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001311
njn5c004e42002-11-18 11:04:50 +00001312 VG_(track_ban_mem_heap) ( & ac_make_noaccess );
1313 VG_(track_ban_mem_stack) ( & ac_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00001314
njn5c004e42002-11-18 11:04:50 +00001315 VG_(track_die_mem_heap) ( & ac_make_noaccess );
1316 VG_(track_die_mem_stack) ( & ac_make_noaccess );
1317 VG_(track_die_mem_stack_aligned)( & ac_make_noaccess_aligned );
1318 VG_(track_die_mem_stack_signal) ( & ac_make_noaccess );
1319 VG_(track_die_mem_brk) ( & ac_make_noaccess );
1320 VG_(track_die_mem_munmap) ( & ac_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00001321
njn5c004e42002-11-18 11:04:50 +00001322 VG_(track_bad_free) ( & MC_(record_free_error) );
1323 VG_(track_mismatched_free) ( & MC_(record_freemismatch_error) );
njn25e49d8e72002-09-23 09:36:25 +00001324
njn5c004e42002-11-18 11:04:50 +00001325 VG_(track_pre_mem_read) ( & ac_check_is_readable );
1326 VG_(track_pre_mem_read_asciiz) ( & ac_check_is_readable_asciiz );
1327 VG_(track_pre_mem_write) ( & ac_check_is_writable );
1328 VG_(track_post_mem_write) ( & ac_make_accessible );
njn25e49d8e72002-09-23 09:36:25 +00001329
njn5c004e42002-11-18 11:04:50 +00001330 VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS4);
1331 VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS2);
1332 VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS1);
1333 VG_(register_compact_helper)((Addr) & ac_fpu_ACCESS_check);
njn25e49d8e72002-09-23 09:36:25 +00001334
1335 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1336 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njnd04b7c62002-10-03 14:05:52 +00001337
1338 init_shadow_memory();
njn5c004e42002-11-18 11:04:50 +00001339 MC_(init_prof_mem)();
1340}
1341
1342void SK_(post_clo_init) ( void )
1343{
1344}
1345
1346void SK_(fini) ( void )
1347{
1348 VG_(print_malloc_stats)();
1349
1350 if (VG_(clo_verbosity) == 1) {
1351 if (!MC_(clo_leak_check))
1352 VG_(message)(Vg_UserMsg,
1353 "For a detailed leak analysis, rerun with: --leak-check=yes");
1354
1355 VG_(message)(Vg_UserMsg,
1356 "For counts of detected errors, rerun with: -v");
1357 }
1358 if (MC_(clo_leak_check)) ac_detect_memory_leaks();
1359
1360 MC_(done_prof_mem)();
njn25e49d8e72002-09-23 09:36:25 +00001361}
1362
1363/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001364/*--- end ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001365/*--------------------------------------------------------------------*/