njn25e49d8e72002-09-23 09:36:25 +00001
2/*--------------------------------------------------------------------*/
nethercote137bc552003-11-14 17:47:54 +00003/*--- The AddrCheck tool: like MemCheck, but only does address ---*/
njn25e49d8e72002-09-23 09:36:25 +00004/*--- checking. No definedness checking. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
nethercote137bc552003-11-14 17:47:54 +00009 This file is part of AddrCheck, a lightweight Valgrind tool for
njnc9539842002-10-02 13:26:35 +000010 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
nethercotebb1c9912004-01-04 16:43:23 +000012 Copyright (C) 2000-2004 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn43c799e2003-04-08 00:08:52 +000033#include "mac_shared.h"
sewardjd8033d92002-12-08 22:16:58 +000034#include "memcheck.h"
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn9b007f62003-04-07 14:40:25 +000037
njn25e49d8e72002-09-23 09:36:25 +000038/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000039/*--- Comparing and printing errors ---*/
40/*------------------------------------------------------------*/
41
njn43c799e2003-04-08 00:08:52 +000042void SK_(pp_SkinError) ( Error* err )
njn25e49d8e72002-09-23 09:36:25 +000043{
njn43c799e2003-04-08 00:08:52 +000044 MAC_Error* err_extra = VG_(get_error_extra)(err);
njn25e49d8e72002-09-23 09:36:25 +000045
njn810086f2002-11-14 12:42:47 +000046 switch (VG_(get_error_kind)(err)) {
njn25e49d8e72002-09-23 09:36:25 +000047 case CoreMemErr:
njn43c799e2003-04-08 00:08:52 +000048 VG_(message)(Vg_UserMsg, "%s contains unaddressable byte(s)",
49 VG_(get_error_string)(err));
50 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn25e49d8e72002-09-23 09:36:25 +000051 break;
52
njn25e49d8e72002-09-23 09:36:25 +000053 case ParamErr:
njn43c799e2003-04-08 00:08:52 +000054 VG_(message)(Vg_UserMsg,
55 "Syscall param %s contains unaddressable byte(s)",
56 VG_(get_error_string)(err) );
57 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
58 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000059 break;
60
61 case UserErr:
njn43c799e2003-04-08 00:08:52 +000062 VG_(message)(Vg_UserMsg,
63 "Unaddressable byte(s) found during client check request");
64 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
65 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000066 break;
67
68 default:
njn43c799e2003-04-08 00:08:52 +000069 MAC_(pp_shared_SkinError)(err);
70 break;
njn25e49d8e72002-09-23 09:36:25 +000071 }
72}
73
74/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000075/*--- Suppressions ---*/
76/*------------------------------------------------------------*/
77
njn810086f2002-11-14 12:42:47 +000078Bool SK_(recognised_suppression) ( Char* name, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +000079{
njn43c799e2003-04-08 00:08:52 +000080 return MAC_(shared_recognised_suppression)(name, su);
njn25e49d8e72002-09-23 09:36:25 +000081}
82
njn5c004e42002-11-18 11:04:50 +000083#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
84
njn25e49d8e72002-09-23 09:36:25 +000085/*------------------------------------------------------------*/
86/*--- Low-level support for memory checking. ---*/
87/*------------------------------------------------------------*/
88
89/* All reads and writes are checked against a memory map, which
90 records the state of all memory in the process. The memory map is
91 organised like this:
92
93 The top 16 bits of an address are used to index into a top-level
94 map table, containing 65536 entries. Each entry is a pointer to a
 95 second-level map, which records the accessibility permissions
 96 for the 65536 bytes indexed by the lower 16 bits of the
97 address. Each byte is represented by one bit, indicating
98 accessibility. So each second-level map contains 8192 bytes. This
99 two-level arrangement conveniently divides the 4G address space
 100 into 64k lumps, each 64k bytes in size.
101
102 All entries in the primary (top-level) map must point to a valid
103 secondary (second-level) map. Since most of the 4G of address
104 space will not be in use -- ie, not mapped at all -- there is a
 105 distinguished secondary map, which marks every byte as `not
 106 addressable'. Entries in the primary map for
 107 which the entire 64k chunk is not in use at all point at this
108 distinguished map.
109
110 [...] lots of stuff deleted due to out of date-ness
111
112 As a final optimisation, the alignment and address checks for
113 4-byte loads and stores are combined in a neat way. The primary
114 map is extended to have 262144 entries (2^18), rather than 2^16.
115 The top 3/4 of these entries are permanently set to the
116 distinguished secondary map. For a 4-byte load/store, the
117 top-level map is indexed not with (addr >> 16) but instead f(addr),
118 where
119
120 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
121 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
122 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
123
124 ie the lowest two bits are placed above the 16 high address bits.
 125 If either of these two bits is nonzero, the address is misaligned;
126 this will select a secondary map from the upper 3/4 of the primary
127 map. Because this is always the distinguished secondary map, a
128 (bogus) address check failure will result. The failure handling
129 code can then figure out whether this is a genuine addr check
130 failure or whether it is a possibly-legitimate access at a
131 misaligned address. */
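
/* Editorial sketch (not part of the original code): the two indexing
   schemes described above, written out in isolation.  The helper names
   are hypothetical; the real implementations are primary_map[],
   get_abit(), rotateRight16() and ac_helperc_ACCESS4() further down
   in this file. */
#if 0
static UChar* sketch_abits_byte_for ( Addr a )
{
   /* Plain two-level lookup: the top 16 address bits select the
      secondary map, the low 16 bits locate the byte's A bit in it. */
   AcSecMap* sm     = primary_map[a >> 16];
   UInt      sm_off = a & 0xFFFF;
   return &sm->abits[sm_off >> 3];   /* bit (sm_off & 7) is a's A bit */
}

static UInt sketch_f ( Addr a )
{
   /* Combined alignment+address check for 4-byte accesses: rotate the
      low 16 bits above the high 16 and keep 18 bits.  If (a & 3) != 0
      the index is >= 65536, i.e. it lands in the top 3/4 of the
      primary map and so hits the distinguished secondary map. */
   return ((a >> 16) | (a << 16)) & 0x3FFFF;
}
#endif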
132
133
134/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000135/*--- Function declarations. ---*/
136/*------------------------------------------------------------*/
137
njnc2699f62003-09-05 23:29:33 +0000138static void ac_ACCESS4_SLOWLY ( Addr a, Bool isWrite );
139static void ac_ACCESS2_SLOWLY ( Addr a, Bool isWrite );
140static void ac_ACCESS1_SLOWLY ( Addr a, Bool isWrite );
141static void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size, Bool isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000142
143/*------------------------------------------------------------*/
144/*--- Data defns. ---*/
145/*------------------------------------------------------------*/
146
147typedef
148 struct {
149 UChar abits[8192];
150 }
151 AcSecMap;
152
153static AcSecMap* primary_map[ /*65536*/ 262144 ];
154static AcSecMap distinguished_secondary_map;
155
njn25e49d8e72002-09-23 09:36:25 +0000156static void init_shadow_memory ( void )
157{
158 Int i;
159
160 for (i = 0; i < 8192; i++) /* Invalid address */
161 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
162
163 /* These entries gradually get overwritten as the used address
164 space expands. */
165 for (i = 0; i < 65536; i++)
166 primary_map[i] = &distinguished_secondary_map;
167
168 /* These ones should never change; it's a bug in Valgrind if they do. */
169 for (i = 65536; i < 262144; i++)
170 primary_map[i] = &distinguished_secondary_map;
171}
172
njn25e49d8e72002-09-23 09:36:25 +0000173/*------------------------------------------------------------*/
174/*--- Basic bitmap management, reading and writing. ---*/
175/*------------------------------------------------------------*/
176
177/* Allocate and initialise a secondary map. */
178
179static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
180 Char* caller )
181{
182 AcSecMap* map;
183 UInt i;
184 PROF_EVENT(10);
185
 186 /* Mark all bytes as inaccessible. */
fitzhardinge98abfc72003-12-16 02:05:15 +0000187 map = (AcSecMap *)VG_(shadow_alloc)(sizeof(AcSecMap));
njn25e49d8e72002-09-23 09:36:25 +0000188 for (i = 0; i < 8192; i++)
189 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
190
191 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
192 return map;
193}
194
195
196/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
197
198static __inline__ UChar get_abit ( Addr a )
199{
200 AcSecMap* sm = primary_map[a >> 16];
201 UInt sm_off = a & 0xFFFF;
202 PROF_EVENT(20);
203# if 0
204 if (IS_DISTINGUISHED_SM(sm))
205 VG_(message)(Vg_DebugMsg,
206 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
207# endif
208 return BITARR_TEST(sm->abits, sm_off)
209 ? VGM_BIT_INVALID : VGM_BIT_VALID;
210}
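
/* Editorial note: in abits[] a set bit means `inaccessible' and a
   clear bit means `accessible', which is why get_abit() above maps a
   set bit to VGM_BIT_INVALID and set_abit() below sets the bit for a
   nonzero abit argument. */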
211
sewardj56867352003-10-12 10:27:06 +0000212static /* __inline__ */ void set_abit ( Addr a, UChar abit )
njn25e49d8e72002-09-23 09:36:25 +0000213{
214 AcSecMap* sm;
215 UInt sm_off;
216 PROF_EVENT(22);
217 ENSURE_MAPPABLE(a, "set_abit");
218 sm = primary_map[a >> 16];
219 sm_off = a & 0xFFFF;
220 if (abit)
221 BITARR_SET(sm->abits, sm_off);
222 else
223 BITARR_CLEAR(sm->abits, sm_off);
224}
225
226
227/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
228
229static __inline__ UChar get_abits4_ALIGNED ( Addr a )
230{
231 AcSecMap* sm;
232 UInt sm_off;
233 UChar abits8;
234 PROF_EVENT(24);
235# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000236 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000237# endif
238 sm = primary_map[a >> 16];
239 sm_off = a & 0xFFFF;
240 abits8 = sm->abits[sm_off >> 3];
241 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
242 abits8 &= 0x0F;
243 return abits8;
244}
245
246
247
248/*------------------------------------------------------------*/
249/*--- Setting permissions over address ranges. ---*/
250/*------------------------------------------------------------*/
251
sewardj56867352003-10-12 10:27:06 +0000252static /* __inline__ */
sewardj5de6ee02002-12-14 23:11:35 +0000253void set_address_range_perms ( Addr a, UInt len,
254 UInt example_a_bit )
njn25e49d8e72002-09-23 09:36:25 +0000255{
256 UChar abyte8;
257 UInt sm_off;
258 AcSecMap* sm;
259
260 PROF_EVENT(30);
261
262 if (len == 0)
263 return;
264
265 if (len > 100 * 1000 * 1000) {
266 VG_(message)(Vg_UserMsg,
267 "Warning: set address range perms: "
268 "large range %u, a %d",
269 len, example_a_bit );
270 }
271
272 VGP_PUSHCC(VgpSetMem);
273
274 /* Requests to change permissions of huge address ranges may
275 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
276 far all legitimate requests have fallen beneath that size. */
277 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000278 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000279
280 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000281 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000282 || example_a_bit == VGM_BIT_INVALID);
283
284 /* In order that we can charge through the address space at 8
285 bytes/main-loop iteration, make up some perms. */
286 abyte8 = (example_a_bit << 7)
287 | (example_a_bit << 6)
288 | (example_a_bit << 5)
289 | (example_a_bit << 4)
290 | (example_a_bit << 3)
291 | (example_a_bit << 2)
292 | (example_a_bit << 1)
293 | (example_a_bit << 0);
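
/* Editorial note: assuming the usual encoding (VGM_BIT_VALID == 0,
   VGM_BIT_INVALID == 1), abyte8 is either 0x00 (all eight bytes
   accessible) or 0xFF (all inaccessible), so the aligned fast loop
   below can store one whole shadow byte per 8 client bytes. */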
294
295# ifdef VG_DEBUG_MEMORY
296 /* Do it ... */
297 while (True) {
298 PROF_EVENT(31);
299 if (len == 0) break;
300 set_abit ( a, example_a_bit );
302 a++;
303 len--;
304 }
305
306# else
307 /* Slowly do parts preceding 8-byte alignment. */
308 while (True) {
309 PROF_EVENT(31);
310 if (len == 0) break;
311 if ((a % 8) == 0) break;
312 set_abit ( a, example_a_bit );
313 a++;
314 len--;
315 }
316
317 if (len == 0) {
318 VGP_POPCC(VgpSetMem);
319 return;
320 }
njne427a662002-10-02 11:08:25 +0000321 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000322
323 /* Once aligned, go fast. */
324 while (True) {
325 PROF_EVENT(32);
326 if (len < 8) break;
327 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
328 sm = primary_map[a >> 16];
329 sm_off = a & 0xFFFF;
330 sm->abits[sm_off >> 3] = abyte8;
331 a += 8;
332 len -= 8;
333 }
334
335 if (len == 0) {
336 VGP_POPCC(VgpSetMem);
337 return;
338 }
njne427a662002-10-02 11:08:25 +0000339 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000340
341 /* Finish the upper fragment. */
342 while (True) {
343 PROF_EVENT(33);
344 if (len == 0) break;
345 set_abit ( a, example_a_bit );
346 a++;
347 len--;
348 }
349# endif
350
351 /* Check that zero page and highest page have not been written to
 352 -- this could happen with buggy syscall wrappers. On 2001-04-26 we
 353 hit precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000354 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000355 VGP_POPCC(VgpSetMem);
356}
357
358/* Set permissions for address ranges ... */
359
njn5c004e42002-11-18 11:04:50 +0000360static void ac_make_noaccess ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000361{
362 PROF_EVENT(35);
njn5c004e42002-11-18 11:04:50 +0000363 DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000364 set_address_range_perms ( a, len, VGM_BIT_INVALID );
365}
366
njn5c004e42002-11-18 11:04:50 +0000367static void ac_make_accessible ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000368{
njn5c004e42002-11-18 11:04:50 +0000369 PROF_EVENT(38);
370 DEBUG("ac_make_accessible(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000371 set_address_range_perms ( a, len, VGM_BIT_VALID );
372}
373
njn9b007f62003-04-07 14:40:25 +0000374static __inline__
375void make_aligned_word_noaccess(Addr a)
376{
377 AcSecMap* sm;
378 UInt sm_off;
379 UChar mask;
380
381 VGP_PUSHCC(VgpESPAdj);
382 ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
383 sm = primary_map[a >> 16];
384 sm_off = a & 0xFFFF;
385 mask = 0x0F;
386 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
 387 /* mask has 1s at the A-bit positions to be set to 1 (inaccessible). */
388 sm->abits[sm_off >> 3] |= mask;
389 VGP_POPCC(VgpESPAdj);
390}
391
392static __inline__
393void make_aligned_word_accessible(Addr a)
394{
395 AcSecMap* sm;
396 UInt sm_off;
397 UChar mask;
398
399 VGP_PUSHCC(VgpESPAdj);
400 ENSURE_MAPPABLE(a, "make_aligned_word_accessible");
401 sm = primary_map[a >> 16];
402 sm_off = a & 0xFFFF;
403 mask = 0x0F;
404 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
 405 /* mask has 1s at the A-bit positions to be cleared to 0
 406 (accessible). */
407 sm->abits[sm_off >> 3] &= ~mask;
408 VGP_POPCC(VgpESPAdj);
409}
410
411/* Nb: by "aligned" here we mean 8-byte aligned */
412static __inline__
413void make_aligned_doubleword_accessible(Addr a)
414{
415 AcSecMap* sm;
416 UInt sm_off;
417
418 VGP_PUSHCC(VgpESPAdj);
419 ENSURE_MAPPABLE(a, "make_aligned_doubleword_accessible");
420 sm = primary_map[a >> 16];
421 sm_off = a & 0xFFFF;
422 sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
423 VGP_POPCC(VgpESPAdj);
424}
425
426static __inline__
427void make_aligned_doubleword_noaccess(Addr a)
428{
429 AcSecMap* sm;
430 UInt sm_off;
431
432 VGP_PUSHCC(VgpESPAdj);
433 ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
434 sm = primary_map[a >> 16];
435 sm_off = a & 0xFFFF;
436 sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
437 VGP_POPCC(VgpESPAdj);
438}
439
440/* The %esp update handling functions */
441ESP_UPDATE_HANDLERS ( make_aligned_word_accessible,
442 make_aligned_word_noaccess,
443 make_aligned_doubleword_accessible,
444 make_aligned_doubleword_noaccess,
445 ac_make_accessible,
446 ac_make_noaccess
447 );
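
/* Editorial sketch (an assumption about what ESP_UPDATE_HANDLERS in
   mac_shared.h expands to, inferred from the MAC_(new_mem_stack_*) /
   MAC_(die_mem_stack_*) hooks registered in SK_(pre_clo_init) below):
   a 4-byte stack growth would end up doing roughly

      void MAC_(new_mem_stack_4) ( Addr new_ESP )
      {
         if (IS_ALIGNED4_ADDR(new_ESP))
            make_aligned_word_accessible ( new_ESP );
         else
            ac_make_accessible ( new_ESP, 4 );
      }

   with the mirror-image noaccess calls when the stack shrinks. */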
448
449
njn25e49d8e72002-09-23 09:36:25 +0000450/* Block-copy permissions (needed for implementing realloc()). */
451
njn5c004e42002-11-18 11:04:50 +0000452static void ac_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000453{
454 UInt i;
455
njn5c004e42002-11-18 11:04:50 +0000456 DEBUG("ac_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000457
458 PROF_EVENT(40);
459 for (i = 0; i < len; i++) {
460 UChar abit = get_abit ( src+i );
461 PROF_EVENT(41);
462 set_abit ( dst+i, abit );
463 }
464}
465
466
467/* Check permissions for address range. If inadequate permissions
468 exist, *bad_addr is set to the offending address, so the caller can
469 know what it is. */
470
njn5c004e42002-11-18 11:04:50 +0000471static __inline__
472Bool ac_check_accessible ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000473{
474 UInt i;
475 UChar abit;
njn5c004e42002-11-18 11:04:50 +0000476 PROF_EVENT(48);
njn25e49d8e72002-09-23 09:36:25 +0000477 for (i = 0; i < len; i++) {
njn5c004e42002-11-18 11:04:50 +0000478 PROF_EVENT(49);
njn25e49d8e72002-09-23 09:36:25 +0000479 abit = get_abit(a);
480 if (abit == VGM_BIT_INVALID) {
481 if (bad_addr != NULL) *bad_addr = a;
482 return False;
483 }
484 a++;
485 }
486 return True;
487}
488
sewardjecf8e102003-07-12 12:11:39 +0000489/* The opposite; check that an address range is inaccessible. */
490static
491Bool ac_check_noaccess ( Addr a, UInt len, Addr* bad_addr )
492{
493 UInt i;
494 UChar abit;
495 PROF_EVENT(48);
496 for (i = 0; i < len; i++) {
497 PROF_EVENT(49);
498 abit = get_abit(a);
499 if (abit == VGM_BIT_VALID) {
500 if (bad_addr != NULL) *bad_addr = a;
501 return False;
502 }
503 a++;
504 }
505 return True;
506}
507
njn25e49d8e72002-09-23 09:36:25 +0000508/* Check a zero-terminated ascii string. Tricky -- don't want to
509 examine the actual bytes, to find the end, until we're sure it is
510 safe to do so. */
511
njn5c004e42002-11-18 11:04:50 +0000512static __inline__
513Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000514{
515 UChar abit;
516 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000517 DEBUG("ac_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000518 while (True) {
519 PROF_EVENT(47);
520 abit = get_abit(a);
521 if (abit != VGM_BIT_VALID) {
522 if (bad_addr != NULL) *bad_addr = a;
523 return False;
524 }
525 /* Ok, a is safe to read. */
526 if (* ((UChar*)a) == 0) return True;
527 a++;
528 }
529}
530
531
532/*------------------------------------------------------------*/
533/*--- Memory event handlers ---*/
534/*------------------------------------------------------------*/
535
njn5c004e42002-11-18 11:04:50 +0000536static __inline__
njn72718642003-07-24 08:45:32 +0000537void ac_check_is_accessible ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000538 Char* s, Addr base, UInt size, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000539{
540 Bool ok;
541 Addr bad_addr;
542
543 VGP_PUSHCC(VgpCheckMem);
544
njn5c004e42002-11-18 11:04:50 +0000545 ok = ac_check_accessible ( base, size, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000546 if (!ok) {
547 switch (part) {
548 case Vg_CoreSysCall:
njn72718642003-07-24 08:45:32 +0000549 MAC_(record_param_error) ( tid, bad_addr, isWrite, s );
njn25e49d8e72002-09-23 09:36:25 +0000550 break;
551
njn25e49d8e72002-09-23 09:36:25 +0000552 case Vg_CoreSignal:
njn5c004e42002-11-18 11:04:50 +0000553 sk_assert(isWrite); /* Should only happen with isWrite case */
554 /* fall through */
njn25e49d8e72002-09-23 09:36:25 +0000555 case Vg_CorePThread:
njn72718642003-07-24 08:45:32 +0000556 MAC_(record_core_mem_error)( tid, isWrite, s );
njn25e49d8e72002-09-23 09:36:25 +0000557 break;
558
559 /* If we're being asked to jump to a silly address, record an error
560 message before potentially crashing the entire system. */
561 case Vg_CoreTranslate:
njn5c004e42002-11-18 11:04:50 +0000562 sk_assert(!isWrite); /* Should only happen with !isWrite case */
njn72718642003-07-24 08:45:32 +0000563 MAC_(record_jump_error)( tid, bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000564 break;
565
566 default:
njn5c004e42002-11-18 11:04:50 +0000567 VG_(skin_panic)("ac_check_is_accessible: unexpected CorePart");
njn25e49d8e72002-09-23 09:36:25 +0000568 }
569 }
njn5c004e42002-11-18 11:04:50 +0000570
njn25e49d8e72002-09-23 09:36:25 +0000571 VGP_POPCC(VgpCheckMem);
572}
573
574static
njn72718642003-07-24 08:45:32 +0000575void ac_check_is_writable ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000576 Char* s, Addr base, UInt size )
577{
njn72718642003-07-24 08:45:32 +0000578 ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/True );
njn5c004e42002-11-18 11:04:50 +0000579}
580
581static
njn72718642003-07-24 08:45:32 +0000582void ac_check_is_readable ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000583 Char* s, Addr base, UInt size )
584{
njn72718642003-07-24 08:45:32 +0000585 ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/False );
njn5c004e42002-11-18 11:04:50 +0000586}
587
588static
njn72718642003-07-24 08:45:32 +0000589void ac_check_is_readable_asciiz ( CorePart part, ThreadId tid,
njn5c004e42002-11-18 11:04:50 +0000590 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000591{
592 Bool ok = True;
593 Addr bad_addr;
594 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
595
596 VGP_PUSHCC(VgpCheckMem);
597
njne427a662002-10-02 11:08:25 +0000598 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000599 ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000600 if (!ok) {
njn72718642003-07-24 08:45:32 +0000601 MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000602 }
603
604 VGP_POPCC(VgpCheckMem);
605}
606
607static
njn5c004e42002-11-18 11:04:50 +0000608void ac_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +0000609{
njn1f3a9092002-10-04 09:22:30 +0000610 /* Ignore the permissions; just make it accessible. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +0000611 DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
njn5c004e42002-11-18 11:04:50 +0000612 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000613}
614
615static
njn5c004e42002-11-18 11:04:50 +0000616void ac_new_mem_heap ( Addr a, UInt len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +0000617{
njn5c004e42002-11-18 11:04:50 +0000618 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000619}
620
621static
njnda2e36d2003-09-30 13:33:24 +0000622void ac_set_perms (Addr a, UInt len, Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000623{
njn5c004e42002-11-18 11:04:50 +0000624 DEBUG("ac_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
sewardj40f8ebe2002-10-23 21:46:13 +0000625 a, len, rr, ww, xx);
njn25e49d8e72002-09-23 09:36:25 +0000626 if (rr || ww || xx) {
njn5c004e42002-11-18 11:04:50 +0000627 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000628 } else {
njn5c004e42002-11-18 11:04:50 +0000629 ac_make_noaccess(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000630 }
631}
632
633
634/*------------------------------------------------------------*/
635/*--- Functions called directly from generated code. ---*/
636/*------------------------------------------------------------*/
637
638static __inline__ UInt rotateRight16 ( UInt x )
639{
640 /* Amazingly, gcc turns this into a single rotate insn. */
641 return (x >> 16) | (x << 16);
642}
643
njn25e49d8e72002-09-23 09:36:25 +0000644static __inline__ UInt shiftRight16 ( UInt x )
645{
646 return x >> 16;
647}
648
649
 650/* Check 1/2/4 sized accesses for addressability, and emit an address
 651 error if needed. */
652
njn5c004e42002-11-18 11:04:50 +0000653/* ac_helperc_ACCESS{1,2,4} handle the common case fast.
njn25e49d8e72002-09-23 09:36:25 +0000654 Under all other circumstances, they defer to the relevant _SLOWLY
655 function, which can handle all situations.
656*/
njnc2699f62003-09-05 23:29:33 +0000657static __inline__ void ac_helperc_ACCESS4 ( Addr a, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000658{
659# ifdef VG_DEBUG_MEMORY
njnc2699f62003-09-05 23:29:33 +0000660 return ac_ACCESS4_SLOWLY(a, isWrite);
njn25e49d8e72002-09-23 09:36:25 +0000661# else
662 UInt sec_no = rotateRight16(a) & 0x3FFFF;
njnda2e36d2003-09-30 13:33:24 +0000663 AcSecMap* sm = primary_map[sec_no];
njn25e49d8e72002-09-23 09:36:25 +0000664 UInt a_off = (a & 0xFFFF) >> 3;
665 UChar abits = sm->abits[a_off];
666 abits >>= (a & 4);
667 abits &= 15;
njn5c004e42002-11-18 11:04:50 +0000668 PROF_EVENT(66);
njn25e49d8e72002-09-23 09:36:25 +0000669 if (abits == VGM_NIBBLE_VALID) {
670 /* Handle common case quickly: a is suitably aligned, is mapped,
671 and is addressible. So just return. */
672 return;
673 } else {
674 /* Slow but general case. */
njnc2699f62003-09-05 23:29:33 +0000675 ac_ACCESS4_SLOWLY(a, isWrite);
njn25e49d8e72002-09-23 09:36:25 +0000676 }
677# endif
678}
679
njnc2699f62003-09-05 23:29:33 +0000680static __inline__ void ac_helperc_ACCESS2 ( Addr a, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000681{
682# ifdef VG_DEBUG_MEMORY
njnc2699f62003-09-05 23:29:33 +0000683 return ac_ACCESS2_SLOWLY(a, isWrite);
njn25e49d8e72002-09-23 09:36:25 +0000684# else
685 UInt sec_no = rotateRight16(a) & 0x1FFFF;
686 AcSecMap* sm = primary_map[sec_no];
687 UInt a_off = (a & 0xFFFF) >> 3;
njn5c004e42002-11-18 11:04:50 +0000688 PROF_EVENT(67);
njn25e49d8e72002-09-23 09:36:25 +0000689 if (sm->abits[a_off] == VGM_BYTE_VALID) {
690 /* Handle common case quickly. */
691 return;
692 } else {
693 /* Slow but general case. */
njnc2699f62003-09-05 23:29:33 +0000694 ac_ACCESS2_SLOWLY(a, isWrite);
njn25e49d8e72002-09-23 09:36:25 +0000695 }
696# endif
697}
698
njnc2699f62003-09-05 23:29:33 +0000699static __inline__ void ac_helperc_ACCESS1 ( Addr a, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000700{
701# ifdef VG_DEBUG_MEMORY
njnc2699f62003-09-05 23:29:33 +0000702 return ac_ACCESS1_SLOWLY(a, isWrite);
njn25e49d8e72002-09-23 09:36:25 +0000703# else
704 UInt sec_no = shiftRight16(a);
705 AcSecMap* sm = primary_map[sec_no];
706 UInt a_off = (a & 0xFFFF) >> 3;
njn5c004e42002-11-18 11:04:50 +0000707 PROF_EVENT(68);
njn25e49d8e72002-09-23 09:36:25 +0000708 if (sm->abits[a_off] == VGM_BYTE_VALID) {
709 /* Handle common case quickly. */
710 return;
711 } else {
712 /* Slow but general case. */
njnc2699f62003-09-05 23:29:33 +0000713 ac_ACCESS1_SLOWLY(a, isWrite);
njn25e49d8e72002-09-23 09:36:25 +0000714 }
715# endif
716}
717
nethercoteeec46302004-08-23 15:06:23 +0000718REGPARM(1)
njnc2699f62003-09-05 23:29:33 +0000719static void ac_helperc_LOAD4 ( Addr a )
720{
721 ac_helperc_ACCESS4 ( a, /*isWrite*/False );
722}
nethercoteeec46302004-08-23 15:06:23 +0000723REGPARM(1)
njnc2699f62003-09-05 23:29:33 +0000724static void ac_helperc_STORE4 ( Addr a )
725{
726 ac_helperc_ACCESS4 ( a, /*isWrite*/True );
727}
728
nethercoteeec46302004-08-23 15:06:23 +0000729REGPARM(1)
njnc2699f62003-09-05 23:29:33 +0000730static void ac_helperc_LOAD2 ( Addr a )
731{
732 ac_helperc_ACCESS2 ( a, /*isWrite*/False );
733}
nethercoteeec46302004-08-23 15:06:23 +0000734REGPARM(1)
njnc2699f62003-09-05 23:29:33 +0000735static void ac_helperc_STORE2 ( Addr a )
736{
737 ac_helperc_ACCESS2 ( a, /*isWrite*/True );
738}
739
nethercoteeec46302004-08-23 15:06:23 +0000740REGPARM(1)
njnc2699f62003-09-05 23:29:33 +0000741static void ac_helperc_LOAD1 ( Addr a )
742{
743 ac_helperc_ACCESS1 ( a, /*isWrite*/False );
744}
nethercoteeec46302004-08-23 15:06:23 +0000745REGPARM(1)
njnc2699f62003-09-05 23:29:33 +0000746static void ac_helperc_STORE1 ( Addr a )
747{
748 ac_helperc_ACCESS1 ( a, /*isWrite*/True );
749}
750
njn25e49d8e72002-09-23 09:36:25 +0000751
752/*------------------------------------------------------------*/
753/*--- Fallback functions to handle cases that the above ---*/
njnc2699f62003-09-05 23:29:33 +0000754/*--- ac_helperc_ACCESS{1,2,4} can't manage. ---*/
njn25e49d8e72002-09-23 09:36:25 +0000755/*------------------------------------------------------------*/
756
njnc2699f62003-09-05 23:29:33 +0000757static void ac_ACCESS4_SLOWLY ( Addr a, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000758{
759 Bool a0ok, a1ok, a2ok, a3ok;
760
njn5c004e42002-11-18 11:04:50 +0000761 PROF_EVENT(76);
njn25e49d8e72002-09-23 09:36:25 +0000762
 763 /* First establish independently the addressability of the 4 bytes
764 involved. */
765 a0ok = get_abit(a+0) == VGM_BIT_VALID;
766 a1ok = get_abit(a+1) == VGM_BIT_VALID;
767 a2ok = get_abit(a+2) == VGM_BIT_VALID;
768 a3ok = get_abit(a+3) == VGM_BIT_VALID;
769
770 /* Now distinguish 3 cases */
771
772 /* Case 1: the address is completely valid, so:
773 - no addressing error
774 */
775 if (a0ok && a1ok && a2ok && a3ok) {
776 return;
777 }
778
779 /* Case 2: the address is completely invalid.
780 - emit addressing error
781 */
782 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn43c799e2003-04-08 00:08:52 +0000783 if (!MAC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000784 || ((a & 3) != 0)
785 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
njnc2699f62003-09-05 23:29:33 +0000786 MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000787 return;
788 }
789
790 /* Case 3: the address is partially valid.
791 - no addressing error
njn43c799e2003-04-08 00:08:52 +0000792 Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +0000793 (which is the default), and the address is 4-aligned.
794 If not, Case 2 will have applied.
795 */
njn43c799e2003-04-08 00:08:52 +0000796 sk_assert(MAC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +0000797 {
798 return;
799 }
800}
801
njnc2699f62003-09-05 23:29:33 +0000802static void ac_ACCESS2_SLOWLY ( Addr a, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000803{
804 /* Check the address for validity. */
805 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000806 PROF_EVENT(77);
njn25e49d8e72002-09-23 09:36:25 +0000807
808 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
809 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
810
811 /* If an address error has happened, report it. */
812 if (aerr) {
njnc2699f62003-09-05 23:29:33 +0000813 MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000814 }
815}
816
njnc2699f62003-09-05 23:29:33 +0000817static void ac_ACCESS1_SLOWLY ( Addr a, Bool isWrite)
njn25e49d8e72002-09-23 09:36:25 +0000818{
819 /* Check the address for validity. */
820 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000821 PROF_EVENT(78);
njn25e49d8e72002-09-23 09:36:25 +0000822
823 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
824
825 /* If an address error has happened, report it. */
826 if (aerr) {
njnc2699f62003-09-05 23:29:33 +0000827 MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000828 }
829}
830
831
832/* ---------------------------------------------------------------------
833 FPU load and store checks, called from generated code.
834 ------------------------------------------------------------------ */
835
sewardj56867352003-10-12 10:27:06 +0000836static
njnc2699f62003-09-05 23:29:33 +0000837void ac_fpu_ACCESS_check ( Addr addr, Int size, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000838{
 839 /* Ensure the accessed area is addressable. Unlike MemCheck, this
 840 tool does no validity (definedness) checking, so only address
 841 errors can be reported here.
843
844 Try to be reasonably fast on the common case; wimp out and defer
njn5c004e42002-11-18 11:04:50 +0000845 to ac_fpu_ACCESS_check_SLOWLY for everything else. */
njn25e49d8e72002-09-23 09:36:25 +0000846
847 AcSecMap* sm;
848 UInt sm_off, a_off;
849 Addr addr4;
850
njn5c004e42002-11-18 11:04:50 +0000851 PROF_EVENT(90);
njn25e49d8e72002-09-23 09:36:25 +0000852
853# ifdef VG_DEBUG_MEMORY
njnc2699f62003-09-05 23:29:33 +0000854 ac_fpu_ACCESS_check_SLOWLY ( addr, size, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000855# else
856
857 if (size == 4) {
858 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
njn5c004e42002-11-18 11:04:50 +0000859 PROF_EVENT(91);
njn25e49d8e72002-09-23 09:36:25 +0000860 /* Properly aligned. */
861 sm = primary_map[addr >> 16];
862 sm_off = addr & 0xFFFF;
863 a_off = sm_off >> 3;
864 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
 865 /* Properly aligned and addressable. */
866 return;
867 slow4:
njnc2699f62003-09-05 23:29:33 +0000868 ac_fpu_ACCESS_check_SLOWLY ( addr, 4, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000869 return;
870 }
871
872 if (size == 8) {
873 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
njn5c004e42002-11-18 11:04:50 +0000874 PROF_EVENT(92);
njn25e49d8e72002-09-23 09:36:25 +0000875 /* Properly aligned. Do it in two halves. */
876 addr4 = addr + 4;
877 /* First half. */
878 sm = primary_map[addr >> 16];
879 sm_off = addr & 0xFFFF;
880 a_off = sm_off >> 3;
881 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
 882 /* First half properly aligned and addressable. */
883 /* Second half. */
884 sm = primary_map[addr4 >> 16];
885 sm_off = addr4 & 0xFFFF;
886 a_off = sm_off >> 3;
887 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
 888 /* Second half properly aligned and addressable. */
 889 /* Both halves properly aligned and addressable. */
890 return;
891 slow8:
njnc2699f62003-09-05 23:29:33 +0000892 ac_fpu_ACCESS_check_SLOWLY ( addr, 8, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000893 return;
894 }
895
896 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
897 cases go quickly. */
898 if (size == 2) {
njn5c004e42002-11-18 11:04:50 +0000899 PROF_EVENT(93);
njnc2699f62003-09-05 23:29:33 +0000900 ac_fpu_ACCESS_check_SLOWLY ( addr, 2, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000901 return;
902 }
903
jsewardfca60182004-01-04 23:30:55 +0000904 if (size == 16 || size == 10 || size == 28 || size == 108 || size == 512) {
njn5c004e42002-11-18 11:04:50 +0000905 PROF_EVENT(94);
njnc2699f62003-09-05 23:29:33 +0000906 ac_fpu_ACCESS_check_SLOWLY ( addr, size, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000907 return;
908 }
909
910 VG_(printf)("size is %d\n", size);
njne427a662002-10-02 11:08:25 +0000911 VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
njn25e49d8e72002-09-23 09:36:25 +0000912# endif
913}
914
nethercoteeec46302004-08-23 15:06:23 +0000915REGPARM(2)
njnc2699f62003-09-05 23:29:33 +0000916static void ac_fpu_READ_check ( Addr addr, Int size )
917{
918 ac_fpu_ACCESS_check ( addr, size, /*isWrite*/False );
919}
920
nethercoteeec46302004-08-23 15:06:23 +0000921REGPARM(2)
njnc2699f62003-09-05 23:29:33 +0000922static void ac_fpu_WRITE_check ( Addr addr, Int size )
923{
924 ac_fpu_ACCESS_check ( addr, size, /*isWrite*/True );
925}
njn25e49d8e72002-09-23 09:36:25 +0000926
927/* ---------------------------------------------------------------------
928 Slow, general cases for FPU access checks.
929 ------------------------------------------------------------------ */
930
njnc2699f62003-09-05 23:29:33 +0000931void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size, Bool isWrite )
njn25e49d8e72002-09-23 09:36:25 +0000932{
933 Int i;
934 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000935 PROF_EVENT(100);
njn25e49d8e72002-09-23 09:36:25 +0000936 for (i = 0; i < size; i++) {
njn5c004e42002-11-18 11:04:50 +0000937 PROF_EVENT(101);
njn25e49d8e72002-09-23 09:36:25 +0000938 if (get_abit(addr+i) != VGM_BIT_VALID)
939 aerr = True;
940 }
941
942 if (aerr) {
njnc2699f62003-09-05 23:29:33 +0000943 MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, isWrite );
njn25e49d8e72002-09-23 09:36:25 +0000944 }
945}
946
947
948/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000949/*--- Our instrumenter ---*/
950/*------------------------------------------------------------*/
951
njn25e49d8e72002-09-23 09:36:25 +0000952UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
953{
954/* Use this rather than eg. -1 because it's a UInt. */
955#define INVALID_DATA_SIZE 999999
956
957 UCodeBlock* cb;
958 Int i;
959 UInstr* u_in;
960 Int t_addr, t_size;
njnc2699f62003-09-05 23:29:33 +0000961 Addr helper;
njn25e49d8e72002-09-23 09:36:25 +0000962
njn810086f2002-11-14 12:42:47 +0000963 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +0000964
njn810086f2002-11-14 12:42:47 +0000965 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
njn25e49d8e72002-09-23 09:36:25 +0000966
967 t_addr = t_size = INVALID_TEMPREG;
njn810086f2002-11-14 12:42:47 +0000968 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +0000969
970 switch (u_in->opcode) {
sewardj7a5ebcf2002-11-13 22:42:13 +0000971 case NOP: case LOCK: case CALLM_E: case CALLM_S:
njn25e49d8e72002-09-23 09:36:25 +0000972 break;
973
sewardj77d30a22003-10-19 08:18:52 +0000974 /* For memory-ref instrs, pass the data address (and, for the FPU/SSE
 975 cases, the size) to the appropriate checking helper via a C call
 976 planted before the instruction itself is copied through.
njn25e49d8e72002-09-23 09:36:25 +0000976 */
njnc2699f62003-09-05 23:29:33 +0000977 case LOAD:
njn25e49d8e72002-09-23 09:36:25 +0000978 switch (u_in->size) {
njnc2699f62003-09-05 23:29:33 +0000979 case 4: helper = (Addr)ac_helperc_LOAD4; break;
980 case 2: helper = (Addr)ac_helperc_LOAD2; break;
981 case 1: helper = (Addr)ac_helperc_LOAD1; break;
sewardj77d30a22003-10-19 08:18:52 +0000982 default: VG_(skin_panic)
983 ("addrcheck::SK_(instrument):LOAD");
njn25e49d8e72002-09-23 09:36:25 +0000984 }
njnc2699f62003-09-05 23:29:33 +0000985 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
986 uCCall (cb, helper, 1, 1, False );
987 VG_(copy_UInstr)(cb, u_in);
988 break;
989
990 case STORE:
991 switch (u_in->size) {
992 case 4: helper = (Addr)ac_helperc_STORE4; break;
993 case 2: helper = (Addr)ac_helperc_STORE2; break;
994 case 1: helper = (Addr)ac_helperc_STORE1; break;
sewardj77d30a22003-10-19 08:18:52 +0000995 default: VG_(skin_panic)
996 ("addrcheck::SK_(instrument):STORE");
njnc2699f62003-09-05 23:29:33 +0000997 }
998 uInstr1(cb, CCALL, 0, TempReg, u_in->val2);
999 uCCall (cb, helper, 1, 1, False );
njn4ba5a792002-09-30 10:23:54 +00001000 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001001 break;
1002
sewardje3891fa2003-06-15 03:13:48 +00001003 case SSE3ag_MemRd_RegWr:
1004 sk_assert(u_in->size == 4 || u_in->size == 8);
njnc2699f62003-09-05 23:29:33 +00001005 helper = (Addr)ac_fpu_READ_check;
sewardje3891fa2003-06-15 03:13:48 +00001006 goto do_Access_ARG1;
1007 do_Access_ARG1:
1008 sk_assert(u_in->tag1 == TempReg);
1009 t_addr = u_in->val1;
1010 t_size = newTemp(cb);
1011 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1012 uLiteral(cb, u_in->size);
1013 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
njnc2699f62003-09-05 23:29:33 +00001014 uCCall(cb, helper, 2, 2, False );
sewardje3891fa2003-06-15 03:13:48 +00001015 VG_(copy_UInstr)(cb, u_in);
1016 break;
1017
sewardj3d7c9c82003-03-26 21:08:13 +00001018 case MMX2_MemRd:
njnc2699f62003-09-05 23:29:33 +00001019 sk_assert(u_in->size == 4 || u_in->size == 8);
1020 helper = (Addr)ac_fpu_READ_check;
1021 goto do_Access_ARG2;
sewardj3d7c9c82003-03-26 21:08:13 +00001022 case MMX2_MemWr:
sewardjd7971012003-04-04 00:21:58 +00001023 sk_assert(u_in->size == 4 || u_in->size == 8);
njnc2699f62003-09-05 23:29:33 +00001024 helper = (Addr)ac_fpu_WRITE_check;
sewardj1863abc2003-06-14 16:01:32 +00001025 goto do_Access_ARG2;
1026 case FPU_R:
njnc2699f62003-09-05 23:29:33 +00001027 helper = (Addr)ac_fpu_READ_check;
1028 goto do_Access_ARG2;
sewardj1863abc2003-06-14 16:01:32 +00001029 case FPU_W:
njnc2699f62003-09-05 23:29:33 +00001030 helper = (Addr)ac_fpu_WRITE_check;
sewardj1863abc2003-06-14 16:01:32 +00001031 goto do_Access_ARG2;
1032 do_Access_ARG2:
1033 sk_assert(u_in->tag2 == TempReg);
sewardj3d7c9c82003-03-26 21:08:13 +00001034 t_addr = u_in->val2;
1035 t_size = newTemp(cb);
1036 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
sewardjd7971012003-04-04 00:21:58 +00001037 uLiteral(cb, u_in->size);
sewardj3d7c9c82003-03-26 21:08:13 +00001038 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
njnc2699f62003-09-05 23:29:33 +00001039 uCCall(cb, helper, 2, 2, False );
sewardj3d7c9c82003-03-26 21:08:13 +00001040 VG_(copy_UInstr)(cb, u_in);
1041 break;
1042
nethercote42d9cd12004-04-20 10:07:44 +00001043 case MMX2a1_MemRd:
sewardj77d30a22003-10-19 08:18:52 +00001044 case SSE3a_MemRd:
sewardj1863abc2003-06-14 16:01:32 +00001045 case SSE2a_MemRd:
nethercoteb1affa82004-01-19 19:14:18 +00001046 case SSE3a1_MemRd:
1047 case SSE2a1_MemRd:
njnc2699f62003-09-05 23:29:33 +00001048 helper = (Addr)ac_fpu_READ_check;
1049 goto do_Access_ARG3;
sewardj1863abc2003-06-14 16:01:32 +00001050 case SSE2a_MemWr:
1051 case SSE3a_MemWr:
njnc2699f62003-09-05 23:29:33 +00001052 helper = (Addr)ac_fpu_WRITE_check;
sewardj1863abc2003-06-14 16:01:32 +00001053 goto do_Access_ARG3;
1054 do_Access_ARG3:
jsewardfca60182004-01-04 23:30:55 +00001055 sk_assert(u_in->size == 4 || u_in->size == 8
1056 || u_in->size == 16 || u_in->size == 512);
sewardj1863abc2003-06-14 16:01:32 +00001057 sk_assert(u_in->tag3 == TempReg);
1058 t_addr = u_in->val3;
1059 t_size = newTemp(cb);
1060 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1061 uLiteral(cb, u_in->size);
1062 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
njnc2699f62003-09-05 23:29:33 +00001063 uCCall(cb, helper, 2, 2, False );
sewardj1863abc2003-06-14 16:01:32 +00001064 VG_(copy_UInstr)(cb, u_in);
1065 break;
1066
sewardj095c3bc2003-06-15 23:26:04 +00001067 case SSE3e1_RegRd:
sewardjabf8bf82003-06-15 22:28:05 +00001068 case SSE3e_RegWr:
sewardje3891fa2003-06-15 03:13:48 +00001069 case SSE3g1_RegWr:
sewardj6bc40552003-06-15 01:40:58 +00001070 case SSE5:
sewardj1863abc2003-06-14 16:01:32 +00001071 case SSE3g_RegWr:
sewardj4fbe6e92003-06-15 21:54:34 +00001072 case SSE3e_RegRd:
sewardj1863abc2003-06-14 16:01:32 +00001073 case SSE4:
sewardj77d30a22003-10-19 08:18:52 +00001074 case SSE3:
njn25e49d8e72002-09-23 09:36:25 +00001075 default:
njn4ba5a792002-09-30 10:23:54 +00001076 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001077 break;
1078 }
1079 }
1080
njn4ba5a792002-09-30 10:23:54 +00001081 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001082 return cb;
1083}
1084
1085
njn25e49d8e72002-09-23 09:36:25 +00001086/*------------------------------------------------------------*/
1087/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1088/*------------------------------------------------------------*/
1089
sewardja4495682002-10-21 07:29:59 +00001090/* For the memory leak detector, say whether an entire 64k chunk of
1091 address space is possibly in use, or not. If in doubt return
1092 True.
njn25e49d8e72002-09-23 09:36:25 +00001093*/
sewardja4495682002-10-21 07:29:59 +00001094static
1095Bool ac_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001096{
sewardja4495682002-10-21 07:29:59 +00001097 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1098 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1099 /* Definitely not in use. */
1100 return False;
1101 } else {
1102 return True;
njn25e49d8e72002-09-23 09:36:25 +00001103 }
1104}
1105
1106
sewardja4495682002-10-21 07:29:59 +00001107/* For the memory leak detector, say whether or not a given word
1108 address is to be regarded as valid. */
1109static
1110Bool ac_is_valid_address ( Addr a )
1111{
1112 UChar abits;
1113 sk_assert(IS_ALIGNED4_ADDR(a));
1114 abits = get_abits4_ALIGNED(a);
1115 if (abits == VGM_NIBBLE_VALID) {
1116 return True;
1117 } else {
1118 return False;
1119 }
1120}
1121
1122
nethercote996901a2004-08-03 13:29:09 +00001123/* Leak detector for this tool. We don't actually do anything, merely
sewardja4495682002-10-21 07:29:59 +00001124 run the generic leak detector with suitable parameters for this
nethercote996901a2004-08-03 13:29:09 +00001125 tool. */
njn5c004e42002-11-18 11:04:50 +00001126static void ac_detect_memory_leaks ( void )
njn25e49d8e72002-09-23 09:36:25 +00001127{
njn43c799e2003-04-08 00:08:52 +00001128 MAC_(do_detect_memory_leaks) ( ac_is_valid_64k_chunk, ac_is_valid_address );
njn25e49d8e72002-09-23 09:36:25 +00001129}
1130
1131
1132/* ---------------------------------------------------------------------
1133 Sanity check machinery (permanently engaged).
1134 ------------------------------------------------------------------ */
1135
njn25e49d8e72002-09-23 09:36:25 +00001136Bool SK_(cheap_sanity_check) ( void )
1137{
jseward9800fd32004-01-04 23:08:04 +00001138 /* nothing useful we can rapidly check */
1139 return True;
njn25e49d8e72002-09-23 09:36:25 +00001140}
1141
1142Bool SK_(expensive_sanity_check) ( void )
1143{
1144 Int i;
1145
1146 /* Make sure nobody changed the distinguished secondary. */
1147 for (i = 0; i < 8192; i++)
1148 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1149 return False;
1150
1151 /* Make sure that the upper 3/4 of the primary map hasn't
1152 been messed with. */
1153 for (i = 65536; i < 262144; i++)
1154 if (primary_map[i] != & distinguished_secondary_map)
1155 return False;
1156
1157 return True;
1158}
1159
njn47363ab2003-04-21 13:24:40 +00001160/*------------------------------------------------------------*/
1161/*--- Client requests ---*/
1162/*------------------------------------------------------------*/
1163
njn72718642003-07-24 08:45:32 +00001164Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg_block, UInt *ret )
sewardjd8033d92002-12-08 22:16:58 +00001165{
sewardjbf310d92002-12-28 13:09:57 +00001166#define IGNORE(what) \
1167 do { \
1168 if (moans-- > 0) { \
1169 VG_(message)(Vg_UserMsg, \
1170 "Warning: Addrcheck: ignoring `%s' request.", what); \
1171 VG_(message)(Vg_UserMsg, \
nethercote137bc552003-11-14 17:47:54 +00001172 " To honour this request, rerun with --tool=memcheck."); \
sewardjbf310d92002-12-28 13:09:57 +00001173 } \
1174 } while (0)
1175
sewardjd8033d92002-12-08 22:16:58 +00001176 UInt* arg = arg_block;
sewardjbf310d92002-12-28 13:09:57 +00001177 static Int moans = 3;
sewardjd8033d92002-12-08 22:16:58 +00001178
1179 /* Overload memcheck client reqs */
njnd7994182003-10-02 13:44:04 +00001180 if (!VG_IS_SKIN_USERREQ('M','C',arg[0])
1181 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
rjwalshbc0bb832004-06-19 18:12:36 +00001182 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
1183 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
1184 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
1185 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
1186 && VG_USERREQ__MEMPOOL_FREE != arg[0])
sewardjd8033d92002-12-08 22:16:58 +00001187 return False;
1188
1189 switch (arg[0]) {
1190 case VG_USERREQ__DO_LEAK_CHECK:
1191 ac_detect_memory_leaks();
1192 *ret = 0; /* return value is meaningless */
1193 break;
1194
sewardjbf310d92002-12-28 13:09:57 +00001195 /* Ignore these */
sewardjd8033d92002-12-08 22:16:58 +00001196 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
sewardjbf310d92002-12-28 13:09:57 +00001197 IGNORE("VALGRIND_CHECK_WRITABLE");
1198 return False;
sewardjd8033d92002-12-08 22:16:58 +00001199 case VG_USERREQ__CHECK_READABLE: /* check readable */
sewardjbf310d92002-12-28 13:09:57 +00001200 IGNORE("VALGRIND_CHECK_READABLE");
1201 return False;
sewardjd8033d92002-12-08 22:16:58 +00001202 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
sewardjbf310d92002-12-28 13:09:57 +00001203 IGNORE("VALGRIND_MAKE_NOACCESS");
1204 return False;
sewardjd8033d92002-12-08 22:16:58 +00001205 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
sewardjbf310d92002-12-28 13:09:57 +00001206 IGNORE("VALGRIND_MAKE_WRITABLE");
1207 return False;
sewardjd8033d92002-12-08 22:16:58 +00001208 case VG_USERREQ__MAKE_READABLE: /* make readable */
sewardjbf310d92002-12-28 13:09:57 +00001209 IGNORE("VALGRIND_MAKE_READABLE");
1210 return False;
sewardjd8033d92002-12-08 22:16:58 +00001211 case VG_USERREQ__DISCARD: /* discard */
sewardjbf310d92002-12-28 13:09:57 +00001212 IGNORE("VALGRIND_CHECK_DISCARD");
1213 return False;
sewardjd8033d92002-12-08 22:16:58 +00001214
1215 default:
njn72718642003-07-24 08:45:32 +00001216 if (MAC_(handle_common_client_requests)(tid, arg_block, ret )) {
njn47363ab2003-04-21 13:24:40 +00001217 return True;
1218 } else {
1219 VG_(message)(Vg_UserMsg,
1220 "Warning: unknown addrcheck client request code %d",
1221 arg[0]);
1222 return False;
1223 }
sewardjd8033d92002-12-08 22:16:58 +00001224 }
1225 return True;
sewardjbf310d92002-12-28 13:09:57 +00001226
1227#undef IGNORE
sewardjd8033d92002-12-08 22:16:58 +00001228}
1229
njn25e49d8e72002-09-23 09:36:25 +00001230/*------------------------------------------------------------*/
 1231/*--- Command line args                                        ---*/
1232/*------------------------------------------------------------*/
1233
njn25e49d8e72002-09-23 09:36:25 +00001234Bool SK_(process_cmd_line_option)(Char* arg)
1235{
njn43c799e2003-04-08 00:08:52 +00001236 return MAC_(process_common_cmd_line_option)(arg);
njn25e49d8e72002-09-23 09:36:25 +00001237}
1238
njn3e884182003-04-15 13:03:23 +00001239void SK_(print_usage)(void)
njn25e49d8e72002-09-23 09:36:25 +00001240{
njn3e884182003-04-15 13:03:23 +00001241 MAC_(print_common_usage)();
1242}
1243
1244void SK_(print_debug_usage)(void)
1245{
1246 MAC_(print_common_debug_usage)();
njn25e49d8e72002-09-23 09:36:25 +00001247}
1248
1249
1250/*------------------------------------------------------------*/
1251/*--- Setup ---*/
1252/*------------------------------------------------------------*/
1253
njn810086f2002-11-14 12:42:47 +00001254void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00001255{
njn810086f2002-11-14 12:42:47 +00001256 VG_(details_name) ("Addrcheck");
1257 VG_(details_version) (NULL);
1258 VG_(details_description) ("a fine-grained address checker");
1259 VG_(details_copyright_author)(
nethercote08fa9a72004-07-16 17:44:00 +00001260 "Copyright (C) 2002-2004, and GNU GPL'd, by Julian Seward et al.");
nethercote421281e2003-11-20 16:20:55 +00001261 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj78210aa2002-12-01 02:55:46 +00001262 VG_(details_avg_translation_sizeB) ( 135 );
njn25e49d8e72002-09-23 09:36:25 +00001263
njn810086f2002-11-14 12:42:47 +00001264 VG_(needs_core_errors) ();
1265 VG_(needs_skin_errors) ();
1266 VG_(needs_libc_freeres) ();
njn810086f2002-11-14 12:42:47 +00001267 VG_(needs_command_line_options)();
1268 VG_(needs_client_requests) ();
1269 VG_(needs_syscall_wrapper) ();
njn810086f2002-11-14 12:42:47 +00001270 VG_(needs_sanity_checks) ();
fitzhardinge98abfc72003-12-16 02:05:15 +00001271 VG_(needs_shadow_memory) ();
njn25e49d8e72002-09-23 09:36:25 +00001272
njn3e884182003-04-15 13:03:23 +00001273 MAC_( new_mem_heap) = & ac_new_mem_heap;
1274 MAC_( ban_mem_heap) = & ac_make_noaccess;
1275 MAC_(copy_mem_heap) = & ac_copy_address_range_state;
1276 MAC_( die_mem_heap) = & ac_make_noaccess;
sewardjecf8e102003-07-12 12:11:39 +00001277 MAC_(check_noaccess) = & ac_check_noaccess;
njn3e884182003-04-15 13:03:23 +00001278
fitzhardinge98abfc72003-12-16 02:05:15 +00001279 VG_(init_new_mem_startup) ( & ac_new_mem_startup );
1280 VG_(init_new_mem_stack_signal) ( & ac_make_accessible );
1281 VG_(init_new_mem_brk) ( & ac_make_accessible );
1282 VG_(init_new_mem_mmap) ( & ac_set_perms );
njn25e49d8e72002-09-23 09:36:25 +00001283
fitzhardinge98abfc72003-12-16 02:05:15 +00001284 VG_(init_copy_mem_remap) ( & ac_copy_address_range_state );
1285 VG_(init_change_mem_mprotect) ( & ac_set_perms );
njn3e884182003-04-15 13:03:23 +00001286
fitzhardinge98abfc72003-12-16 02:05:15 +00001287 VG_(init_die_mem_stack_signal) ( & ac_make_noaccess );
1288 VG_(init_die_mem_brk) ( & ac_make_noaccess );
1289 VG_(init_die_mem_munmap) ( & ac_make_noaccess );
njn3e884182003-04-15 13:03:23 +00001290
fitzhardinge98abfc72003-12-16 02:05:15 +00001291 VG_(init_new_mem_stack_4) ( & MAC_(new_mem_stack_4) );
1292 VG_(init_new_mem_stack_8) ( & MAC_(new_mem_stack_8) );
1293 VG_(init_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
1294 VG_(init_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
1295 VG_(init_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
1296 VG_(init_new_mem_stack) ( & MAC_(new_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001297
fitzhardinge98abfc72003-12-16 02:05:15 +00001298 VG_(init_die_mem_stack_4) ( & MAC_(die_mem_stack_4) );
1299 VG_(init_die_mem_stack_8) ( & MAC_(die_mem_stack_8) );
1300 VG_(init_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
1301 VG_(init_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
1302 VG_(init_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
1303 VG_(init_die_mem_stack) ( & MAC_(die_mem_stack) );
njn9b007f62003-04-07 14:40:25 +00001304
fitzhardinge98abfc72003-12-16 02:05:15 +00001305 VG_(init_ban_mem_stack) ( & ac_make_noaccess );
njn25e49d8e72002-09-23 09:36:25 +00001306
fitzhardinge98abfc72003-12-16 02:05:15 +00001307 VG_(init_pre_mem_read) ( & ac_check_is_readable );
1308 VG_(init_pre_mem_read_asciiz) ( & ac_check_is_readable_asciiz );
1309 VG_(init_pre_mem_write) ( & ac_check_is_writable );
1310 VG_(init_post_mem_write) ( & ac_make_accessible );
njn25e49d8e72002-09-23 09:36:25 +00001311
njnc2699f62003-09-05 23:29:33 +00001312 VG_(register_compact_helper)((Addr) & ac_helperc_LOAD4);
1313 VG_(register_compact_helper)((Addr) & ac_helperc_LOAD2);
1314 VG_(register_compact_helper)((Addr) & ac_helperc_LOAD1);
1315 VG_(register_compact_helper)((Addr) & ac_helperc_STORE4);
1316 VG_(register_compact_helper)((Addr) & ac_helperc_STORE2);
1317 VG_(register_compact_helper)((Addr) & ac_helperc_STORE1);
1318 VG_(register_noncompact_helper)((Addr) & ac_fpu_READ_check);
1319 VG_(register_noncompact_helper)((Addr) & ac_fpu_WRITE_check);
njn25e49d8e72002-09-23 09:36:25 +00001320
1321 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
1322 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njn9b007f62003-04-07 14:40:25 +00001323 VGP_(register_profile_event) ( VgpESPAdj, "adjust-ESP" );
njnd04b7c62002-10-03 14:05:52 +00001324
1325 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00001326 MAC_(common_pre_clo_init)();
njn5c004e42002-11-18 11:04:50 +00001327}
1328
1329void SK_(post_clo_init) ( void )
1330{
1331}
1332
njn7d9f94d2003-04-22 21:41:40 +00001333void SK_(fini) ( Int exitcode )
njn5c004e42002-11-18 11:04:50 +00001334{
njn3e884182003-04-15 13:03:23 +00001335 MAC_(common_fini)( ac_detect_memory_leaks );
njn25e49d8e72002-09-23 09:36:25 +00001336}
1337
fitzhardinge98abfc72003-12-16 02:05:15 +00001338VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 1./8)
1339
1340
njn25e49d8e72002-09-23 09:36:25 +00001341/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001342/*--- end ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001343/*--------------------------------------------------------------------*/