
/*--------------------------------------------------------------------*/
/*--- The AddrCheck skin: like MemCheck, but only does address    ---*/
/*--- checking.  No definedness checking.                         ---*/
/*---                                                  ac_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of AddrCheck, a lightweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2003 Julian Seward
   jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mac_shared.h"
#include "memcheck.h"
//#include "vg_profile.c"


VG_DETERMINE_INTERFACE_VERSION

/*------------------------------------------------------------*/
/*--- Comparing and printing errors                        ---*/
/*------------------------------------------------------------*/

void SK_(pp_SkinError) ( Error* err )
{
   MAC_Error* err_extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case CoreMemErr:
         VG_(message)(Vg_UserMsg, "%s contains unaddressable byte(s)",
                      VG_(get_error_string)(err));
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      case AddrErr:
         switch (err_extra->axskind) {
            case ReadAxs:
               VG_(message)(Vg_UserMsg, "Invalid read of size %d",
                            err_extra->size );
               break;
            case WriteAxs:
               VG_(message)(Vg_UserMsg, "Invalid write of size %d",
                            err_extra->size );
               break;
            case ExecAxs:
               VG_(message)(Vg_UserMsg, "Jump to the invalid address "
                                        "stated on the next line");
               break;
            default:
               VG_(skin_panic)("SK_(pp_SkinError)(axskind)");
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      case ParamErr:
         VG_(message)(Vg_UserMsg,
                      "Syscall param %s contains unaddressable byte(s)",
                      VG_(get_error_string)(err) );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      case UserErr:
         VG_(message)(Vg_UserMsg,
            "Unaddressable byte(s) found during client check request");
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      default:
         MAC_(pp_shared_SkinError)(err);
         break;
   }
}

/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

Bool SK_(recognised_suppression) ( Char* name, Supp* su )
{
   return MAC_(shared_recognised_suppression)(name, su);
}

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility permissions for
   the 65536 bytes indexed by the lower 16 bits of the address.  Each
   byte is represented by one bit, indicating accessibility.  So each
   second-level map contains 8192 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of
   size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible' for
   all bytes.  Entries in the primary map for which the entire 64k is
   not in use at all point at this distinguished map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

      f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
         = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
         = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address. */
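
/* A worked example of the f(addr) trick (the address below is
   arbitrary, chosen only to illustrate the index arithmetic).  The
   fast path for 4-byte accesses below computes the primary-map index
   as rotateRight16(a) & 0x3FFFF, which is exactly f(a): the two
   lowest address bits land at bits 17:16 of the index, above the 16
   high address bits.

      a = 0x40217A24 (4-aligned, a & 3 == 0):
         index = ((a & 3) << 16) | (a >> 16) = 0x04021
         -> a normal primary map entry

      a = 0x40217A25 (misaligned, a & 3 == 1):
         index = 0x14021, which is >= 0x10000
         -> one of the permanently-distinguished upper 3/4 entries,
            so the check fails and the slow path sorts it out. */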


/*------------------------------------------------------------*/
/*--- Function declarations.                               ---*/
/*------------------------------------------------------------*/

static void ac_ACCESS4_SLOWLY ( Addr a, Bool isWrite );
static void ac_ACCESS2_SLOWLY ( Addr a, Bool isWrite );
static void ac_ACCESS1_SLOWLY ( Addr a, Bool isWrite );
static void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size, Bool isWrite );

/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[8192];
   }
   AcSecMap;

static AcSecMap* primary_map[ /*65536*/ 262144 ];
static AcSecMap  distinguished_secondary_map;
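
/* Rough space cost, as a sketch (assuming a 32-bit host with 4-byte
   pointers): each AcSecMap holds 8192 bytes = 65536 A bits, one bit
   per byte of the 64KB chunk it shadows, and the primary map itself
   is 262144 pointers = 1MB.  Unused chunks all share the single
   distinguished_secondary_map, so untouched address space costs
   nothing beyond the primary map. */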

static void init_shadow_memory ( void )
{
   Int i;

   for (i = 0; i < 8192; i++)             /* Invalid address */
      distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = 65536; i < 262144; i++)
      primary_map[i] = &distinguished_secondary_map;
}

/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
                                       Char* caller )
{
   AcSecMap* map;
   UInt      i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access and invalid value. */

   /* It just happens that an AcSecMap occupies a whole number of
      pages -- although this isn't important, so the following assert
      is spurious. */
   sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   AcSecMap* sm     = primary_map[a >> 16];
   UInt      sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
      if (IS_DISTINGUISHED_SM(sm))
         VG_(message)(Vg_DebugMsg,
                      "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}
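
/* Worked example of the A-bit addressing used above (the address is
   arbitrary, for illustration only), assuming the usual BITARR_*
   convention of bit (index & 7) within byte (index >> 3):

      a = 0x08048A13
         primary_map index : a >> 16     = 0x0804
         secondary offset  : a & 0xFFFF  = 0x8A13
         byte within abits : 0x8A13 >> 3 = 0x1142
         bit within byte   : 0x8A13 & 7  = 3

   so the byte at 0x08048A13 is covered by bit 3 of
   primary_map[0x0804]->abits[0x1142]; per get_abit above, a set bit
   means "not addressible", a clear bit means "addressible". */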

static __inline__ void set_abit ( Addr a, UChar abit )
{
   AcSecMap* sm;
   UInt      sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}



/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static __inline__
void set_address_range_perms ( Addr a, UInt len,
                               UInt example_a_bit )
{
   UChar     abyte8;
   UInt      sm_off;
   AcSecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d",
                   len, example_a_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}

/* Set permissions for address ranges ... */

static void ac_make_noaccess ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID );
}

static void ac_make_accessible ( Addr a, UInt len )
{
   PROF_EVENT(38);
   DEBUG("ac_make_accessible(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID );
}

static __inline__
void make_aligned_word_noaccess(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits invalid (1s). */
   sm->abits[sm_off >> 3] |= mask;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word_accessible(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_accessible");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   mask = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s where we wish to make address bits
      valid (0s). */
   sm->abits[sm_off >> 3] &= ~mask;
   VGP_POPCC(VgpESPAdj);
}

/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_doubleword_accessible(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_accessible");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_doubleword_noaccess(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
   VGP_POPCC(VgpESPAdj);
}

/* The %esp update handling functions */
ESP_UPDATE_HANDLERS ( make_aligned_word_accessible,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_accessible,
                      make_aligned_doubleword_noaccess,
                      ac_make_accessible,
                      ac_make_noaccess
                    );


/* Block-copy permissions (needed for implementing realloc()). */

static void ac_copy_address_range_state ( Addr src, Addr dst, UInt len )
{
   UInt i;

   DEBUG("ac_copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit = get_abit ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
   }
}


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

static __inline__
Bool ac_check_accessible ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(48);
   for (i = 0; i < len; i++) {
      PROF_EVENT(49);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

/* The opposite; check that an address range is inaccessible. */
static
Bool ac_check_noaccess ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(48);
   for (i = 0; i < len; i++) {
      PROF_EVENT(49);
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static __inline__
Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UChar abit;
   PROF_EVENT(46);
   DEBUG("ac_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(47);
      abit = get_abit(a);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return True;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

static __inline__
void ac_check_is_accessible ( CorePart part, ThreadId tid,
                              Char* s, Addr base, UInt size, Bool isWrite )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   ok = ac_check_accessible ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tid, bad_addr, isWrite, s );
         break;

      case Vg_CoreSignal:
         sk_assert(isWrite);     /* Should only happen with isWrite case */
         /* fall through */
      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tid, isWrite, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         sk_assert(!isWrite);    /* Should only happen with !isWrite case */
         MAC_(record_jump_error)( tid, bad_addr );
         break;

      default:
         VG_(skin_panic)("ac_check_is_accessible: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void ac_check_is_writable ( CorePart part, ThreadId tid,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/True );
}

static
void ac_check_is_readable ( CorePart part, ThreadId tid,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/False );
}

static
void ac_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (!ok) {
      MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void ac_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   ac_make_accessible(a, len);
}

static
void ac_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   ac_make_accessible(a, len);
}

static
void ac_set_perms (Addr a, UInt len,
                   Bool rr, Bool ww, Bool xx)
{
   DEBUG("ac_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
         a, len, rr, ww, xx);
   if (rr || ww || xx) {
      ac_make_accessible(a, len);
   } else {
      ac_make_noaccess(a, len);
   }
}


/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.       ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}

static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}


/* Check the addressibility of 1/2/4 sized accesses, and emit an
   address error if needed. */

/* ac_helperc_ACCESS{1,2,4} handle the common case fast.
   Under all other circumstances, they defer to the relevant _SLOWLY
   function, which can handle all situations.
*/
static __inline__ void ac_helperc_ACCESS4 ( Addr a, Bool isWrite )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS4_SLOWLY(a, isWrite);
#  else
   UInt      sec_no = rotateRight16(a) & 0x3FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   UChar     abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(66);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible.  So just return. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS4_SLOWLY(a, isWrite);
   }
#  endif
}

static __inline__ void ac_helperc_ACCESS2 ( Addr a, Bool isWrite )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS2_SLOWLY(a, isWrite);
#  else
   UInt      sec_no = rotateRight16(a) & 0x1FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(67);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS2_SLOWLY(a, isWrite);
   }
#  endif
}

static __inline__ void ac_helperc_ACCESS1 ( Addr a, Bool isWrite )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS1_SLOWLY(a, isWrite);
#  else
   UInt      sec_no = shiftRight16(a);
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(68);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS1_SLOWLY(a, isWrite);
   }
#  endif
}

__attribute__ ((regparm(1)))
static void ac_helperc_LOAD4 ( Addr a )
{
   ac_helperc_ACCESS4 ( a, /*isWrite*/False );
}
__attribute__ ((regparm(1)))
static void ac_helperc_STORE4 ( Addr a )
{
   ac_helperc_ACCESS4 ( a, /*isWrite*/True );
}

__attribute__ ((regparm(1)))
static void ac_helperc_LOAD2 ( Addr a )
{
   ac_helperc_ACCESS2 ( a, /*isWrite*/False );
}
__attribute__ ((regparm(1)))
static void ac_helperc_STORE2 ( Addr a )
{
   ac_helperc_ACCESS2 ( a, /*isWrite*/True );
}

__attribute__ ((regparm(1)))
static void ac_helperc_LOAD1 ( Addr a )
{
   ac_helperc_ACCESS1 ( a, /*isWrite*/False );
}
__attribute__ ((regparm(1)))
static void ac_helperc_STORE1 ( Addr a )
{
   ac_helperc_ACCESS1 ( a, /*isWrite*/True );
}


/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- ac_helperc_ACCESS{1,2,4} can't manage.               ---*/
/*------------------------------------------------------------*/

static void ac_ACCESS4_SLOWLY ( Addr a, Bool isWrite )
{
   Bool a0ok, a1ok, a2ok, a3ok;

   PROF_EVENT(76);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      return;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, isWrite );
      return;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MAC_(clo_partial_loads_ok));
   {
      return;
   }
}

static void ac_ACCESS2_SLOWLY ( Addr a, Bool isWrite )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(77);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, isWrite );
   }
}

static void ac_ACCESS1_SLOWLY ( Addr a, Bool isWrite )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(78);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, isWrite );
   }
}


/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */

static __inline__
void ac_fpu_ACCESS_check ( Addr addr, Int size, Bool isWrite )
{
   /* Ensure the accessed area is addressible; AddrCheck tracks no
      validity bits, so, unlike MemCheck, there is no value error to
      check for here.

      Try to be reasonably fast on the common case; wimp out and defer
      to ac_fpu_ACCESS_check_SLOWLY for everything else. */

   AcSecMap* sm;
   UInt      sm_off, a_off;
   Addr      addr4;

   PROF_EVENT(90);

#  ifdef VG_DEBUG_MEMORY
   ac_fpu_ACCESS_check_SLOWLY ( addr, size, isWrite );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(91);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      return;
     slow4:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 4, isWrite );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(92);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      /* Both halves properly aligned and addressible. */
      return;
     slow8:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 8, isWrite );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(93);
      ac_fpu_ACCESS_check_SLOWLY ( addr, 2, isWrite );
      return;
   }

   if (size == 16 || size == 10 || size == 28 || size == 108) {
      PROF_EVENT(94);
      ac_fpu_ACCESS_check_SLOWLY ( addr, size, isWrite );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
#  endif
}

__attribute__ ((regparm(2)))
static void ac_fpu_READ_check ( Addr addr, Int size )
{
   ac_fpu_ACCESS_check ( addr, size, /*isWrite*/False );
}

__attribute__ ((regparm(2)))
static void ac_fpu_WRITE_check ( Addr addr, Int size )
{
   ac_fpu_ACCESS_check ( addr, size, /*isWrite*/True );
}

/* ---------------------------------------------------------------------
   Slow, general cases for FPU access checks.
   ------------------------------------------------------------------ */

void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size, Bool isWrite )
{
   Int  i;
   Bool aerr = False;
   PROF_EVENT(100);
   for (i = 0; i < size; i++) {
      PROF_EVENT(101);
      if (get_abit(addr+i) != VGM_BIT_VALID)
         aerr = True;
   }

   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, isWrite );
   }
}


/*------------------------------------------------------------*/
/*--- Our instrumenter                                     ---*/
/*------------------------------------------------------------*/

UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
{
/* Use this rather than eg. -1 because it's a UInt. */
#define INVALID_DATA_SIZE 999999

   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_addr, t_size;
   Addr        helper;

   cb = VG_(setup_UCodeBlock)(cb_in);

   for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {

      t_addr = t_size = INVALID_TEMPREG;
      u_in = VG_(get_instr)(cb_in, i);

      switch (u_in->opcode) {
         case NOP:  case LOCK:  case CALLM_E:  case CALLM_S:
            break;

         /* For memory-ref instrs, pass the address (and, for the
          * FPU/MMX/SSE cases, the transfer size) to a checking
          * helper, which is called just before the access itself.
          */
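         /* Sketch of the intended transformation for a simple case
            (shown schematically, not in exact pp_UInstr syntax): a
            4-byte LOAD whose address lives in TempReg t6 becomes

                 CCALL ac_helperc_LOAD4 ( t6 )   -- address check first
                 LOAD  4 bytes via t6            -- original uinstr, unchanged

            Only the address is passed; the data value is never
            examined, since AddrCheck tracks addressibility only. */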
         case LOAD:
            switch (u_in->size) {
               case 4: helper = (Addr)ac_helperc_LOAD4; break;
               case 2: helper = (Addr)ac_helperc_LOAD2; break;
               case 1: helper = (Addr)ac_helperc_LOAD1; break;
               default: VG_(skin_panic)("addrcheck::SK_(instrument):LOAD");
            }
            uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
            uCCall (cb, helper, 1, 1, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         case STORE:
            switch (u_in->size) {
               case 4: helper = (Addr)ac_helperc_STORE4; break;
               case 2: helper = (Addr)ac_helperc_STORE2; break;
               case 1: helper = (Addr)ac_helperc_STORE1; break;
               default: VG_(skin_panic)("addrcheck::SK_(instrument):STORE");
            }
            uInstr1(cb, CCALL, 0, TempReg, u_in->val2);
            uCCall (cb, helper, 1, 1, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         case SSE3ag_MemRd_RegWr:
            sk_assert(u_in->size == 4 || u_in->size == 8);
            helper = (Addr)ac_fpu_READ_check;
            goto do_Access_ARG1;
         do_Access_ARG1:
            sk_assert(u_in->tag1 == TempReg);
            t_addr = u_in->val1;
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, helper, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         case MMX2_MemRd:
            sk_assert(u_in->size == 4 || u_in->size == 8);
            helper = (Addr)ac_fpu_READ_check;
            goto do_Access_ARG2;
         case MMX2_MemWr:
            sk_assert(u_in->size == 4 || u_in->size == 8);
            helper = (Addr)ac_fpu_WRITE_check;
            goto do_Access_ARG2;
         case FPU_R:
            helper = (Addr)ac_fpu_READ_check;
            goto do_Access_ARG2;
         case FPU_W:
            helper = (Addr)ac_fpu_WRITE_check;
            goto do_Access_ARG2;
         do_Access_ARG2:
            sk_assert(u_in->tag2 == TempReg);
            t_addr = u_in->val2;
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, helper, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         case SSE3a_MemRd: // this one causes trouble
         case SSE2a_MemRd:
            helper = (Addr)ac_fpu_READ_check;
            goto do_Access_ARG3;
         case SSE2a_MemWr:
         case SSE3a_MemWr:
            helper = (Addr)ac_fpu_WRITE_check;
            goto do_Access_ARG3;
         do_Access_ARG3:
            sk_assert(u_in->size == 4 || u_in->size == 8 || u_in->size == 16);
            sk_assert(u_in->tag3 == TempReg);
            t_addr = u_in->val3;
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, helper, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         // case SSE2a1_MemRd:
         // case SSE2a1_MemWr:
         // case SSE3a1_MemRd:
         // case SSE3a1_MemWr:
            VG_(pp_UInstr)(0,u_in);
            VG_(skin_panic)("AddrCheck: unhandled SSE uinstr");
            break;

         case SSE3e1_RegRd:
         case SSE3e_RegWr:
         case SSE3g1_RegWr:
         case SSE5:
         case SSE3g_RegWr:
         case SSE3e_RegRd:
         case SSE4:
         default:
            VG_(copy_UInstr)(cb, u_in);
            break;
      }
   }

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}


/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* For the memory leak detector, say whether an entire 64k chunk of
   address space is possibly in use, or not.  If in doubt return
   True.
*/
static
Bool ac_is_valid_64k_chunk ( UInt chunk_number )
{
   sk_assert(chunk_number >= 0 && chunk_number < 65536);
   if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
      /* Definitely not in use. */
      return False;
   } else {
      return True;
   }
}


/* For the memory leak detector, say whether or not a given word
   address is to be regarded as valid. */
static
Bool ac_is_valid_address ( Addr a )
{
   UChar abits;
   sk_assert(IS_ALIGNED4_ADDR(a));
   abits = get_abits4_ALIGNED(a);
   if (abits == VGM_NIBBLE_VALID) {
      return True;
   } else {
      return False;
   }
}


/* Leak detector for this skin.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   skin. */
static void ac_detect_memory_leaks ( void )
{
   MAC_(do_detect_memory_leaks) ( ac_is_valid_64k_chunk, ac_is_valid_address );
}


/* ---------------------------------------------------------------------
   Sanity check machinery (permanently engaged).
   ------------------------------------------------------------------ */

/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failure of
   the following does not per se indicate an internal consistency
   problem, but it is so likely to that we really want to know about
   it if so. */

Bool SK_(cheap_sanity_check) ( void )
{
   if (IS_DISTINGUISHED_SM(primary_map[0])
       /* kludge: kernel drops a page up at top of address range for
          magic "optimized syscalls", so we can no longer check the
          highest page */
       /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
      )
      return True;
   else
      return False;
}

Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map hasn't
      been messed with. */
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != & distinguished_secondary_map)
         return False;

   return True;
}

/*------------------------------------------------------------*/
/*--- Client requests                                      ---*/
/*------------------------------------------------------------*/

Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg_block, UInt *ret )
{
#define IGNORE(what)                                                  \
   do {                                                               \
      if (moans-- > 0) {                                              \
         VG_(message)(Vg_UserMsg,                                     \
            "Warning: Addrcheck: ignoring `%s' request.", what);      \
         VG_(message)(Vg_UserMsg,                                     \
            " To honour this request, rerun with --skin=memcheck.");  \
      }                                                               \
   } while (0)

   UInt* arg = arg_block;
   static Int moans = 3;

   /* Overload memcheck client reqs */
   if (!VG_IS_SKIN_USERREQ('M','C',arg[0]))
      return False;

   switch (arg[0]) {
      case VG_USERREQ__DO_LEAK_CHECK:
         ac_detect_memory_leaks();
         *ret = 0; /* return value is meaningless */
         break;

      /* Ignore these */
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         IGNORE("VALGRIND_CHECK_WRITABLE");
         return False;
      case VG_USERREQ__CHECK_READABLE: /* check readable */
         IGNORE("VALGRIND_CHECK_READABLE");
         return False;
      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         IGNORE("VALGRIND_MAKE_NOACCESS");
         return False;
      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         IGNORE("VALGRIND_MAKE_WRITABLE");
         return False;
      case VG_USERREQ__MAKE_READABLE: /* make readable */
         IGNORE("VALGRIND_MAKE_READABLE");
         return False;
      case VG_USERREQ__DISCARD: /* discard */
         IGNORE("VALGRIND_CHECK_DISCARD");
         return False;

      default:
         if (MAC_(handle_common_client_requests)(tid, arg_block, ret )) {
            return True;
         } else {
            VG_(message)(Vg_UserMsg,
               "Warning: unknown addrcheck client request code %d",
               arg[0]);
            return False;
         }
   }
   return True;

#undef IGNORE
}

/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

Bool SK_(process_cmd_line_option)(Char* arg)
{
   return MAC_(process_common_cmd_line_option)(arg);
}

void SK_(print_usage)(void)
{
   MAC_(print_common_usage)();
}

void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
}


/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

void SK_(pre_clo_init)(void)
{
   VG_(details_name)            ("Addrcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a fine-grained address checker");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2003, and GNU GPL'd, by Julian Seward.");
   VG_(details_bug_reports_to)  ("jseward@acm.org");
   VG_(details_avg_translation_sizeB) ( 135 );

   VG_(needs_core_errors)         ();
   VG_(needs_skin_errors)         ();
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)();
   VG_(needs_client_requests)     ();
   VG_(needs_syscall_wrapper)     ();
   VG_(needs_sanity_checks)       ();

   MAC_( new_mem_heap)  = & ac_new_mem_heap;
   MAC_( ban_mem_heap)  = & ac_make_noaccess;
   MAC_(copy_mem_heap)  = & ac_copy_address_range_state;
   MAC_( die_mem_heap)  = & ac_make_noaccess;
   MAC_(check_noaccess) = & ac_check_noaccess;

   VG_(track_new_mem_startup)      ( & ac_new_mem_startup );
   VG_(track_new_mem_stack_signal) ( & ac_make_accessible );
   VG_(track_new_mem_brk)          ( & ac_make_accessible );
   VG_(track_new_mem_mmap)         ( & ac_set_perms );

   VG_(track_copy_mem_remap)       ( & ac_copy_address_range_state );
   VG_(track_change_mem_mprotect)  ( & ac_set_perms );

   VG_(track_die_mem_stack_signal) ( & ac_make_noaccess );
   VG_(track_die_mem_brk)          ( & ac_make_noaccess );
   VG_(track_die_mem_munmap)       ( & ac_make_noaccess );

   VG_(track_new_mem_stack_4)  ( & MAC_(new_mem_stack_4)  );
   VG_(track_new_mem_stack_8)  ( & MAC_(new_mem_stack_8)  );
   VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
   VG_(track_new_mem_stack)    ( & MAC_(new_mem_stack)    );

   VG_(track_die_mem_stack_4)  ( & MAC_(die_mem_stack_4)  );
   VG_(track_die_mem_stack_8)  ( & MAC_(die_mem_stack_8)  );
   VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
   VG_(track_die_mem_stack)    ( & MAC_(die_mem_stack)    );

   VG_(track_ban_mem_stack)        ( & ac_make_noaccess );

   VG_(track_pre_mem_read)         ( & ac_check_is_readable );
   VG_(track_pre_mem_read_asciiz)  ( & ac_check_is_readable_asciiz );
   VG_(track_pre_mem_write)        ( & ac_check_is_writable );
   VG_(track_post_mem_write)       ( & ac_make_accessible );

   VG_(register_compact_helper)((Addr) & ac_helperc_LOAD4);
   VG_(register_compact_helper)((Addr) & ac_helperc_LOAD2);
   VG_(register_compact_helper)((Addr) & ac_helperc_LOAD1);
   VG_(register_compact_helper)((Addr) & ac_helperc_STORE4);
   VG_(register_compact_helper)((Addr) & ac_helperc_STORE2);
   VG_(register_compact_helper)((Addr) & ac_helperc_STORE1);
   VG_(register_noncompact_helper)((Addr) & ac_fpu_READ_check);
   VG_(register_noncompact_helper)((Addr) & ac_fpu_WRITE_check);

   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VGP_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   init_shadow_memory();
   MAC_(common_pre_clo_init)();
}

void SK_(post_clo_init) ( void )
{
}

void SK_(fini) ( Int exitcode )
{
   MAC_(common_fini)( ac_detect_memory_leaks );
}

/*--------------------------------------------------------------------*/
/*--- end                                                ac_main.c ---*/
/*--------------------------------------------------------------------*/