blob: 92c80d2aa988ad775aa0f94374d77b7bc28225f1 [file] [log] [blame]
njn25e49d8e72002-09-23 09:36:25 +00001
2/*--------------------------------------------------------------------*/
3/*--- The AddrCheck skin: like MemCheck, but only does address ---*/
4/*--- checking. No definedness checking. ---*/
njn25cac76cb2002-09-23 11:21:57 +00005/*--- ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00006/*--------------------------------------------------------------------*/
7
8/*
njnc9539842002-10-02 13:26:35 +00009 This file is part of AddrCheck, a lightweight Valgrind skin for
10 detecting memory errors.
njn25e49d8e72002-09-23 09:36:25 +000011
njn0e1b5142003-04-15 14:58:06 +000012 Copyright (C) 2000-2003 Julian Seward
njn25e49d8e72002-09-23 09:36:25 +000013 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
njn43c799e2003-04-08 00:08:52 +000033#include "mac_shared.h"
sewardjd8033d92002-12-08 22:16:58 +000034#include "memcheck.h"
njn25e49d8e72002-09-23 09:36:25 +000035//#include "vg_profile.c"
36
njn9b007f62003-04-07 14:40:25 +000037
njn27f1a382002-11-08 15:48:16 +000038VG_DETERMINE_INTERFACE_VERSION
39
njn25e49d8e72002-09-23 09:36:25 +000040/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000041/*--- Comparing and printing errors ---*/
42/*------------------------------------------------------------*/
43
njn43c799e2003-04-08 00:08:52 +000044void SK_(pp_SkinError) ( Error* err )
njn25e49d8e72002-09-23 09:36:25 +000045{
njn43c799e2003-04-08 00:08:52 +000046 MAC_Error* err_extra = VG_(get_error_extra)(err);
njn25e49d8e72002-09-23 09:36:25 +000047
njn810086f2002-11-14 12:42:47 +000048 switch (VG_(get_error_kind)(err)) {
njn25e49d8e72002-09-23 09:36:25 +000049 case CoreMemErr:
njn43c799e2003-04-08 00:08:52 +000050 VG_(message)(Vg_UserMsg, "%s contains unaddressable byte(s)",
51 VG_(get_error_string)(err));
52 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn25e49d8e72002-09-23 09:36:25 +000053 break;
54
55 case AddrErr:
56 switch (err_extra->axskind) {
57 case ReadAxs:
58 case WriteAxs:
59 /* These two aren't actually differentiated ever. */
60 VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
61 err_extra->size );
62 break;
63 case ExecAxs:
64 VG_(message)(Vg_UserMsg, "Jump to the invalid address "
65 "stated on the next line");
66 break;
67 default:
njn5c004e42002-11-18 11:04:50 +000068 VG_(skin_panic)("SK_(pp_SkinError)(axskind)");
njn25e49d8e72002-09-23 09:36:25 +000069 }
njn43c799e2003-04-08 00:08:52 +000070 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
71 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000072 break;
73
74 case ParamErr:
njn43c799e2003-04-08 00:08:52 +000075 VG_(message)(Vg_UserMsg,
76 "Syscall param %s contains unaddressable byte(s)",
77 VG_(get_error_string)(err) );
78 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
79 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000080 break;
81
82 case UserErr:
njn43c799e2003-04-08 00:08:52 +000083 VG_(message)(Vg_UserMsg,
84 "Unaddressable byte(s) found during client check request");
85 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
86 MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
njn25e49d8e72002-09-23 09:36:25 +000087 break;
88
89 default:
njn43c799e2003-04-08 00:08:52 +000090 MAC_(pp_shared_SkinError)(err);
91 break;
njn25e49d8e72002-09-23 09:36:25 +000092 }
93}
94
95/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000096/*--- Suppressions ---*/
97/*------------------------------------------------------------*/
98
njn810086f2002-11-14 12:42:47 +000099Bool SK_(recognised_suppression) ( Char* name, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +0000100{
njn43c799e2003-04-08 00:08:52 +0000101 return MAC_(shared_recognised_suppression)(name, su);
njn25e49d8e72002-09-23 09:36:25 +0000102}
103
njn5c004e42002-11-18 11:04:50 +0000104#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
105
njn25e49d8e72002-09-23 09:36:25 +0000106/*------------------------------------------------------------*/
107/*--- Low-level support for memory checking. ---*/
108/*------------------------------------------------------------*/
109
110/* All reads and writes are checked against a memory map, which
111 records the state of all memory in the process. The memory map is
112 organised like this:
113
114 The top 16 bits of an address are used to index into a top-level
115 map table, containing 65536 entries. Each entry is a pointer to a
   second-level map, which records the accessibility and validity
117 permissions for the 65536 bytes indexed by the lower 16 bits of the
118 address. Each byte is represented by one bit, indicating
119 accessibility. So each second-level map contains 8192 bytes. This
120 two-level arrangement conveniently divides the 4G address space
121 into 64k lumps, each size 64k bytes.
122
123 All entries in the primary (top-level) map must point to a valid
124 secondary (second-level) map. Since most of the 4G of address
125 space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressable and
   not valid' for all bytes.  Entries in the primary map for
128 which the entire 64k is not in use at all point at this
129 distinguished map.
130
131 [...] lots of stuff deleted due to out of date-ness
132
133 As a final optimisation, the alignment and address checks for
134 4-byte loads and stores are combined in a neat way. The primary
135 map is extended to have 262144 entries (2^18), rather than 2^16.
136 The top 3/4 of these entries are permanently set to the
137 distinguished secondary map. For a 4-byte load/store, the
138 top-level map is indexed not with (addr >> 16) but instead f(addr),
139 where
140
141 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
142 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
143 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
144
145 ie the lowest two bits are placed above the 16 high address bits.
146 If either of these two bits are nonzero, the address is misaligned;
147 this will select a secondary map from the upper 3/4 of the primary
148 map. Because this is always the distinguished secondary map, a
149 (bogus) address check failure will result. The failure handling
150 code can then figure out whether this is a genuine addr check
151 failure or whether it is a possibly-legitimate access at a
152 misaligned address. */
153
154
155/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000156/*--- Function declarations. ---*/
157/*------------------------------------------------------------*/
158
/* Slow-path handlers, defined towards the end of this file; the fast
   helpers below defer to these for anything non-trivial. */
static void ac_ACCESS4_SLOWLY ( Addr a );
static void ac_ACCESS2_SLOWLY ( Addr a );
static void ac_ACCESS1_SLOWLY ( Addr a );
static void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );
njn25e49d8e72002-09-23 09:36:25 +0000163
164/*------------------------------------------------------------*/
165/*--- Data defns. ---*/
166/*------------------------------------------------------------*/
167
typedef
   struct {
      /* One A (addressability) bit per byte of a 64k chunk:
         64k bits == 8192 bytes. */
      UChar abits[8192];
   }
   AcSecMap;

/* 2^18 entries rather than 2^16: the upper 3/4 are permanently the
   distinguished map, used as the misalignment trap for 4-byte
   accesses (see the scheme described above). */
static AcSecMap* primary_map[ /*65536*/ 262144 ];
static AcSecMap  distinguished_secondary_map;
176
njn25e49d8e72002-09-23 09:36:25 +0000177static void init_shadow_memory ( void )
178{
179 Int i;
180
181 for (i = 0; i < 8192; i++) /* Invalid address */
182 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
183
184 /* These entries gradually get overwritten as the used address
185 space expands. */
186 for (i = 0; i < 65536; i++)
187 primary_map[i] = &distinguished_secondary_map;
188
189 /* These ones should never change; it's a bug in Valgrind if they do. */
190 for (i = 65536; i < 262144; i++)
191 primary_map[i] = &distinguished_secondary_map;
192}
193
njn25e49d8e72002-09-23 09:36:25 +0000194/*------------------------------------------------------------*/
195/*--- Basic bitmap management, reading and writing. ---*/
196/*------------------------------------------------------------*/
197
198/* Allocate and initialise a secondary map. */
199
200static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
201 Char* caller )
202{
203 AcSecMap* map;
204 UInt i;
205 PROF_EVENT(10);
206
207 /* Mark all bytes as invalid access and invalid value. */
208
209 /* It just happens that a AcSecMap occupies exactly 18 pages --
210 although this isn't important, so the following assert is
211 spurious. */
njne427a662002-10-02 11:08:25 +0000212 sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000213 map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );
214
215 for (i = 0; i < 8192; i++)
216 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
217
218 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
219 return map;
220}
221
222
223/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
224
225static __inline__ UChar get_abit ( Addr a )
226{
227 AcSecMap* sm = primary_map[a >> 16];
228 UInt sm_off = a & 0xFFFF;
229 PROF_EVENT(20);
230# if 0
231 if (IS_DISTINGUISHED_SM(sm))
232 VG_(message)(Vg_DebugMsg,
233 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
234# endif
235 return BITARR_TEST(sm->abits, sm_off)
236 ? VGM_BIT_INVALID : VGM_BIT_VALID;
237}
238
239static __inline__ void set_abit ( Addr a, UChar abit )
240{
241 AcSecMap* sm;
242 UInt sm_off;
243 PROF_EVENT(22);
244 ENSURE_MAPPABLE(a, "set_abit");
245 sm = primary_map[a >> 16];
246 sm_off = a & 0xFFFF;
247 if (abit)
248 BITARR_SET(sm->abits, sm_off);
249 else
250 BITARR_CLEAR(sm->abits, sm_off);
251}
252
253
254/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
255
256static __inline__ UChar get_abits4_ALIGNED ( Addr a )
257{
258 AcSecMap* sm;
259 UInt sm_off;
260 UChar abits8;
261 PROF_EVENT(24);
262# ifdef VG_DEBUG_MEMORY
njne427a662002-10-02 11:08:25 +0000263 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000264# endif
265 sm = primary_map[a >> 16];
266 sm_off = a & 0xFFFF;
267 abits8 = sm->abits[sm_off >> 3];
268 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
269 abits8 &= 0x0F;
270 return abits8;
271}
272
273
274
275/*------------------------------------------------------------*/
276/*--- Setting permissions over address ranges. ---*/
277/*------------------------------------------------------------*/
278
sewardj5de6ee02002-12-14 23:11:35 +0000279static __inline__
280void set_address_range_perms ( Addr a, UInt len,
281 UInt example_a_bit )
njn25e49d8e72002-09-23 09:36:25 +0000282{
283 UChar abyte8;
284 UInt sm_off;
285 AcSecMap* sm;
286
287 PROF_EVENT(30);
288
289 if (len == 0)
290 return;
291
292 if (len > 100 * 1000 * 1000) {
293 VG_(message)(Vg_UserMsg,
294 "Warning: set address range perms: "
295 "large range %u, a %d",
296 len, example_a_bit );
297 }
298
299 VGP_PUSHCC(VgpSetMem);
300
301 /* Requests to change permissions of huge address ranges may
302 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
303 far all legitimate requests have fallen beneath that size. */
304 /* 4 Mar 02: this is just stupid; get rid of it. */
njne427a662002-10-02 11:08:25 +0000305 /* sk_assert(len < 30000000); */
njn25e49d8e72002-09-23 09:36:25 +0000306
307 /* Check the permissions make sense. */
njne427a662002-10-02 11:08:25 +0000308 sk_assert(example_a_bit == VGM_BIT_VALID
njn25e49d8e72002-09-23 09:36:25 +0000309 || example_a_bit == VGM_BIT_INVALID);
310
311 /* In order that we can charge through the address space at 8
312 bytes/main-loop iteration, make up some perms. */
313 abyte8 = (example_a_bit << 7)
314 | (example_a_bit << 6)
315 | (example_a_bit << 5)
316 | (example_a_bit << 4)
317 | (example_a_bit << 3)
318 | (example_a_bit << 2)
319 | (example_a_bit << 1)
320 | (example_a_bit << 0);
321
322# ifdef VG_DEBUG_MEMORY
323 /* Do it ... */
324 while (True) {
325 PROF_EVENT(31);
326 if (len == 0) break;
327 set_abit ( a, example_a_bit );
328 set_vbyte ( a, vbyte );
329 a++;
330 len--;
331 }
332
333# else
334 /* Slowly do parts preceding 8-byte alignment. */
335 while (True) {
336 PROF_EVENT(31);
337 if (len == 0) break;
338 if ((a % 8) == 0) break;
339 set_abit ( a, example_a_bit );
340 a++;
341 len--;
342 }
343
344 if (len == 0) {
345 VGP_POPCC(VgpSetMem);
346 return;
347 }
njne427a662002-10-02 11:08:25 +0000348 sk_assert((a % 8) == 0 && len > 0);
njn25e49d8e72002-09-23 09:36:25 +0000349
350 /* Once aligned, go fast. */
351 while (True) {
352 PROF_EVENT(32);
353 if (len < 8) break;
354 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
355 sm = primary_map[a >> 16];
356 sm_off = a & 0xFFFF;
357 sm->abits[sm_off >> 3] = abyte8;
358 a += 8;
359 len -= 8;
360 }
361
362 if (len == 0) {
363 VGP_POPCC(VgpSetMem);
364 return;
365 }
njne427a662002-10-02 11:08:25 +0000366 sk_assert((a % 8) == 0 && len > 0 && len < 8);
njn25e49d8e72002-09-23 09:36:25 +0000367
368 /* Finish the upper fragment. */
369 while (True) {
370 PROF_EVENT(33);
371 if (len == 0) break;
372 set_abit ( a, example_a_bit );
373 a++;
374 len--;
375 }
376# endif
377
378 /* Check that zero page and highest page have not been written to
379 -- this could happen with buggy syscall wrappers. Today
380 (2001-04-26) had precisely such a problem with __NR_setitimer. */
njne427a662002-10-02 11:08:25 +0000381 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +0000382 VGP_POPCC(VgpSetMem);
383}
384
385/* Set permissions for address ranges ... */
386
njn5c004e42002-11-18 11:04:50 +0000387static void ac_make_noaccess ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000388{
389 PROF_EVENT(35);
njn5c004e42002-11-18 11:04:50 +0000390 DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000391 set_address_range_perms ( a, len, VGM_BIT_INVALID );
392}
393
njn5c004e42002-11-18 11:04:50 +0000394static void ac_make_accessible ( Addr a, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000395{
njn5c004e42002-11-18 11:04:50 +0000396 PROF_EVENT(38);
397 DEBUG("ac_make_accessible(%p, %x)\n", a, len);
njn25e49d8e72002-09-23 09:36:25 +0000398 set_address_range_perms ( a, len, VGM_BIT_VALID );
399}
400
njn9b007f62003-04-07 14:40:25 +0000401static __inline__
402void make_aligned_word_noaccess(Addr a)
403{
404 AcSecMap* sm;
405 UInt sm_off;
406 UChar mask;
407
408 VGP_PUSHCC(VgpESPAdj);
409 ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
410 sm = primary_map[a >> 16];
411 sm_off = a & 0xFFFF;
412 mask = 0x0F;
413 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
414 /* mask now contains 1s where we wish to make address bits invalid (1s). */
415 sm->abits[sm_off >> 3] |= mask;
416 VGP_POPCC(VgpESPAdj);
417}
418
419static __inline__
420void make_aligned_word_accessible(Addr a)
421{
422 AcSecMap* sm;
423 UInt sm_off;
424 UChar mask;
425
426 VGP_PUSHCC(VgpESPAdj);
427 ENSURE_MAPPABLE(a, "make_aligned_word_accessible");
428 sm = primary_map[a >> 16];
429 sm_off = a & 0xFFFF;
430 mask = 0x0F;
431 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
432 /* mask now contains 1s where we wish to make address bits
433 invalid (0s). */
434 sm->abits[sm_off >> 3] &= ~mask;
435 VGP_POPCC(VgpESPAdj);
436}
437
438/* Nb: by "aligned" here we mean 8-byte aligned */
439static __inline__
440void make_aligned_doubleword_accessible(Addr a)
441{
442 AcSecMap* sm;
443 UInt sm_off;
444
445 VGP_PUSHCC(VgpESPAdj);
446 ENSURE_MAPPABLE(a, "make_aligned_doubleword_accessible");
447 sm = primary_map[a >> 16];
448 sm_off = a & 0xFFFF;
449 sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
450 VGP_POPCC(VgpESPAdj);
451}
452
453static __inline__
454void make_aligned_doubleword_noaccess(Addr a)
455{
456 AcSecMap* sm;
457 UInt sm_off;
458
459 VGP_PUSHCC(VgpESPAdj);
460 ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
461 sm = primary_map[a >> 16];
462 sm_off = a & 0xFFFF;
463 sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
464 VGP_POPCC(VgpESPAdj);
465}
466
/* The %esp update handling functions.  This macro instantiates the
   stack-pointer-adjustment handlers from the fast aligned primitives
   above, with the general range functions as the fallback. */
ESP_UPDATE_HANDLERS ( make_aligned_word_accessible,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_accessible,
                      make_aligned_doubleword_noaccess,
                      ac_make_accessible,
                      ac_make_noaccess
                    );
475
476
njn25e49d8e72002-09-23 09:36:25 +0000477/* Block-copy permissions (needed for implementing realloc()). */
478
njn5c004e42002-11-18 11:04:50 +0000479static void ac_copy_address_range_state ( Addr src, Addr dst, UInt len )
njn25e49d8e72002-09-23 09:36:25 +0000480{
481 UInt i;
482
njn5c004e42002-11-18 11:04:50 +0000483 DEBUG("ac_copy_address_range_state\n");
njn25e49d8e72002-09-23 09:36:25 +0000484
485 PROF_EVENT(40);
486 for (i = 0; i < len; i++) {
487 UChar abit = get_abit ( src+i );
488 PROF_EVENT(41);
489 set_abit ( dst+i, abit );
490 }
491}
492
493
494/* Check permissions for address range. If inadequate permissions
495 exist, *bad_addr is set to the offending address, so the caller can
496 know what it is. */
497
njn5c004e42002-11-18 11:04:50 +0000498static __inline__
499Bool ac_check_accessible ( Addr a, UInt len, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000500{
501 UInt i;
502 UChar abit;
njn5c004e42002-11-18 11:04:50 +0000503 PROF_EVENT(48);
njn25e49d8e72002-09-23 09:36:25 +0000504 for (i = 0; i < len; i++) {
njn5c004e42002-11-18 11:04:50 +0000505 PROF_EVENT(49);
njn25e49d8e72002-09-23 09:36:25 +0000506 abit = get_abit(a);
507 if (abit == VGM_BIT_INVALID) {
508 if (bad_addr != NULL) *bad_addr = a;
509 return False;
510 }
511 a++;
512 }
513 return True;
514}
515
njn25e49d8e72002-09-23 09:36:25 +0000516/* Check a zero-terminated ascii string. Tricky -- don't want to
517 examine the actual bytes, to find the end, until we're sure it is
518 safe to do so. */
519
njn5c004e42002-11-18 11:04:50 +0000520static __inline__
521Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
njn25e49d8e72002-09-23 09:36:25 +0000522{
523 UChar abit;
524 PROF_EVENT(46);
njn5c004e42002-11-18 11:04:50 +0000525 DEBUG("ac_check_readable_asciiz\n");
njn25e49d8e72002-09-23 09:36:25 +0000526 while (True) {
527 PROF_EVENT(47);
528 abit = get_abit(a);
529 if (abit != VGM_BIT_VALID) {
530 if (bad_addr != NULL) *bad_addr = a;
531 return False;
532 }
533 /* Ok, a is safe to read. */
534 if (* ((UChar*)a) == 0) return True;
535 a++;
536 }
537}
538
539
540/*------------------------------------------------------------*/
541/*--- Memory event handlers ---*/
542/*------------------------------------------------------------*/
543
/* Central handler for core-requested accessibility checks.  If any
   byte of [base, base+size) is unaddressable, record an error whose
   flavour depends on which part of the core asked (part); s is the
   human-readable description used in the report. */
static __inline__
void ac_check_is_accessible ( CorePart part, ThreadState* tst,
                              Char* s, Addr base, UInt size, Bool isWrite )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   ok = ac_check_accessible ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
      case Vg_CoreSysCall:
         MAC_(record_param_error) ( tst, bad_addr, isWrite, s );
         break;

      case Vg_CoreSignal:
         sk_assert(isWrite);    /* Should only happen with isWrite case */
         /* fall through */
      case Vg_CorePThread:
         MAC_(record_core_mem_error)( tst, isWrite, s );
         break;

      /* If we're being asked to jump to a silly address, record an error
         message before potentially crashing the entire system. */
      case Vg_CoreTranslate:
         sk_assert(!isWrite);    /* Should only happen with !isWrite case */
         MAC_(record_jump_error)( tst, bad_addr );
         break;

      default:
         VG_(skin_panic)("ac_check_is_accessible: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}
581
/* Core interface: check that [base, base+size) may be written. */
static
void ac_check_is_writable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/True );
}
588
/* Core interface: check that [base, base+size) may be read.  For
   AddrCheck this is the same addressability test as for writes. */
static
void ac_check_is_readable ( CorePart part, ThreadState* tst,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tst, s, base, size, /*isWrite*/False );
}
595
596static
597void ac_check_is_readable_asciiz ( CorePart part, ThreadState* tst,
598 Char* s, Addr str )
njn25e49d8e72002-09-23 09:36:25 +0000599{
600 Bool ok = True;
601 Addr bad_addr;
602 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
603
604 VGP_PUSHCC(VgpCheckMem);
605
njne427a662002-10-02 11:08:25 +0000606 sk_assert(part == Vg_CoreSysCall);
njn5c004e42002-11-18 11:04:50 +0000607 ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
njn25e49d8e72002-09-23 09:36:25 +0000608 if (!ok) {
njn43c799e2003-04-08 00:08:52 +0000609 MAC_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
njn25e49d8e72002-09-23 09:36:25 +0000610 }
611
612 VGP_POPCC(VgpCheckMem);
613}
614
/* Track memory the client maps at startup.  rr/ww/xx are the mmap
   permissions; they are deliberately ignored here. */
static
void ac_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   ac_make_accessible(a, len);
}
622
/* New heap memory is addressable regardless of is_inited; AddrCheck
   tracks no definedness, so is_inited is irrelevant here. */
static
void ac_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   ac_make_accessible(a, len);
}
628
629static
njn5c004e42002-11-18 11:04:50 +0000630void ac_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +0000631 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000632{
njn5c004e42002-11-18 11:04:50 +0000633 DEBUG("ac_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
sewardj40f8ebe2002-10-23 21:46:13 +0000634 a, len, rr, ww, xx);
njn25e49d8e72002-09-23 09:36:25 +0000635 if (rr || ww || xx) {
njn5c004e42002-11-18 11:04:50 +0000636 ac_make_accessible(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000637 } else {
njn5c004e42002-11-18 11:04:50 +0000638 ac_make_noaccess(a, len);
njn25e49d8e72002-09-23 09:36:25 +0000639 }
640}
641
642
643/*------------------------------------------------------------*/
644/*--- Functions called directly from generated code. ---*/
645/*------------------------------------------------------------*/
646
647static __inline__ UInt rotateRight16 ( UInt x )
648{
649 /* Amazingly, gcc turns this into a single rotate insn. */
650 return (x >> 16) | (x << 16);
651}
652
/* The top 16 bits of x, i.e. the primary-map index of address x. */
static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}
657
658
659/* Read/write 1/2/4 sized V bytes, and emit an address error if
660 needed. */
661
njn5c004e42002-11-18 11:04:50 +0000662/* ac_helperc_ACCESS{1,2,4} handle the common case fast.
njn25e49d8e72002-09-23 09:36:25 +0000663 Under all other circumstances, it defers to the relevant _SLOWLY
664 function, which can handle all situations.
665*/
666__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000667static void ac_helperc_ACCESS4 ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000668{
669# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000670 return ac_ACCESS4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000671# else
672 UInt sec_no = rotateRight16(a) & 0x3FFFF;
673 AcSecMap* sm = primary_map[sec_no];
674 UInt a_off = (a & 0xFFFF) >> 3;
675 UChar abits = sm->abits[a_off];
676 abits >>= (a & 4);
677 abits &= 15;
njn5c004e42002-11-18 11:04:50 +0000678 PROF_EVENT(66);
njn25e49d8e72002-09-23 09:36:25 +0000679 if (abits == VGM_NIBBLE_VALID) {
680 /* Handle common case quickly: a is suitably aligned, is mapped,
681 and is addressible. So just return. */
682 return;
683 } else {
684 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000685 ac_ACCESS4_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000686 }
687# endif
688}
689
690__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000691static void ac_helperc_ACCESS2 ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000692{
693# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000694 return ac_ACCESS2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000695# else
696 UInt sec_no = rotateRight16(a) & 0x1FFFF;
697 AcSecMap* sm = primary_map[sec_no];
698 UInt a_off = (a & 0xFFFF) >> 3;
njn5c004e42002-11-18 11:04:50 +0000699 PROF_EVENT(67);
njn25e49d8e72002-09-23 09:36:25 +0000700 if (sm->abits[a_off] == VGM_BYTE_VALID) {
701 /* Handle common case quickly. */
702 return;
703 } else {
704 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000705 ac_ACCESS2_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000706 }
707# endif
708}
709
710__attribute__ ((regparm(1)))
njn5c004e42002-11-18 11:04:50 +0000711static void ac_helperc_ACCESS1 ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000712{
713# ifdef VG_DEBUG_MEMORY
njn5c004e42002-11-18 11:04:50 +0000714 return ac_ACCESS1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000715# else
716 UInt sec_no = shiftRight16(a);
717 AcSecMap* sm = primary_map[sec_no];
718 UInt a_off = (a & 0xFFFF) >> 3;
njn5c004e42002-11-18 11:04:50 +0000719 PROF_EVENT(68);
njn25e49d8e72002-09-23 09:36:25 +0000720 if (sm->abits[a_off] == VGM_BYTE_VALID) {
721 /* Handle common case quickly. */
722 return;
723 } else {
724 /* Slow but general case. */
njn5c004e42002-11-18 11:04:50 +0000725 ac_ACCESS1_SLOWLY(a);
njn25e49d8e72002-09-23 09:36:25 +0000726 }
727# endif
728}
729
730
731/*------------------------------------------------------------*/
732/*--- Fallback functions to handle cases that the above ---*/
733/*--- VG_(helperc_ACCESS{1,2,4}) can't manage. ---*/
734/*------------------------------------------------------------*/
735
njn5c004e42002-11-18 11:04:50 +0000736static void ac_ACCESS4_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000737{
738 Bool a0ok, a1ok, a2ok, a3ok;
739
njn5c004e42002-11-18 11:04:50 +0000740 PROF_EVENT(76);
njn25e49d8e72002-09-23 09:36:25 +0000741
742 /* First establish independently the addressibility of the 4 bytes
743 involved. */
744 a0ok = get_abit(a+0) == VGM_BIT_VALID;
745 a1ok = get_abit(a+1) == VGM_BIT_VALID;
746 a2ok = get_abit(a+2) == VGM_BIT_VALID;
747 a3ok = get_abit(a+3) == VGM_BIT_VALID;
748
749 /* Now distinguish 3 cases */
750
751 /* Case 1: the address is completely valid, so:
752 - no addressing error
753 */
754 if (a0ok && a1ok && a2ok && a3ok) {
755 return;
756 }
757
758 /* Case 2: the address is completely invalid.
759 - emit addressing error
760 */
761 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
njn43c799e2003-04-08 00:08:52 +0000762 if (!MAC_(clo_partial_loads_ok)
njn25e49d8e72002-09-23 09:36:25 +0000763 || ((a & 3) != 0)
764 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
njn43c799e2003-04-08 00:08:52 +0000765 MAC_(record_address_error)( a, 4, False );
njn25e49d8e72002-09-23 09:36:25 +0000766 return;
767 }
768
769 /* Case 3: the address is partially valid.
770 - no addressing error
njn43c799e2003-04-08 00:08:52 +0000771 Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
njn25e49d8e72002-09-23 09:36:25 +0000772 (which is the default), and the address is 4-aligned.
773 If not, Case 2 will have applied.
774 */
njn43c799e2003-04-08 00:08:52 +0000775 sk_assert(MAC_(clo_partial_loads_ok));
njn25e49d8e72002-09-23 09:36:25 +0000776 {
777 return;
778 }
779}
780
njn5c004e42002-11-18 11:04:50 +0000781static void ac_ACCESS2_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000782{
783 /* Check the address for validity. */
784 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000785 PROF_EVENT(77);
njn25e49d8e72002-09-23 09:36:25 +0000786
787 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
788 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
789
790 /* If an address error has happened, report it. */
791 if (aerr) {
njn43c799e2003-04-08 00:08:52 +0000792 MAC_(record_address_error)( a, 2, False );
njn25e49d8e72002-09-23 09:36:25 +0000793 }
794}
795
njn5c004e42002-11-18 11:04:50 +0000796static void ac_ACCESS1_SLOWLY ( Addr a )
njn25e49d8e72002-09-23 09:36:25 +0000797{
798 /* Check the address for validity. */
799 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000800 PROF_EVENT(78);
njn25e49d8e72002-09-23 09:36:25 +0000801
802 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
803
804 /* If an address error has happened, report it. */
805 if (aerr) {
njn43c799e2003-04-08 00:08:52 +0000806 MAC_(record_address_error)( a, 1, False );
njn25e49d8e72002-09-23 09:36:25 +0000807 }
808}
809
810
811/* ---------------------------------------------------------------------
812 FPU load and store checks, called from generated code.
813 ------------------------------------------------------------------ */
814
/* Check that [addr, addr+size) is addressable for an FPU/SSE-style
   load or store.  Fast paths exist for aligned 4- and 8-byte
   accesses; everything else is handed to
   ac_fpu_ACCESS_check_SLOWLY. */
__attribute__ ((regparm(2)))
static void ac_fpu_ACCESS_check ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to ac_fpu_ACCESS_check_SLOWLY for everything else.  */

   AcSecMap* sm;
   UInt      sm_off, a_off;
   Addr      addr4;

   PROF_EVENT(90);

#  ifdef VG_DEBUG_MEMORY
   ac_fpu_ACCESS_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(91);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      return;
     slow4:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(92);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      /* Both halves properly aligned and addressible. */
      return;
     slow8:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly.  */
   if (size == 2) {
      PROF_EVENT(93);
      ac_fpu_ACCESS_check_SLOWLY ( addr, 2 );
      return;
   }

   /* Remaining sizes presumably correspond to x87/SSE state images --
      NOTE(review): confirm against the callers in the core. */
   if (size == 16 || size == 10 || size == 28 || size == 108) {
      PROF_EVENT(94);
      ac_fpu_ACCESS_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
#  endif
}
893
894
895/* ---------------------------------------------------------------------
896 Slow, general cases for FPU access checks.
897 ------------------------------------------------------------------ */
898
njn5c004e42002-11-18 11:04:50 +0000899void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
njn25e49d8e72002-09-23 09:36:25 +0000900{
901 Int i;
902 Bool aerr = False;
njn5c004e42002-11-18 11:04:50 +0000903 PROF_EVENT(100);
njn25e49d8e72002-09-23 09:36:25 +0000904 for (i = 0; i < size; i++) {
njn5c004e42002-11-18 11:04:50 +0000905 PROF_EVENT(101);
njn25e49d8e72002-09-23 09:36:25 +0000906 if (get_abit(addr+i) != VGM_BIT_VALID)
907 aerr = True;
908 }
909
910 if (aerr) {
njn43c799e2003-04-08 00:08:52 +0000911 MAC_(record_address_error)( addr, size, False );
njn25e49d8e72002-09-23 09:36:25 +0000912 }
913}
914
915
916/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000917/*--- Our instrumenter ---*/
918/*------------------------------------------------------------*/
919
/* UCode instrumenter.  Walks the incoming basic block cb_in and, for
   every memory-referencing uinstr, emits a C call to the appropriate
   address-check helper immediately before the copied uinstr.  Returns
   a freshly built UCodeBlock; cb_in is freed before returning. */
UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
{
/* Use this rather than eg. -1 because it's a UInt. */
#define INVALID_DATA_SIZE   999999
/* NOTE(review): INVALID_DATA_SIZE appears unused in this function;
   t_addr/t_size are initialised with INVALID_TEMPREG below. */

   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_addr, t_size;

   cb = VG_(setup_UCodeBlock)(cb_in);

   for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {

      t_addr = t_size = INVALID_TEMPREG;
      u_in = VG_(get_instr)(cb_in, i);

      switch (u_in->opcode) {
         /* These carry no memory reference and are not copied into
            the output block. */
         case NOP:  case LOCK:  case CALLM_E:  case CALLM_S:
            break;

         /* For memory-ref instrs, copy the data_addr into a temporary to be
          * passed to the helper at the end of the instruction.
          */
         case LOAD:
            t_addr = u_in->val1;          /* LOAD: address is in val1 */
            goto do_LOAD_or_STORE;
         case STORE: t_addr = u_in->val2; /* STORE: address is in val2 */
            goto do_LOAD_or_STORE;
         do_LOAD_or_STORE:
            /* One-argument C call to the size-specific compact helper,
               passing the address temporary. */
            uInstr1(cb, CCALL, 0, TempReg, t_addr);
            switch (u_in->size) {
               case 4: uCCall(cb, (Addr) & ac_helperc_ACCESS4, 1, 1, False );
                  break;
               case 2: uCCall(cb, (Addr) & ac_helperc_ACCESS2, 1, 1, False );
                  break;
               case 1: uCCall(cb, (Addr) & ac_helperc_ACCESS1, 1, 1, False );
                  break;
               default:
                  VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* MMX and x87 memory refs: address temporary is in val2. */
         case MMX2_MemRd:
         case MMX2_MemWr:
            sk_assert(u_in->size == 4 || u_in->size == 8);
            goto do_Access_ARG2;
         case FPU_R:
         case FPU_W:
            goto do_Access_ARG2;
         do_Access_ARG2:
            sk_assert(u_in->tag2 == TempReg);
            t_addr = u_in->val2;
            /* Two-argument call to ac_fpu_ACCESS_check(addr, size);
               the size is materialised as a literal in a fresh temp. */
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* SSE memory refs: address temporary is in val3. */
         case SSE3a_MemRd: // this one causes trouble
         case SSE2a_MemRd:
         case SSE2a_MemWr:
         case SSE3a_MemWr:
            sk_assert(u_in->size == 4 || u_in->size == 8
                      || u_in->size == 16);
            goto do_Access_ARG3;
         do_Access_ARG3:
            sk_assert(u_in->tag3 == TempReg);
            t_addr = u_in->val3;
            /* Same (addr, size) call sequence as the ARG2 case. */
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* SSE uinstrs not yet handled: panic loudly rather than
            silently skipping an access check. */
         // case SSE2a1_MemRd:
         // case SSE2a1_MemWr:
         case SSE3g1_RegWr:
         case SSE3g1_RegRd:
         // case SSE3ag_MemRd_RegWr:
         // case SSE3a1_MemRd:
         // case SSE3a1_MemWr:
            VG_(pp_UInstr)(0,u_in);
            VG_(skin_panic)("AddrCheck: unhandled SSE uinstr");
            break;

         /* Register-only SSE ops, and everything else: no memory
            reference, so just copy through unchanged. */
         case SSE5:
         case SSE3g_RegWr:
         case SSE3g_RegRd:
         case SSE4:
         default:
            VG_(copy_UInstr)(cb, u_in);
            break;
      }
   }

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}
1024
1025
njn25e49d8e72002-09-23 09:36:25 +00001026/*------------------------------------------------------------*/
1027/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1028/*------------------------------------------------------------*/
1029
sewardja4495682002-10-21 07:29:59 +00001030/* For the memory leak detector, say whether an entire 64k chunk of
1031 address space is possibly in use, or not. If in doubt return
1032 True.
njn25e49d8e72002-09-23 09:36:25 +00001033*/
sewardja4495682002-10-21 07:29:59 +00001034static
1035Bool ac_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001036{
sewardja4495682002-10-21 07:29:59 +00001037 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1038 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1039 /* Definitely not in use. */
1040 return False;
1041 } else {
1042 return True;
njn25e49d8e72002-09-23 09:36:25 +00001043 }
1044}
1045
1046
sewardja4495682002-10-21 07:29:59 +00001047/* For the memory leak detector, say whether or not a given word
1048 address is to be regarded as valid. */
1049static
1050Bool ac_is_valid_address ( Addr a )
1051{
1052 UChar abits;
1053 sk_assert(IS_ALIGNED4_ADDR(a));
1054 abits = get_abits4_ALIGNED(a);
1055 if (abits == VGM_NIBBLE_VALID) {
1056 return True;
1057 } else {
1058 return False;
1059 }
1060}
1061
1062
/* Leak detector for this skin.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   skin. */
static void ac_detect_memory_leaks ( void )
{
   /* The generic detector needs two predicates: which 64k chunks
      might contain live pointers at all, and which word addresses
      hold addressible data.  Supply the AddrCheck versions above. */
   MAC_(do_detect_memory_leaks) ( ac_is_valid_64k_chunk, ac_is_valid_address );
}
1070
1071
1072/* ---------------------------------------------------------------------
1073 Sanity check machinery (permanently engaged).
1074 ------------------------------------------------------------------ */
1075
1076/* Check that nobody has spuriously claimed that the first or last 16
1077 pages (64 KB) of address space have become accessible. Failure of
1078 the following do not per se indicate an internal consistency
1079 problem, but they are so likely to that we really want to know
1080 about it if so. */
1081
1082Bool SK_(cheap_sanity_check) ( void )
1083{
sewardjd5815ec2003-04-06 12:23:27 +00001084 if (IS_DISTINGUISHED_SM(primary_map[0])
1085 /* kludge: kernel drops a page up at top of address range for
1086 magic "optimized syscalls", so we can no longer check the
1087 highest page */
1088 /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
1089 )
njn25e49d8e72002-09-23 09:36:25 +00001090 return True;
1091 else
1092 return False;
1093}
1094
1095Bool SK_(expensive_sanity_check) ( void )
1096{
1097 Int i;
1098
1099 /* Make sure nobody changed the distinguished secondary. */
1100 for (i = 0; i < 8192; i++)
1101 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
1102 return False;
1103
1104 /* Make sure that the upper 3/4 of the primary map hasn't
1105 been messed with. */
1106 for (i = 65536; i < 262144; i++)
1107 if (primary_map[i] != & distinguished_secondary_map)
1108 return False;
1109
1110 return True;
1111}
1112
njn47363ab2003-04-21 13:24:40 +00001113/*------------------------------------------------------------*/
1114/*--- Client requests ---*/
1115/*------------------------------------------------------------*/
1116
sewardjd8033d92002-12-08 22:16:58 +00001117Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt *ret )
1118{
sewardjbf310d92002-12-28 13:09:57 +00001119#define IGNORE(what) \
1120 do { \
1121 if (moans-- > 0) { \
1122 VG_(message)(Vg_UserMsg, \
1123 "Warning: Addrcheck: ignoring `%s' request.", what); \
1124 VG_(message)(Vg_UserMsg, \
1125 " To honour this request, rerun with --skin=memcheck."); \
1126 } \
1127 } while (0)
1128
sewardjd8033d92002-12-08 22:16:58 +00001129 UInt* arg = arg_block;
sewardjbf310d92002-12-28 13:09:57 +00001130 static Int moans = 3;
sewardjd8033d92002-12-08 22:16:58 +00001131
1132 /* Overload memcheck client reqs */
1133 if (!VG_IS_SKIN_USERREQ('M','C',arg[0]))
1134 return False;
1135
1136 switch (arg[0]) {
1137 case VG_USERREQ__DO_LEAK_CHECK:
1138 ac_detect_memory_leaks();
1139 *ret = 0; /* return value is meaningless */
1140 break;
1141
sewardjbf310d92002-12-28 13:09:57 +00001142 /* Ignore these */
sewardjd8033d92002-12-08 22:16:58 +00001143 case VG_USERREQ__CHECK_WRITABLE: /* check writable */
sewardjbf310d92002-12-28 13:09:57 +00001144 IGNORE("VALGRIND_CHECK_WRITABLE");
1145 return False;
sewardjd8033d92002-12-08 22:16:58 +00001146 case VG_USERREQ__CHECK_READABLE: /* check readable */
sewardjbf310d92002-12-28 13:09:57 +00001147 IGNORE("VALGRIND_CHECK_READABLE");
1148 return False;
sewardjd8033d92002-12-08 22:16:58 +00001149 case VG_USERREQ__MAKE_NOACCESS: /* make no access */
sewardjbf310d92002-12-28 13:09:57 +00001150 IGNORE("VALGRIND_MAKE_NOACCESS");
1151 return False;
sewardjd8033d92002-12-08 22:16:58 +00001152 case VG_USERREQ__MAKE_WRITABLE: /* make writable */
sewardjbf310d92002-12-28 13:09:57 +00001153 IGNORE("VALGRIND_MAKE_WRITABLE");
1154 return False;
sewardjd8033d92002-12-08 22:16:58 +00001155 case VG_USERREQ__MAKE_READABLE: /* make readable */
sewardjbf310d92002-12-28 13:09:57 +00001156 IGNORE("VALGRIND_MAKE_READABLE");
1157 return False;
sewardjd8033d92002-12-08 22:16:58 +00001158 case VG_USERREQ__DISCARD: /* discard */
sewardjbf310d92002-12-28 13:09:57 +00001159 IGNORE("VALGRIND_CHECK_DISCARD");
1160 return False;
sewardjd8033d92002-12-08 22:16:58 +00001161
1162 default:
njn47363ab2003-04-21 13:24:40 +00001163 if (MAC_(handle_common_client_requests)(tst, arg_block, ret )) {
1164 return True;
1165 } else {
1166 VG_(message)(Vg_UserMsg,
1167 "Warning: unknown addrcheck client request code %d",
1168 arg[0]);
1169 return False;
1170 }
sewardjd8033d92002-12-08 22:16:58 +00001171 }
1172 return True;
sewardjbf310d92002-12-28 13:09:57 +00001173
1174#undef IGNORE
sewardjd8033d92002-12-08 22:16:58 +00001175}
1176
njn25e49d8e72002-09-23 09:36:25 +00001177/*------------------------------------------------------------*/
1178/*--- Setup ---*/
1179/*------------------------------------------------------------*/
1180
/* Process one command-line option.  AddrCheck defines no options of
   its own; everything is delegated to the options shared with
   MemCheck (mac_shared).  Returns True iff the option was recognised. */
Bool SK_(process_cmd_line_option)(Char* arg)
{
   return MAC_(process_common_cmd_line_option)(arg);
}
1185
/* Print usage text for the user-visible options shared with MemCheck. */
void SK_(print_usage)(void)
{
   MAC_(print_common_usage)();
}
1190
/* Print usage text for the debugging options shared with MemCheck. */
void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
}
1195
1196
1197/*------------------------------------------------------------*/
1198/*--- Setup ---*/
1199/*------------------------------------------------------------*/
1200
/* Called once, before command-line option processing: declare the
   skin's identity and needs to the core, point the shared
   MemCheck/AddrCheck (MAC) heap callbacks and all memory-event
   trackers at AddrCheck's addressibility-only implementations,
   register the helpers called from instrumented code, and initialise
   shadow memory. */
void SK_(pre_clo_init)(void)
{
   /* Skin identity, shown in the startup banner. */
   VG_(details_name)            ("Addrcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a fine-grained address checker");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2003, and GNU GPL'd, by Julian Seward.");
   VG_(details_bug_reports_to)  ("jseward@acm.org");
   VG_(details_avg_translation_sizeB) ( 135 );

   /* Core services this skin requires. */
   VG_(needs_core_errors)         ();
   VG_(needs_skin_errors)         ();
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)();
   VG_(needs_client_requests)     ();
   VG_(needs_syscall_wrapper)     ();
   VG_(needs_sanity_checks)       ();

   /* Heap-block callbacks used by the shared malloc/free wrappers:
      new blocks become accessible, red zones and dead blocks become
      inaccessible, realloc'd contents copy their A bits. */
   MAC_( new_mem_heap) = & ac_new_mem_heap;
   MAC_( ban_mem_heap) = & ac_make_noaccess;
   MAC_(copy_mem_heap) = & ac_copy_address_range_state;
   MAC_( die_mem_heap) = & ac_make_noaccess;

   /* Events that create accessible memory. */
   VG_(track_new_mem_startup)      ( & ac_new_mem_startup );
   VG_(track_new_mem_stack_signal) ( & ac_make_accessible );
   VG_(track_new_mem_brk)          ( & ac_make_accessible );
   VG_(track_new_mem_mmap)         ( & ac_set_perms );

   /* Events that move or re-permission existing memory. */
   VG_(track_copy_mem_remap)      ( & ac_copy_address_range_state );
   VG_(track_change_mem_mprotect) ( & ac_set_perms );

   /* Events that destroy accessible memory. */
   VG_(track_die_mem_stack_signal) ( & ac_make_noaccess );
   VG_(track_die_mem_brk)          ( & ac_make_noaccess );
   VG_(track_die_mem_munmap)       ( & ac_make_noaccess );

   /* Fast specialised handlers for common small stack adjustments,
      plus the generic fallback; all shared with MemCheck. */
   VG_(track_new_mem_stack_4)  ( & MAC_(new_mem_stack_4)  );
   VG_(track_new_mem_stack_8)  ( & MAC_(new_mem_stack_8)  );
   VG_(track_new_mem_stack_12) ( & MAC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16) ( & MAC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32) ( & MAC_(new_mem_stack_32) );
   VG_(track_new_mem_stack)    ( & MAC_(new_mem_stack)    );

   /* ... and the matching stack-shrink handlers. */
   VG_(track_die_mem_stack_4)  ( & MAC_(die_mem_stack_4)  );
   VG_(track_die_mem_stack_8)  ( & MAC_(die_mem_stack_8)  );
   VG_(track_die_mem_stack_12) ( & MAC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16) ( & MAC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32) ( & MAC_(die_mem_stack_32) );
   VG_(track_die_mem_stack)    ( & MAC_(die_mem_stack)    );

   VG_(track_ban_mem_stack) ( & ac_make_noaccess );

   /* Pre/post checks on memory touched on the client's behalf
      (system calls etc.). */
   VG_(track_pre_mem_read)        ( & ac_check_is_readable );
   VG_(track_pre_mem_read_asciiz) ( & ac_check_is_readable_asciiz );
   VG_(track_pre_mem_write)       ( & ac_check_is_writable );
   VG_(track_post_mem_write)      ( & ac_make_accessible );

   /* Helpers called directly from instrumented code (see
      SK_(instrument)); "compact" registration presumably lets the
      core emit shorter call sequences -- see core docs. */
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS4);
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS2);
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS1);
   VG_(register_compact_helper)((Addr) & ac_fpu_ACCESS_check);

   /* Profiling event categories used by the PROF_EVENT counters. */
   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VGP_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   init_shadow_memory();
   MAC_(common_pre_clo_init)();
}
1269
/* Called after command-line option processing.  AddrCheck has nothing
   to do at this point. */
void SK_(post_clo_init) ( void )
{
}
1273
/* Skin finalisation at client exit: run the shared MAC shutdown,
   passing our leak detector so a final leak check can be performed.
   exitcode is unused here. */
void SK_(fini) ( Int exitcode )
{
   MAC_(common_fini)( ac_detect_memory_leaks );
}
1278
1279/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001280/*--- end ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001281/*--------------------------------------------------------------------*/