/*--------------------------------------------------------------------*/
/*--- The AddrCheck skin: like MemCheck, but only does address    ---*/
/*--- checking.  No definedness checking.                         ---*/
/*---                                                  ac_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of AddrCheck, a lightweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2003 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mac_shared.h"
#include "memcheck.h"
//#include "vg_profile.c"

VG_DETERMINE_INTERFACE_VERSION

/*------------------------------------------------------------*/
/*--- Comparing and printing errors                        ---*/
/*------------------------------------------------------------*/

void SK_(pp_SkinError) ( Error* err )
{
   MAC_Error* err_extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case CoreMemErr:
         VG_(message)(Vg_UserMsg, "%s contains unaddressable byte(s)",
                      VG_(get_error_string)(err));
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         break;

      case AddrErr:
         switch (err_extra->axskind) {
            case ReadAxs:
            case WriteAxs:
               /* These two aren't actually differentiated ever. */
               VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
                            err_extra->size );
               break;
            case ExecAxs:
               VG_(message)(Vg_UserMsg, "Jump to the invalid address "
                                        "stated on the next line");
               break;
            default:
               VG_(skin_panic)("SK_(pp_SkinError)(axskind)");
         }
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      case ParamErr:
         VG_(message)(Vg_UserMsg,
                      "Syscall param %s contains unaddressable byte(s)",
                      VG_(get_error_string)(err) );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      case UserErr:
         VG_(message)(Vg_UserMsg,
            "Unaddressable byte(s) found during client check request");
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         MAC_(pp_AddrInfo)(VG_(get_error_address)(err), &err_extra->addrinfo);
         break;

      default:
         MAC_(pp_shared_SkinError)(err);
         break;
   }
}

/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

Bool SK_(recognised_suppression) ( Char* name, Supp* su )
{
   return MAC_(shared_recognised_suppression)(name, su);
}

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility permissions for
   the 65536 bytes indexed by the lower 16 bits of the address.  Each
   byte is represented by one bit, indicating accessibility.  So each
   second-level map contains 8192 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of
   size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressable' for
   all bytes.  Entries in the primary map for which the entire 64k is
   not in use at all point at this distinguished map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

   f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
    = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
    = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address. */

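/* A minimal sketch (illustrative only, not compiled into the skin) of
   the f(addr) transform described above, written out as plain C.  The
   real fast-path code below (ac_helperc_ACCESS4) gets the same effect
   with a single 16-bit rotate and an 18-bit mask. */
#if 0
static UInt f_example ( Addr a )
{
   /* Put the low two address bits above the 16 high bits.  For an
      aligned address both bits are zero, so this is just (a >> 16);
      for a misaligned address the index lands in the top 3/4 of the
      2^18-entry primary map, which always points at the
      distinguished (all-inaccessible) secondary map. */
   return ((a & 3) << 16) | (a >> 16);
}
#endif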

/*------------------------------------------------------------*/
/*--- Function declarations.                               ---*/
/*------------------------------------------------------------*/

static void ac_ACCESS4_SLOWLY ( Addr a );
static void ac_ACCESS2_SLOWLY ( Addr a );
static void ac_ACCESS1_SLOWLY ( Addr a );
static void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );

/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[8192];
   }
   AcSecMap;

static AcSecMap* primary_map[ /*65536*/ 262144 ];
static AcSecMap  distinguished_secondary_map;

static void init_shadow_memory ( void )
{
   Int i;

   for (i = 0; i < 8192; i++)             /* Invalid address */
      distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = 65536; i < 262144; i++)
      primary_map[i] = &distinguished_secondary_map;
}

/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
                                       Char* caller )
{
   AcSecMap* map;
   UInt      i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access. */

   /* It just happens that an AcSecMap occupies a whole number of
      pages -- although this isn't important, so the following assert
      is spurious. */
   sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID;   /* Invalid address */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   AcSecMap* sm     = primary_map[a >> 16];
   UInt      sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
   if (IS_DISTINGUISHED_SM(sm))
      VG_(message)(Vg_DebugMsg,
                   "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static __inline__ void set_abit ( Addr a, UChar abit )
{
   AcSecMap* sm;
   UInt      sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}
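
/* Worked example (illustrative only; assumes the usual BITARR layout,
   ie bit (sm_off & 7) of byte abits[sm_off >> 3]): for a = 0x8004,
   sm_off is 0x8004, so the byte consulted is abits[0x1000], which
   holds the A bits for addresses 0x8000..0x8007.  Since (a & 4) == 4,
   the shift moves the bits for 0x8004..0x8007 into the low nibble and
   the 0x0F mask isolates them; a result of VGM_NIBBLE_VALID then
   means all four bytes are addressable. */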


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static __inline__
void set_address_range_perms ( Addr a, UInt len,
                               UInt example_a_bit )
{
   UChar     abyte8;
   UInt      sm_off;
   AcSecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d",
                   len, example_a_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);

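   /* Illustrative note: since the code relies on VGM_BIT_VALID being 0
      and VGM_BIT_INVALID being nonzero (see set_abit/get_abit above),
      abyte8 works out to 0x00 for "make accessible" and, assuming
      VGM_BIT_INVALID == 1, to 0xFF for "make inaccessible" -- the
      example bit replicated into all eight positions. */
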
#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}

/* Set permissions for address ranges ... */

static void ac_make_noaccess ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("ac_make_noaccess(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID );
}

static void ac_make_accessible ( Addr a, UInt len )
{
   PROF_EVENT(38);
   DEBUG("ac_make_accessible(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID );
}

static __inline__
void make_aligned_word_noaccess(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   mask   = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s at the A-bit positions we wish to make
      invalid (set to 1). */
   sm->abits[sm_off >> 3] |= mask;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_word_accessible(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_word_accessible");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   mask   = 0x0F;
   mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   /* mask now contains 1s at the A-bit positions we wish to make
      valid (cleared to 0). */
   sm->abits[sm_off >> 3] &= ~mask;
   VGP_POPCC(VgpESPAdj);
}

/* Nb: by "aligned" here we mean 8-byte aligned */
static __inline__
void make_aligned_doubleword_accessible(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_accessible");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_VALID;
   VGP_POPCC(VgpESPAdj);
}

static __inline__
void make_aligned_doubleword_noaccess(Addr a)
{
   AcSecMap* sm;
   UInt      sm_off;

   VGP_PUSHCC(VgpESPAdj);
   ENSURE_MAPPABLE(a, "make_aligned_doubleword_noaccess");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   sm->abits[sm_off >> 3] = VGM_BYTE_INVALID;
   VGP_POPCC(VgpESPAdj);
}

/* The %esp update handling functions */
ESP_UPDATE_HANDLERS ( make_aligned_word_accessible,
                      make_aligned_word_noaccess,
                      make_aligned_doubleword_accessible,
                      make_aligned_doubleword_noaccess,
                      ac_make_accessible,
                      ac_make_noaccess
                    );

/* Block-copy permissions (needed for implementing realloc()). */

static void ac_copy_address_range_state ( Addr src, Addr dst, UInt len )
{
   UInt i;

   DEBUG("ac_copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit = get_abit ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
   }
}


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

static __inline__
Bool ac_check_accessible ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(48);
   for (i = 0; i < len; i++) {
      PROF_EVENT(49);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

/* The opposite; check that an address range is inaccessible. */
static
Bool ac_check_noaccess ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(48);
   for (i = 0; i < len; i++) {
      PROF_EVENT(49);
      abit = get_abit(a);
      if (abit == VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

static __inline__
Bool ac_check_readable_asciiz ( Addr a, Addr* bad_addr )
{
   UChar abit;
   PROF_EVENT(46);
   DEBUG("ac_check_readable_asciiz\n");
   while (True) {
      PROF_EVENT(47);
      abit = get_abit(a);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return True;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

static __inline__
void ac_check_is_accessible ( CorePart part, ThreadId tid,
                              Char* s, Addr base, UInt size, Bool isWrite )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   ok = ac_check_accessible ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
         case Vg_CoreSysCall:
            MAC_(record_param_error) ( tid, bad_addr, isWrite, s );
            break;

         case Vg_CoreSignal:
            sk_assert(isWrite);     /* Should only happen with isWrite case */
            /* fall through */
         case Vg_CorePThread:
            MAC_(record_core_mem_error)( tid, isWrite, s );
            break;

         /* If we're being asked to jump to a silly address, record an error
            message before potentially crashing the entire system. */
         case Vg_CoreTranslate:
            sk_assert(!isWrite);    /* Should only happen with !isWrite case */
            MAC_(record_jump_error)( tid, bad_addr );
            break;

         default:
            VG_(skin_panic)("ac_check_is_accessible: unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void ac_check_is_writable ( CorePart part, ThreadId tid,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/True );
}

static
void ac_check_is_readable ( CorePart part, ThreadId tid,
                            Char* s, Addr base, UInt size )
{
   ac_check_is_accessible ( part, tid, s, base, size, /*isWrite*/False );
}

static
void ac_check_is_readable_asciiz ( CorePart part, ThreadId tid,
                                   Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = ac_check_readable_asciiz ( (Addr)str, &bad_addr );
   if (!ok) {
      MAC_(record_param_error) ( tid, bad_addr, /*is_writable =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void ac_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   ac_make_accessible(a, len);
}

static
void ac_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   ac_make_accessible(a, len);
}

static
void ac_set_perms (Addr a, UInt len,
                   Bool rr, Bool ww, Bool xx)
{
   DEBUG("ac_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
         a, len, rr, ww, xx);
   if (rr || ww || xx) {
      ac_make_accessible(a, len);
   } else {
      ac_make_noaccess(a, len);
   }
}


/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.       ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}

static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}

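/* Note: rotateRight16(a) & 0x3FFFF is exactly the f(addr) transform
   described at the top of this file -- the low two address bits land
   in bits 17:16 of the primary-map index.  So for a misaligned 4-byte
   access the index falls in the always-inaccessible upper 3/4 of
   primary_map, and ac_helperc_ACCESS4 below drops onto the slow path,
   which then decides whether the access is genuinely bad or merely
   misaligned. */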

/* Check 1/2/4 sized reads and writes, and emit an address error if
   needed. */

/* ac_helperc_ACCESS{1,2,4} handle the common case fast.
   Under all other circumstances, they defer to the relevant _SLOWLY
   function, which can handle all situations.
*/
__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS4 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS4_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x3FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   UChar     abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(66);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressable.  So just return. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS4_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS2 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS2_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x1FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(67);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS2_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
static void ac_helperc_ACCESS1 ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return ac_ACCESS1_SLOWLY(a);
#  else
   UInt      sec_no = shiftRight16(a);
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(68);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      ac_ACCESS1_SLOWLY(a);
   }
#  endif
}


/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- ac_helperc_ACCESS{1,2,4} can't manage.               ---*/
/*------------------------------------------------------------*/

static void ac_ACCESS4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;

   PROF_EVENT(76);

   /* First establish independently the addressability of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      return;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!MAC_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 4, False );
      return;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      Case 3 is only allowed if MAC_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(MAC_(clo_partial_loads_ok));
   {
      return;
   }
}

static void ac_ACCESS2_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(77);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 2, False );
   }
}

static void ac_ACCESS1_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(78);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), a, 1, False );
   }
}


/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */

__attribute__ ((regparm(2)))
static void ac_fpu_ACCESS_check ( Addr addr, Int size )
{
   /* Ensure the accessed area is addressable.  If it isn't, an
      address error is emitted by the _SLOWLY fallback.

      Try to be reasonably fast on the common case; wimp out and defer
      to ac_fpu_ACCESS_check_SLOWLY for everything else.  */

   AcSecMap* sm;
   UInt      sm_off, a_off;
   Addr      addr4;

   PROF_EVENT(90);

#  ifdef VG_DEBUG_MEMORY
   ac_fpu_ACCESS_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(91);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressable. */
      return;
     slow4:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(92);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressable. */
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressable. */
      /* Both halves properly aligned and addressable. */
      return;
     slow8:
      ac_fpu_ACCESS_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(93);
      ac_fpu_ACCESS_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 16 || size == 10 || size == 28 || size == 108) {
      PROF_EVENT(94);
      ac_fpu_ACCESS_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
#  endif
}


/* ---------------------------------------------------------------------
   Slow, general cases for FPU access checks.
   ------------------------------------------------------------------ */

void ac_fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Bool aerr = False;
   PROF_EVENT(100);
   for (i = 0; i < size; i++) {
      PROF_EVENT(101);
      if (get_abit(addr+i) != VGM_BIT_VALID)
         aerr = True;
   }

   if (aerr) {
      MAC_(record_address_error)( VG_(get_current_tid)(), addr, size, False );
   }
}


/*------------------------------------------------------------*/
/*--- Our instrumenter                                     ---*/
/*------------------------------------------------------------*/

UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
{
/* Use this rather than eg. -1 because it's a UInt. */
#define INVALID_DATA_SIZE  999999

   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_addr, t_size;

   cb = VG_(setup_UCodeBlock)(cb_in);

   for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {

      t_addr = t_size = INVALID_TEMPREG;
      u_in = VG_(get_instr)(cb_in, i);

      switch (u_in->opcode) {
         case NOP:  case LOCK:  case CALLM_E:  case CALLM_S:
            break;

         /* For memory-ref instrs, copy the data_addr into a temporary to be
          * passed to the helper at the end of the instruction.
          */
         case LOAD:
            t_addr = u_in->val1;
            goto do_LOAD_or_STORE;
         case STORE:  t_addr = u_in->val2;
            goto do_LOAD_or_STORE;
        do_LOAD_or_STORE:
            uInstr1(cb, CCALL, 0, TempReg, t_addr);
            switch (u_in->size) {
               case 4: uCCall(cb, (Addr) & ac_helperc_ACCESS4, 1, 1, False );
                  break;
               case 2: uCCall(cb, (Addr) & ac_helperc_ACCESS2, 1, 1, False );
                  break;
               case 1: uCCall(cb, (Addr) & ac_helperc_ACCESS1, 1, 1, False );
                  break;
               default:
                  VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         case SSE3ag_MemRd_RegWr:
            sk_assert(u_in->size == 4 || u_in->size == 8);
            goto do_Access_ARG1;
        do_Access_ARG1:
            sk_assert(u_in->tag1 == TempReg);
            t_addr = u_in->val1;
            t_size = newTemp(cb);
            uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         case MMX2_MemRd:
         case MMX2_MemWr:
            sk_assert(u_in->size == 4 || u_in->size == 8);
            goto do_Access_ARG2;
         case FPU_R:
         case FPU_W:
            goto do_Access_ARG2;
        do_Access_ARG2:
            sk_assert(u_in->tag2 == TempReg);
            t_addr = u_in->val2;
            t_size = newTemp(cb);
            uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         case SSE3a_MemRd: // this one causes trouble
         case SSE2a_MemRd:
         case SSE2a_MemWr:
         case SSE3a_MemWr:
            sk_assert(u_in->size == 4 || u_in->size == 8
                      || u_in->size == 16);
            goto do_Access_ARG3;
        do_Access_ARG3:
            sk_assert(u_in->tag3 == TempReg);
            t_addr = u_in->val3;
            t_size = newTemp(cb);
            uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr) & ac_fpu_ACCESS_check, 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         // case SSE2a1_MemRd:
         // case SSE2a1_MemWr:
         // case SSE3a1_MemRd:
         // case SSE3a1_MemWr:
            VG_(pp_UInstr)(0,u_in);
            VG_(skin_panic)("AddrCheck: unhandled SSE uinstr");
            break;

         case SSE3e1_RegRd:
         case SSE3e_RegWr:
         case SSE3g1_RegWr:
         case SSE5:
         case SSE3g_RegWr:
         case SSE3e_RegRd:
         case SSE4:
         default:
            VG_(copy_UInstr)(cb, u_in);
            break;
      }
   }

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}


/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* For the memory leak detector, say whether an entire 64k chunk of
   address space is possibly in use, or not.  If in doubt return
   True.
*/
static
Bool ac_is_valid_64k_chunk ( UInt chunk_number )
{
   sk_assert(chunk_number >= 0 && chunk_number < 65536);
   if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
      /* Definitely not in use. */
      return False;
   } else {
      return True;
   }
}


/* For the memory leak detector, say whether or not a given word
   address is to be regarded as valid. */
static
Bool ac_is_valid_address ( Addr a )
{
   UChar abits;
   sk_assert(IS_ALIGNED4_ADDR(a));
   abits = get_abits4_ALIGNED(a);
   if (abits == VGM_NIBBLE_VALID) {
      return True;
   } else {
      return False;
   }
}


/* Leak detector for this skin.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   skin. */
static void ac_detect_memory_leaks ( void )
{
   MAC_(do_detect_memory_leaks) ( ac_is_valid_64k_chunk, ac_is_valid_address );
}


/* ---------------------------------------------------------------------
   Sanity check machinery (permanently engaged).
   ------------------------------------------------------------------ */

/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failure of
   the following does not per se indicate an internal consistency
   problem, but it is so likely to that we really want to know about
   it if so. */

Bool SK_(cheap_sanity_check) ( void )
{
   if (IS_DISTINGUISHED_SM(primary_map[0])
       /* kludge: kernel drops a page up at top of address range for
          magic "optimized syscalls", so we can no longer check the
          highest page */
       /* && IS_DISTINGUISHED_SM(primary_map[65535]) */
      )
      return True;
   else
      return False;
}

Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map hasn't
      been messed with. */
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != & distinguished_secondary_map)
         return False;

   return True;
}

/*------------------------------------------------------------*/
/*--- Client requests                                      ---*/
/*------------------------------------------------------------*/

Bool SK_(handle_client_request) ( ThreadId tid, UInt* arg_block, UInt *ret )
{
#define IGNORE(what)                                                    \
   do {                                                                 \
      if (moans-- > 0) {                                                \
         VG_(message)(Vg_UserMsg,                                       \
             "Warning: Addrcheck: ignoring `%s' request.", what);       \
         VG_(message)(Vg_UserMsg,                                       \
             "   To honour this request, rerun with --skin=memcheck."); \
      }                                                                 \
   } while (0)

   UInt* arg = arg_block;
   static Int moans = 3;

   /* Overload memcheck client reqs */
   if (!VG_IS_SKIN_USERREQ('M','C',arg[0]))
      return False;

   switch (arg[0]) {
      case VG_USERREQ__DO_LEAK_CHECK:
         ac_detect_memory_leaks();
         *ret = 0; /* return value is meaningless */
         break;

      /* Ignore these */
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         IGNORE("VALGRIND_CHECK_WRITABLE");
         return False;
      case VG_USERREQ__CHECK_READABLE: /* check readable */
         IGNORE("VALGRIND_CHECK_READABLE");
         return False;
      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         IGNORE("VALGRIND_MAKE_NOACCESS");
         return False;
      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         IGNORE("VALGRIND_MAKE_WRITABLE");
         return False;
      case VG_USERREQ__MAKE_READABLE: /* make readable */
         IGNORE("VALGRIND_MAKE_READABLE");
         return False;
      case VG_USERREQ__DISCARD: /* discard */
         IGNORE("VALGRIND_CHECK_DISCARD");
         return False;

      default:
         if (MAC_(handle_common_client_requests)(tid, arg_block, ret )) {
            return True;
         } else {
            VG_(message)(Vg_UserMsg,
               "Warning: unknown addrcheck client request code %d",
               arg[0]);
            return False;
         }
   }
   return True;

#undef IGNORE
}

/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

Bool SK_(process_cmd_line_option)(Char* arg)
{
   return MAC_(process_common_cmd_line_option)(arg);
}

void SK_(print_usage)(void)
{
   MAC_(print_common_usage)();
}

void SK_(print_debug_usage)(void)
{
   MAC_(print_common_debug_usage)();
}


/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

void SK_(pre_clo_init)(void)
{
   VG_(details_name)            ("Addrcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a fine-grained address checker");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2003, and GNU GPL'd, by Julian Seward.");
   VG_(details_bug_reports_to)  ("jseward@acm.org");
   VG_(details_avg_translation_sizeB) ( 135 );

   VG_(needs_core_errors)         ();
   VG_(needs_skin_errors)         ();
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)();
   VG_(needs_client_requests)     ();
   VG_(needs_syscall_wrapper)     ();
   VG_(needs_sanity_checks)       ();

   MAC_( new_mem_heap)  = & ac_new_mem_heap;
   MAC_( ban_mem_heap)  = & ac_make_noaccess;
   MAC_(copy_mem_heap)  = & ac_copy_address_range_state;
   MAC_( die_mem_heap)  = & ac_make_noaccess;
   MAC_(check_noaccess) = & ac_check_noaccess;

   VG_(track_new_mem_startup)      ( & ac_new_mem_startup );
   VG_(track_new_mem_stack_signal) ( & ac_make_accessible );
   VG_(track_new_mem_brk)          ( & ac_make_accessible );
   VG_(track_new_mem_mmap)         ( & ac_set_perms );

   VG_(track_copy_mem_remap)       ( & ac_copy_address_range_state );
   VG_(track_change_mem_mprotect)  ( & ac_set_perms );

   VG_(track_die_mem_stack_signal) ( & ac_make_noaccess );
   VG_(track_die_mem_brk)          ( & ac_make_noaccess );
   VG_(track_die_mem_munmap)       ( & ac_make_noaccess );

   VG_(track_new_mem_stack_4)      ( & MAC_(new_mem_stack_4)  );
   VG_(track_new_mem_stack_8)      ( & MAC_(new_mem_stack_8)  );
   VG_(track_new_mem_stack_12)     ( & MAC_(new_mem_stack_12) );
   VG_(track_new_mem_stack_16)     ( & MAC_(new_mem_stack_16) );
   VG_(track_new_mem_stack_32)     ( & MAC_(new_mem_stack_32) );
   VG_(track_new_mem_stack)        ( & MAC_(new_mem_stack)    );

   VG_(track_die_mem_stack_4)      ( & MAC_(die_mem_stack_4)  );
   VG_(track_die_mem_stack_8)      ( & MAC_(die_mem_stack_8)  );
   VG_(track_die_mem_stack_12)     ( & MAC_(die_mem_stack_12) );
   VG_(track_die_mem_stack_16)     ( & MAC_(die_mem_stack_16) );
   VG_(track_die_mem_stack_32)     ( & MAC_(die_mem_stack_32) );
   VG_(track_die_mem_stack)        ( & MAC_(die_mem_stack)    );

   VG_(track_ban_mem_stack)        ( & ac_make_noaccess );

   VG_(track_pre_mem_read)         ( & ac_check_is_readable );
   VG_(track_pre_mem_read_asciiz)  ( & ac_check_is_readable_asciiz );
   VG_(track_pre_mem_write)        ( & ac_check_is_writable );
   VG_(track_post_mem_write)       ( & ac_make_accessible );

   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS4);
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS2);
   VG_(register_compact_helper)((Addr) & ac_helperc_ACCESS1);
   VG_(register_compact_helper)((Addr) & ac_fpu_ACCESS_check);

   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
   VGP_(register_profile_event) ( VgpESPAdj,   "adjust-ESP" );

   init_shadow_memory();
   MAC_(common_pre_clo_init)();
}

void SK_(post_clo_init) ( void )
{
}

void SK_(fini) ( Int exitcode )
{
   MAC_(common_fini)( ac_detect_memory_leaks );
}

/*--------------------------------------------------------------------*/
/*--- end                                                ac_main.c ---*/
/*--------------------------------------------------------------------*/