/*--------------------------------------------------------------------*/
/*--- The AddrCheck skin: like MemCheck, but only does address    ---*/
/*--- checking.  No definedness checking.                         ---*/
/*---                                                   ac_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of AddrCheck, a lightweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "ac_include.h"
//#include "vg_profile.c"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* This many bytes below %ESP are considered addressible if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024


typedef
   enum {
      /* Bad syscall params */
      ParamSupp,
      /* Memory errors in core (pthread ops, signal handling) */
      CoreMemSupp,
      /* Invalid read/write attempt at given size */
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp,
      /* Invalid or mismatching free */
      FreeSupp
   }
   AddrCheckSuppKind;

/* What kind of error it is. */
typedef
   enum { CoreMemErr,
          AddrErr,
          ParamErr, UserErr,  /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr
   }
   AddrCheckErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {
      /* AddrErr */
      AxsKind axskind;
      /* AddrErr */
      Int size;
      /* AddrErr, FreeErr, FreeMismatchErr, ParamErr, UserErr */
      AcAddrInfo addrinfo;
      /* ParamErr, UserErr, CoreMemErr */
      Bool isWrite;
   }
   AddrCheckError;

/*------------------------------------------------------------*/
/*--- Comparing and printing errors                        ---*/
/*------------------------------------------------------------*/

static __inline__
void clear_AcAddrInfo ( AcAddrInfo* ai )
{
   ai->akind      = Unknown;
   ai->blksize    = 0;
   ai->rwoffset   = 0;
   ai->lastchange = NULL;
   ai->stack_tid  = VG_INVALID_THREADID;
   ai->maybe_gcc  = False;
}

static __inline__
void clear_AddrCheckError ( AddrCheckError* err_extra )
{
   err_extra->axskind = ReadAxs;
   err_extra->size    = 0;
   clear_AcAddrInfo ( &err_extra->addrinfo );
   err_extra->isWrite = False;
}

__attribute__((unused))
static Bool eq_AcAddrInfo ( VgRes res, AcAddrInfo* ai1, AcAddrInfo* ai2 )
{
   if (ai1->akind != Undescribed
       && ai2->akind != Undescribed
       && ai1->akind != ai2->akind)
      return False;
   if (ai1->akind == Freed || ai1->akind == Mallocd) {
      if (ai1->blksize != ai2->blksize)
         return False;
      if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
         return False;
   }
   return True;
}

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different. */

Bool SK_(eq_SkinError) ( VgRes res,
                         SkinError* e1, SkinError* e2 )
{
   AddrCheckError* e1_extra = e1->extra;
   AddrCheckError* e2_extra = e2->extra;

   switch (e1->ekind) {
      case CoreMemErr:
         if (e2->ekind != CoreMemErr) return False;
         if (e1_extra->isWrite != e2_extra->isWrite) return False;
         if (e1->string == e2->string) return True;
         if (0 == VG_(strcmp)(e1->string, e2->string)) return True;
         return False;

      case UserErr:
      case ParamErr:
         if (e1_extra->isWrite != e2_extra->isWrite)
            return False;
         if (e1->ekind == ParamErr
             && 0 != VG_(strcmp)(e1->string, e2->string))
            return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors.  Not even AddrErr
            below does that.  So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      default:
         VG_(printf)("Error:\n  unknown AddrCheck error code %d\n", e1->ekind);
         VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
   }
}

static void pp_AcAddrInfo ( Addr a, AcAddrInfo* ai )
{
   switch (ai->akind) {
      case Stack:
         VG_(message)(Vg_UserMsg,
                      "   Address 0x%x is on thread %d's stack",
                      a, ai->stack_tid);
         break;
      case Unknown:
         if (ai->maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               "   Address 0x%x is just below %%esp.  Possibly a bug in GCC/G++",
               a);
            VG_(message)(Vg_UserMsg,
               "   v 2.96 or 3.0.X.  To suppress, use: --workaround-gcc296-bugs=yes");
         } else {
            VG_(message)(Vg_UserMsg,
               "   Address 0x%x is not stack'd, malloc'd or free'd", a);
         }
         break;
      case Freed: case Mallocd: {
         UInt delta;
         UChar* relative;
         if (ai->rwoffset < 0) {
            delta    = (UInt)(- ai->rwoffset);
            relative = "before";
         } else if (ai->rwoffset >= ai->blksize) {
            delta    = ai->rwoffset - ai->blksize;
            relative = "after";
         } else {
            delta    = ai->rwoffset;
            relative = "inside";
         }
         VG_(message)(Vg_UserMsg,
                      "   Address 0x%x is %d bytes %s a block of size %d %s",
                      a, delta, relative,
                      ai->blksize,
                      ai->akind == Mallocd ? "alloc'd"
                         : ai->akind == Freed ? "free'd"
                                              : "client-defined");
         VG_(pp_ExeContext)(ai->lastchange);
         break;
      }
      default:
         VG_(skin_panic)("pp_AcAddrInfo");
   }
}

void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
{
   AddrCheckError* err_extra = err->extra;

   switch (err->ekind) {
      case CoreMemErr:
         /* AddrCheck tracks addressibility only, so reads and writes
            get the same message. */
         VG_(message)(Vg_UserMsg,
                      "%s contains unaddressable byte(s)", err->string );
         pp_ExeContext();
         break;

      case AddrErr:
         switch (err_extra->axskind) {
            case ReadAxs:
            case WriteAxs:
               /* These two aren't actually differentiated ever. */
               VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
                                        err_extra->size );
               break;
            case ExecAxs:
               VG_(message)(Vg_UserMsg, "Jump to the invalid address "
                                        "stated on the next line");
               break;
            default:
               VG_(skin_panic)("pp_SkinError(axskind)");
         }
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case FreeErr:
         VG_(message)(Vg_UserMsg, "Invalid free() / delete / delete[]");
         /* fall through */
      case FreeMismatchErr:
         if (err->ekind == FreeMismatchErr)
            VG_(message)(Vg_UserMsg,
                         "Mismatched free() / delete / delete []");
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case ParamErr:
         /* No definedness tracking here, so, unlike MemCheck, both
            directions can only complain about unaddressable bytes. */
         VG_(message)(Vg_UserMsg,
                      "Syscall param %s contains unaddressable byte(s)",
                      err->string );
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case UserErr:
         VG_(message)(Vg_UserMsg,
            "Unaddressable byte(s) found during client check request");
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      default:
         VG_(printf)("Error:\n  unknown AddrCheck error code %d\n", err->ekind);
         VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* Describe an address as best you can, for error messages,
   putting the result in ai. */

static void describe_addr ( Addr a, AcAddrInfo* ai )
{
   ShadowChunk* sc;
   ThreadId     tid;

   /* Nested functions (a GCC extension), to get the lexical scoping
      of 'a'. */

   /* Closure for searching thread stacks */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(ShadowChunk *sh_ch)
   {
      return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size );
   }

   /* Perhaps it's on a thread's stack? */
   tid = VG_(any_matching_thread_stack)(addr_is_in_bounds);
   if (tid != VG_INVALID_THREADID) {
      ai->akind     = Stack;
      ai->stack_tid = tid;
      return;
   }
   /* Search for a recently freed block which might bracket it. */
   sc = SK_(any_matching_freed_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Freed;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)(sc->data);
      ai->lastchange = (ExeContext*)sc->skin_extra[0];
      return;
   }
   /* Search for a currently malloc'd block which might bracket it. */
   sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Mallocd;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)(sc->data);
      ai->lastchange = (ExeContext*)sc->skin_extra[0];
      return;
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}


/* Creates a copy of the err_extra, updates the copy with address info if
   necessary, sticks the copy into the SkinError. */
void SK_(dup_extra_and_update)(SkinError* err)
{
   AddrCheckError* err_extra;

   err_extra  = VG_(malloc)(sizeof(AddrCheckError));
   *err_extra = *((AddrCheckError*)err->extra);

   if (err_extra->addrinfo.akind == Undescribed)
      describe_addr ( err->addr, &(err_extra->addrinfo) );

   err->extra = err_extra;
}

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
Bool VG_(is_just_below_ESP)( Addr esp, Addr aa )
{
   if ((UInt)esp > (UInt)aa
       && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}

static
void sk_record_address_error ( Addr a, Int size, Bool isWrite )
{
   AddrCheckError err_extra;
   Bool           just_below_esp;

   just_below_esp
      = VG_(is_just_below_ESP)( VG_(get_stack_pointer)(), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (SK_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   clear_AddrCheckError( &err_extra );
   err_extra.axskind            = isWrite ? WriteAxs : ReadAxs;
   err_extra.size               = size;
   err_extra.addrinfo.akind     = Undescribed;
   err_extra.addrinfo.maybe_gcc = just_below_esp;
   VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
}

/* These ones are called from non-generated code */

/* This is for memory errors in pthread functions, as opposed to pthread API
   errors which are found by the core. */
void SK_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
{
   AddrCheckError err_extra;

   clear_AddrCheckError( &err_extra );
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
}

void SK_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
                               Char* msg )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);
   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite        = isWrite;
   VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
}

void SK_(record_jump_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.axskind        = ExecAxs;
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_free_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_freemismatch_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite        = isWrite;
   VG_(maybe_record_error)( tst, UserErr, a, /*s*/NULL, &err_extra );
}


/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
                      && VG_(strcmp)((s1),(s2))==0)

Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
{
   if      (STREQ(name, "Param"))   *skind = ParamSupp;
   else if (STREQ(name, "CoreMem")) *skind = CoreMemSupp;
   else if (STREQ(name, "Addr1"))   *skind = Addr1Supp;
   else if (STREQ(name, "Addr2"))   *skind = Addr2Supp;
   else if (STREQ(name, "Addr4"))   *skind = Addr4Supp;
   else if (STREQ(name, "Addr8"))   *skind = Addr8Supp;
   else if (STREQ(name, "Free"))    *skind = FreeSupp;
   else
      return False;

   return True;
}

Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
                                        SkinSupp *s )
{
   Bool eof;

   if (s->skind == ParamSupp) {
      eof = VG_(get_line) ( fd, buf, nBuf );
      if (eof) return False;
      s->string = VG_(strdup)(buf);
   }
   return True;
}

Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
{
   UInt su_size;
   AddrCheckError* err_extra = err->extra;

   switch (su->skind) {
      case ParamSupp:
         return (err->ekind == ParamErr && STREQ(su->string, err->string));

      case CoreMemSupp:
         return (err->ekind == CoreMemErr && STREQ(su->string, err->string));

      case Addr1Supp: su_size = 1; goto addr_case;
      case Addr2Supp: su_size = 2; goto addr_case;
      case Addr4Supp: su_size = 4; goto addr_case;
      case Addr8Supp: su_size = 8; goto addr_case;
      addr_case:
         return (err->ekind == AddrErr && err_extra->size == su_size);

      case FreeSupp:
         return (err->ekind == FreeErr || err->ekind == FreeMismatchErr);

      default:
         VG_(printf)("Error:\n"
                     "  unknown AddrCheck suppression type %d\n", su->skind);
         VG_(skin_panic)("unknown suppression type in "
                         "SK_(error_matches_suppression)");
   }
}

#undef STREQ
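
/* An illustrative suppression entry for the kinds above (a sketch
   only: the exact file syntax, including whether the kind line
   carries a skin prefix, is defined by the core's suppression
   reader, not here).  Note that a Param entry carries one extra
   line -- the parameter name -- which
   SK_(read_extra_suppression_info) above picks up and which
   SK_(error_matches_suppression) compares against err->string:

      {
         write-from-some-library
         Param
         write(buf)
         fun:__libc_write
      }
*/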


/*--------------------------------------------------------------------*/
/*--- Part of the AddrCheck skin: Maintain bitmaps of memory,     ---*/
/*--- tracking the accessibility (A) of each byte.                ---*/
/*--------------------------------------------------------------------*/

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

Bool  SK_(clo_partial_loads_ok)       = True;
Int   SK_(clo_freelist_vol)           = 1000000;
Bool  SK_(clo_leak_check)             = False;
VgRes SK_(clo_leak_resolution)        = Vg_LowRes;
Bool  SK_(clo_show_reachable)         = False;
Bool  SK_(clo_workaround_gcc296_bugs) = False;
Bool  SK_(clo_cleanup)                = True;
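
/* A usage sketch, for orientation only (not part of the build).
   Assumptions: the command-line flag spellings mirror these variable
   names, as the "--workaround-gcc296-bugs=yes" and "--leak-check=yes"
   strings elsewhere in this file suggest, and the skin is selected
   with --skin=addrcheck:

      valgrind --skin=addrcheck --leak-check=yes --freelist-vol=500000 prog
*/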

/*------------------------------------------------------------*/
/*--- Profiling events                                     ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      VgpCheckMem = VgpFini+1,
      VgpSetMem
   }
   VgpSkinCC;

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility permissions for
   the 65536 bytes indexed by the lower 16 bits of the address.  Each
   byte is represented by one bit, indicating accessibility.  So each
   second-level map contains 8192 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of
   size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible' for
   all bytes.  Entries in the primary map for which the entire 64k is
   not in use at all point at this distinguished map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

      f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
         = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX   or
         = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address. */
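
/* A minimal standalone illustration of the combined check described
   above; f_example is hypothetical and merely mirrors the
   rotateRight16-based indexing that SK_(helperc_ACCESS4) below
   actually performs. */
#if 0
static UInt f_example ( UInt a )
{
   /* Rotate the low 16 address bits above the high 16, keep 18 bits:
      a 4-aligned address yields an index below 65536; a misaligned
      one yields an index >= 65536, landing in the permanently
      distinguished top 3/4 of the primary map. */
   return ((a >> 16) | (a << 16)) & 0x3FFFF;
}
/* f_example(0x08049000) == 0x00804   (aligned: normal lookup)
   f_example(0x08049001) == 0x10804   (misaligned: distinguished map) */
#endif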


/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++)
      event_ctr[i] = 0;
}

static void done_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#define PROF_EVENT(ev) /* */

#endif

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   10   alloc_secondary_map

   20   get_abit
   21   get_vbyte
   22   set_abit
   23   set_vbyte
   24   get_abits4_ALIGNED
   25   get_vbytes4_ALIGNED

   30   set_address_range_perms
   31   set_address_range_perms(lower byte loop)
   32   set_address_range_perms(quadword loop)
   33   set_address_range_perms(upper byte loop)

   35   make_noaccess
   36   make_writable
   37   make_readable

   40   copy_address_range_state
   41   copy_address_range_state(byte loop)
   42   check_writable
   43   check_writable(byte loop)
   44   check_readable
   45   check_readable(byte loop)
   46   check_readable_asciiz
   47   check_readable_asciiz(byte loop)

   50   make_aligned_word_NOACCESS
   51   make_aligned_word_WRITABLE

   60   helperc_LOADV4
   61   helperc_STOREV4
   62   helperc_LOADV2
   63   helperc_STOREV2
   64   helperc_LOADV1
   65   helperc_STOREV1

   70   rim_rd_V4_SLOWLY
   71   rim_wr_V4_SLOWLY
   72   rim_rd_V2_SLOWLY
   73   rim_wr_V2_SLOWLY
   74   rim_rd_V1_SLOWLY
   75   rim_wr_V1_SLOWLY

   80   fpu_read
   81   fpu_read aligned 4
   82   fpu_read aligned 8
   83   fpu_read 2
   84   fpu_read 10

   85   fpu_write
   86   fpu_write aligned 4
   87   fpu_write aligned 8
   88   fpu_write 2
   89   fpu_write 10

   90   fpu_read_check_SLOWLY
   91   fpu_read_check_SLOWLY(byte loop)
   92   fpu_write_check_SLOWLY
   93   fpu_write_check_SLOWLY(byte loop)

   100  is_plausible_stack_addr
   101  handle_esp_assignment
   102  handle_esp_assignment(-4)
   103  handle_esp_assignment(+4)
   104  handle_esp_assignment(-12)
   105  handle_esp_assignment(-8)
   106  handle_esp_assignment(+16)
   107  handle_esp_assignment(+12)
   108  handle_esp_assignment(0)
   109  handle_esp_assignment(+8)
   110  handle_esp_assignment(-16)
   111  handle_esp_assignment(+20)
   112  handle_esp_assignment(-20)
   113  handle_esp_assignment(+24)
   114  handle_esp_assignment(-24)

   120  vg_handle_esp_assignment_SLOWLY
   121  vg_handle_esp_assignment_SLOWLY(normal; move down)
   122  vg_handle_esp_assignment_SLOWLY(normal; move up)
   123  vg_handle_esp_assignment_SLOWLY(normal)
   124  vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
*/

/*------------------------------------------------------------*/
/*--- Function declarations.                               ---*/
/*------------------------------------------------------------*/

static void vgmext_ACCESS4_SLOWLY ( Addr a );
static void vgmext_ACCESS2_SLOWLY ( Addr a );
static void vgmext_ACCESS1_SLOWLY ( Addr a );
static void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );

/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[8192];
   }
   AcSecMap;

static AcSecMap* primary_map[ /*65536*/ 262144 ];
static AcSecMap  distinguished_secondary_map;

#define IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                \
   do {                                                             \
      if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {         \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);   \
         /* VG_(printf)("new 2map because of %p\n", addr); */       \
      }                                                             \
   } while(0)

#define BITARR_SET(aaa_p,iii_p)                         \
   do {                                                 \
      UInt   iii = (UInt)iii_p;                         \
      UChar* aaa = (UChar*)aaa_p;                       \
      aaa[iii >> 3] |= (1 << (iii & 7));                \
   } while (0)

#define BITARR_CLEAR(aaa_p,iii_p)                       \
   do {                                                 \
      UInt   iii = (UInt)iii_p;                         \
      UChar* aaa = (UChar*)aaa_p;                       \
      aaa[iii >> 3] &= ~(1 << (iii & 7));               \
   } while (0)

#define BITARR_TEST(aaa_p,iii_p)                        \
      (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ]      \
               & (1 << (((UInt)iii_p) & 7))))
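
/* A small usage sketch of the bit-array macros (illustration only;
   bitarr_example is hypothetical and not used by the skin).  Bit iii
   of the array lives at bit (iii & 7) of byte (iii >> 3). */
#if 0
static void bitarr_example ( void )
{
   UChar bits[2] = { 0, 0 };
   BITARR_SET(bits, 10);               /* bits[1] becomes 0x04 */
   sk_assert(  BITARR_TEST(bits, 10) );
   BITARR_CLEAR(bits, 10);
   sk_assert( !BITARR_TEST(bits, 10) );
}
#endif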

#define VGM_BIT_VALID       0
#define VGM_BIT_INVALID     1

#define VGM_NIBBLE_VALID    0
#define VGM_NIBBLE_INVALID  0xF

#define VGM_BYTE_VALID      0
#define VGM_BYTE_INVALID    0xFF

#define VGM_WORD_VALID      0
#define VGM_WORD_INVALID    0xFFFFFFFF

#define VGM_EFLAGS_VALID    0xFFFFFFFE
#define VGM_EFLAGS_INVALID  0xFFFFFFFF   /* not used */


static void init_shadow_memory ( void )
{
   Int i;

   for (i = 0; i < 8192; i++)             /* Invalid address */
      distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = 65536; i < 262144; i++)
      primary_map[i] = &distinguished_secondary_map;
}

void SK_(post_clo_init) ( void )
{
}

void SK_(fini) ( void )
{
   VG_(print_malloc_stats)();

   if (VG_(clo_verbosity) == 1) {
      if (!SK_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
            "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();

   done_prof_mem();
}

/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
                                       Char* caller )
{
   AcSecMap* map;
   UInt      i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access. */

   /* It just happens that an AcSecMap occupies exactly 2 pages --
      although this isn't important, so the following assert is
      spurious. */
   sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID;   /* Invalid address */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   AcSecMap* sm     = primary_map[a >> 16];
   UInt      sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
   if (IS_DISTINGUISHED_SM(sm))
      VG_(message)(Vg_DebugMsg,
                   "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static __inline__ void set_abit ( Addr a, UChar abit )
{
   AcSecMap* sm;
   UInt      sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}



/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit )
{
   UChar     abyte8;
   UInt      sm_off;
   AcSecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d",
                   len, example_a_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}

/* Set permissions for address ranges ... */

void SK_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID );
}

void SK_(make_accessible) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("SK_(make_accessible)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID );
}

/* Block-copy permissions (needed for implementing realloc()). */

static void copy_address_range_state ( Addr src, Addr dst, UInt len )
{
   UInt i;

   DEBUG("copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit = get_abit ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
   }
}


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;

   PROF_EVENT(44);
   DEBUG("SK_(check_readable)\n");
   for (i = 0; i < len; i++) {
      abit = get_abit(a);
      PROF_EVENT(45);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
{
   UChar abit;
   PROF_EVENT(46);
   DEBUG("SK_(check_readable_asciiz)\n");
   while (True) {
      PROF_EVENT(47);
      abit = get_abit(a);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return True;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

/* Setting permissions for aligned words.  This supports fast stack
   operations. */

static void make_noaccess_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(50);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_noaccess_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         invalid (1s). */
      sm->abits[sm_off >> 3] |= mask;
   }
   VGP_POPCC(VgpSetMem);
}

static void make_writable_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(51);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_writable_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         valid (0s). */
      sm->abits[sm_off >> 3] &= ~mask;
   }
   VGP_POPCC(VgpSetMem);
}


static
void check_is_writable ( CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_writable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
         case Vg_CoreSysCall:
            SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
            break;

         case Vg_CorePThread:
         case Vg_CoreSignal:
            SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
            break;

         default:
            VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable ( CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_readable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
         case Vg_CoreSysCall:
            SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
            break;

         case Vg_CorePThread:
            SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
            break;

         /* If we're being asked to jump to a silly address, record an error
            message before potentially crashing the entire system. */
         case Vg_CoreTranslate:
            SK_(record_jump_error)( tst, bad_addr );
            break;

         default:
            VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
                                Char* s, UInt str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
   if (!ok) {
      SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void addrcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   SK_(make_accessible)(a, len);
}

static
void addrcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   SK_(make_accessible)(a, len);
}

static
void addrcheck_set_perms (Addr a, UInt len,
                          Bool nn, Bool rr, Bool ww, Bool xx)
{
   DEBUG("addrcheck_set_perms(%p, %u, nn=%u, rr=%u ww=%u, xx=%u)\n",
         a, len, nn, rr, ww, xx);
   if (rr || ww || xx) {
      SK_(make_accessible)(a, len);
   } else {
      SK_(make_noaccess)(a, len);
   }
}


/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.       ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}


static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}


/* Check 1/2/4 sized accesses, and emit an address error if needed. */

/* SK_(helperc_ACCESS{1,2,4}) handle the common case fast.  Under all
   other circumstances, they defer to the relevant _SLOWLY function,
   which can handle all situations.
*/
__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS4_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x3FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   UChar     abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible.  So just return. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS4_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS2_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x1FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(62);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS2_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS1) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS1_SLOWLY(a);
#  else
   UInt      sec_no = shiftRight16(a);
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(64);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS1_SLOWLY(a);
   }
#  endif
}


/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- SK_(helperc_ACCESS{1,2,4}) can't manage.             ---*/
/*------------------------------------------------------------*/

static void vgmext_ACCESS4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      return;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!SK_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      sk_record_address_error( a, 4, False );
      return;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(SK_(clo_partial_loads_ok));
   return;
}

static void vgmext_ACCESS2_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(72);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      sk_record_address_error( a, 2, False );
   }
}

static void vgmext_ACCESS1_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(74);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      sk_record_address_error( a, 1, False );
   }
}


/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */

__attribute__ ((regparm(2)))
void SK_(fpu_ACCESS_check) ( Addr addr, Int size )
{
   /* Ensure the accessed area is addressible.  Try to be reasonably
      fast on the common case; wimp out and defer to
      fpu_ACCESS_check_SLOWLY for everything else. */

   AcSecMap* sm;
   UInt      sm_off, a_off;
   Addr      addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   fpu_ACCESS_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      return;
     slow4:
      fpu_ACCESS_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      /* Both halves properly aligned and addressible. */
      return;
     slow8:
      fpu_ACCESS_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      fpu_ACCESS_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 10) {
      PROF_EVENT(84);
      fpu_ACCESS_check_SLOWLY ( addr, 10 );
      return;
   }

   if (size == 28 || size == 108) {
      PROF_EVENT(84); /* XXX assign correct event number */
      fpu_ACCESS_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
#  endif
}


/* ---------------------------------------------------------------------
   Slow, general cases for FPU access checks.
   ------------------------------------------------------------------ */

static void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Bool aerr = False;
   PROF_EVENT(90);
   for (i = 0; i < size; i++) {
      PROF_EVENT(91);
      if (get_abit(addr+i) != VGM_BIT_VALID)
         aerr = True;
   }

   if (aerr) {
      sk_record_address_error( addr, size, False );
   }
}


/*------------------------------------------------------------*/
/*--- Shadow chunks info                                   ---*/
/*------------------------------------------------------------*/

static __inline__
void set_where( ShadowChunk* sc, ExeContext* ec )
{
   sc->skin_extra[0] = (UInt)ec;
}

static __inline__
ExeContext *get_where( ShadowChunk* sc )
{
   return (ExeContext*)sc->skin_extra[0];
}

void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   set_where( sc, VG_(get_ExeContext) ( tst ) );
}

/*------------------------------------------------------------*/
/*--- Postponing free()ing                                 ---*/
/*------------------------------------------------------------*/

/* Holds blocks after freeing. */
static ShadowChunk* vg_freed_list_start  = NULL;
static ShadowChunk* vg_freed_list_end    = NULL;
static Int          vg_freed_list_volume = 0;

static __attribute__ ((unused))
       Int count_freelist ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n++;
   return n;
}

static __attribute__ ((unused))
       void freelist_sanity ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   /* VG_(printf)("freelist sanity\n"); */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n += sc->size;
   sk_assert(n == vg_freed_list_volume);
}

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (vg_freed_list_end == NULL) {
      sk_assert(vg_freed_list_start == NULL);
      vg_freed_list_end = vg_freed_list_start = sc;
      vg_freed_list_volume = sc->size;
   } else {
      sk_assert(vg_freed_list_end->next == NULL);
      vg_freed_list_end->next = sc;
      vg_freed_list_end = sc;
      vg_freed_list_volume += sc->size;
   }
   sc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below SK_(clo_freelist_vol). */

   while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(vg_freed_list_start != NULL);
      sk_assert(vg_freed_list_end != NULL);

      sc1 = vg_freed_list_start;
      vg_freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
      sk_assert(vg_freed_list_volume >= 0);

      if (vg_freed_list_start == vg_freed_list_end) {
         vg_freed_list_start = vg_freed_list_end = NULL;
      } else {
         vg_freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
ShadowChunk* SK_(any_matching_freed_ShadowChunks)
                        ( Bool (*p) ( ShadowChunk* ))
{
   ShadowChunk* sc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      if (p(sc))
         return sc;

   return NULL;
}

void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
{
   /* Record where freed */
   set_where( sc, VG_(get_ExeContext) ( tst ) );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
}


/*------------------------------------------------------------*/
/*--- Our instrumenter                                     ---*/
/*------------------------------------------------------------*/

UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
{
   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_addr, t_size;

   cb = VG_(alloc_UCodeBlock)();
   cb->nextTemp = cb_in->nextTemp;

   for (i = 0; i < cb_in->used; i++) {

      t_addr = t_size = INVALID_TEMPREG;
      u_in = &cb_in->instrs[i];

      switch (u_in->opcode) {
         case NOP:  case CALLM_E:  case CALLM_S:
            break;

         /* For memory-ref instrs, copy the data_addr into a temporary
            to be passed to the relevant SK_(helperc_ACCESS*) helper,
            which checks its addressibility. */
         case LOAD:
            t_addr = u_in->val1;
            goto do_LOAD_or_STORE;
         case STORE:
            t_addr = u_in->val2;
            goto do_LOAD_or_STORE;
        do_LOAD_or_STORE:
            uInstr1(cb, CCALL, 0, TempReg, t_addr);
            switch (u_in->size) {
               case 4: uCCall(cb, (Addr)&SK_(helperc_ACCESS4), 1, 1, False );
                  break;
               case 2: uCCall(cb, (Addr)&SK_(helperc_ACCESS2), 1, 1, False );
                  break;
               case 1: uCCall(cb, (Addr)&SK_(helperc_ACCESS1), 1, 1, False );
                  break;
               default:
                  VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         case FPU_R:
         case FPU_W:
            t_addr = u_in->val2;
            t_size = newTemp(cb);
            uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr)&SK_(fpu_ACCESS_check), 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         default:
            VG_(copy_UInstr)(cb, u_in);
            break;
      }
   }

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}


/*------------------------------------------------------------*/
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
/*------------------------------------------------------------*/

/* For the memory leak detector, say whether an entire 64k chunk of
   address space is possibly in use, or not.  If in doubt return
   True.
*/
static
Bool ac_is_valid_64k_chunk ( UInt chunk_number )
{
   sk_assert(chunk_number >= 0 && chunk_number < 65536);
   if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
      /* Definitely not in use. */
      return False;
   } else {
      return True;
   }
}


/* For the memory leak detector, say whether or not a given word
   address is to be regarded as valid. */
static
Bool ac_is_valid_address ( Addr a )
{
   UChar abits;
   sk_assert(IS_ALIGNED4_ADDR(a));
   abits = get_abits4_ALIGNED(a);
   if (abits == VGM_NIBBLE_VALID) {
      return True;
   } else {
      return False;
   }
}


/* Leak detector for this skin.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   skin. */
void SK_(detect_memory_leaks) ( void )
{
   VG_(generic_detect_memory_leaks) (
      ac_is_valid_64k_chunk,
      ac_is_valid_address,
      get_where,
      SK_(clo_leak_resolution),
      SK_(clo_show_reachable)
   );
}


/* ---------------------------------------------------------------------
   Sanity check machinery (permanently engaged).
   ------------------------------------------------------------------ */

/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failures of
   the following checks do not per se indicate an internal consistency
   problem, but they are so likely to that we really want to know
   about them if so. */

Bool SK_(cheap_sanity_check) ( void )
{
   if (IS_DISTINGUISHED_SM(primary_map[0]) &&
       IS_DISTINGUISHED_SM(primary_map[65535]))
      return True;
   else
      return False;
}

Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map hasn't
      been messed with. */
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != & distinguished_secondary_map)
         return False;

   return True;
}
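
/* A note on the geometry checked here: 65536 secondaries of 64KB each
   cover the whole 4GB address space, and each secondary's 8192 abit
   bytes supply one A bit per byte of its 64KB.  Primary entries from
   65536 upwards are never used to map real memory, so they must stay
   pointing at the distinguished (everything-inaccessible) secondary;
   the second loop above verifies exactly that. */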

/* ---------------------------------------------------------------------
   Debugging machinery (turn on to debug).  Something of a mess.
   ------------------------------------------------------------------ */

#if 0
/* Print the value tags on the 8 integer registers & flag reg. */

static void uint_to_bits ( UInt x, Char* str )
{
   Int i;
   Int w = 0;
   /* str must point to a space of at least 36 bytes. */
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   str[w++] = 0;
   sk_assert(w == 36);
}
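
/* For example, uint_to_bits(0x80000001, buf) writes
   "10000000 00000000 00000000 00000001" into buf: 32 digits plus
   three separating spaces plus the trailing NUL, hence w == 36. */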

/* Caution!  Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressible words.  Useful for establishing where Valgrind's idea of
   addressibility has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
   /* Deliberately empty: being called once per valid word is enough,
      since VG_(scan_all_valid_memory) counts the notifications. */
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif


#if 0
static Int zzz = 0;

void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   vg_show_reg_tags();
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */

/*------------------------------------------------------------*/
/*--- Syscall wrappers                                     ---*/
/*------------------------------------------------------------*/

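/* The verdict of the cheap sanity check is threaded from pre_syscall
   to post_syscall via the opaque pre_result pointer, so that a
   syscall which corrupts the shadow map is reported as soon as it
   returns. */
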
void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
{
   Int sane = SK_(cheap_sanity_check)();
   return (void*)sane;
}

void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
                         void* pre_result, Int res, Bool isBlocking )
{
   Int  sane_before_call = (Int)pre_result;
   Bool sane_after_call  = SK_(cheap_sanity_check)();

   if (sane_before_call && !sane_after_call) {
      VG_(message)(Vg_DebugMsg, "post-syscall: ");
      VG_(message)(Vg_DebugMsg,
                   "probable sanity check failure for syscall number %d\n",
                   syscallno );
      VG_(skin_panic)("aborting due to the above ... bye!");
   }
}


/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
{
   *gen_reg_value = VGM_WORD_VALID;
   *eflags_value  = VGM_EFLAGS_VALID;
}

Bool SK_(process_cmd_line_option)(Char* arg)
{
#  define STREQ(s1,s2)     (0==VG_(strcmp_ws)((s1),(s2)))
#  define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))

   if      (STREQ(arg, "--partial-loads-ok=yes"))
      SK_(clo_partial_loads_ok) = True;
   else if (STREQ(arg, "--partial-loads-ok=no"))
      SK_(clo_partial_loads_ok) = False;

   else if (STREQN(15, arg, "--freelist-vol=")) {
      SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
      if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
   }

   else if (STREQ(arg, "--leak-check=yes"))
      SK_(clo_leak_check) = True;
   else if (STREQ(arg, "--leak-check=no"))
      SK_(clo_leak_check) = False;

   else if (STREQ(arg, "--leak-resolution=low"))
      SK_(clo_leak_resolution) = Vg_LowRes;
   else if (STREQ(arg, "--leak-resolution=med"))
      SK_(clo_leak_resolution) = Vg_MedRes;
   else if (STREQ(arg, "--leak-resolution=high"))
      SK_(clo_leak_resolution) = Vg_HighRes;

   else if (STREQ(arg, "--show-reachable=yes"))
      SK_(clo_show_reachable) = True;
   else if (STREQ(arg, "--show-reachable=no"))
      SK_(clo_show_reachable) = False;

   else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
      SK_(clo_workaround_gcc296_bugs) = True;
   else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
      SK_(clo_workaround_gcc296_bugs) = False;

   else if (STREQ(arg, "--cleanup=yes"))
      SK_(clo_cleanup) = True;
   else if (STREQ(arg, "--cleanup=no"))
      SK_(clo_cleanup) = False;

   else
      return False;

   return True;

#undef STREQ
#undef STREQN
}
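
/* For example, "--freelist-vol=500000" matches the STREQN(15, ...)
   case above ("--freelist-vol=" is exactly 15 characters), so
   VG_(atoll) parses the text from arg[15] onwards and
   SK_(clo_freelist_vol) becomes 500000.  Any unrecognised option
   falls through to the final return False, letting the core reject
   it. */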

Char* SK_(usage)(void)
{
   return
"    --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
"    --freelist-vol=<number>   volume of freed blocks queue [1000000]\n"
"    --leak-check=no|yes       search for memory leaks at exit? [no]\n"
"    --leak-resolution=low|med|high\n"
"                              amount of backtrace merging in leak check [low]\n"
"    --show-reachable=no|yes   show reachable blocks in leak check? [no]\n"
"    --workaround-gcc296-bugs=no|yes  self-explanatory [no]\n"
"    --check-addrVs=no|yes     experimental lighter-weight checking? [yes]\n"
"                              yes == Valgrind's original behaviour\n"
"\n"
"    --cleanup=no|yes          improve after instrumentation? [yes]\n";
}


/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
{
   details->name             = "Addrcheck";
   details->version          = NULL;
   details->description      = "a fine-grained address checker";
   details->copyright_author =
      "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.";
   details->bug_reports_to   = "jseward@acm.org";

   needs->core_errors          = True;
   needs->skin_errors          = True;
   needs->libc_freeres         = True;
   needs->sizeof_shadow_block  = 1;
   needs->basic_block_discards = False;
   needs->shadow_regs          = False;
   needs->command_line_options = True;
   needs->client_requests      = True;
   needs->extended_UCode       = False;
   needs->syscall_wrapper      = True;
   needs->alternative_free     = True;
   needs->sanity_checks        = True;

   track->new_mem_startup       = & addrcheck_new_mem_startup;
   track->new_mem_heap          = & addrcheck_new_mem_heap;
   track->new_mem_stack         = & SK_(make_accessible);
   track->new_mem_stack_aligned = & make_writable_aligned;
   track->new_mem_stack_signal  = & SK_(make_accessible);
   track->new_mem_brk           = & SK_(make_accessible);
   track->new_mem_mmap          = & addrcheck_set_perms;

   track->copy_mem_heap         = & copy_address_range_state;
   track->copy_mem_remap        = & copy_address_range_state;
   track->change_mem_mprotect   = & addrcheck_set_perms;

   track->ban_mem_heap          = & SK_(make_noaccess);
   track->ban_mem_stack         = & SK_(make_noaccess);

   track->die_mem_heap          = & SK_(make_noaccess);
   track->die_mem_stack         = & SK_(make_noaccess);
   track->die_mem_stack_aligned = & make_noaccess_aligned;
   track->die_mem_stack_signal  = & SK_(make_noaccess);
   track->die_mem_brk           = & SK_(make_noaccess);
   track->die_mem_munmap        = & SK_(make_noaccess);

   track->bad_free              = & SK_(record_free_error);
   track->mismatched_free       = & SK_(record_freemismatch_error);

   track->pre_mem_read          = & check_is_readable;
   track->pre_mem_read_asciiz   = & check_is_readable_asciiz;
   track->pre_mem_write         = & check_is_writable;
   track->post_mem_write        = & SK_(make_accessible);

   VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS4));
   VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS2));
   VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS1));
   VG_(register_compact_helper)((Addr) & SK_(fpu_ACCESS_check));

   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );

   init_shadow_memory();
   init_prof_mem();
}

/*--------------------------------------------------------------------*/
/*--- end                                                ac_main.c ---*/
/*--------------------------------------------------------------------*/