
/*--------------------------------------------------------------------*/
/*--- The AddrCheck skin: like MemCheck, but only does address    ---*/
/*--- checking.  No definedness checking.                         ---*/
/*---                                                   ac_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of AddrCheck, a lightweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "ac_include.h"
//#include "vg_profile.c"

VG_DETERMINE_INTERFACE_VERSION

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* This many bytes below %ESP are considered addressible if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024
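
/* Illustrative only: with the 1024-byte slop above and
   --workaround-gcc296-bugs=yes in force, an access at (%esp - 100) is
   silently ignored while one at (%esp - 2000) is still reported.  A
   sketch of the test performed by VG_(is_just_below_ESP) (defined
   below):

      is_just_below_ESP(esp, esp - 100)    == True    (within the slop)
      is_just_below_ESP(esp, esp - 2000)   == False   (beyond the slop)
*/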


typedef
   enum {
      /* Bad syscall params */
      ParamSupp,
      /* Memory errors in core (pthread ops, signal handling) */
      CoreMemSupp,
      /* Invalid read/write attempt at given size */
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp,
      /* Invalid or mismatching free */
      FreeSupp
   }
   AddrCheckSuppKind;

/* What kind of error it is. */
typedef
   enum { CoreMemErr,
          AddrErr,
          ParamErr, UserErr,  /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr
   }
   AddrCheckErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {
      /* AddrErr */
      AxsKind axskind;
      /* AddrErr */
      Int size;
      /* AddrErr, FreeErr, FreeMismatchErr, ParamErr, UserErr */
      AcAddrInfo addrinfo;
      /* ParamErr, UserErr, CoreMemErr */
      Bool isWrite;
   }
   AddrCheckError;

/*------------------------------------------------------------*/
/*--- Comparing and printing errors                        ---*/
/*------------------------------------------------------------*/

static __inline__
void clear_AcAddrInfo ( AcAddrInfo* ai )
{
   ai->akind      = Unknown;
   ai->blksize    = 0;
   ai->rwoffset   = 0;
   ai->lastchange = NULL;
   ai->stack_tid  = VG_INVALID_THREADID;
   ai->maybe_gcc  = False;
}

static __inline__
void clear_AddrCheckError ( AddrCheckError* err_extra )
{
   err_extra->axskind = ReadAxs;
   err_extra->size    = 0;
   clear_AcAddrInfo ( &err_extra->addrinfo );
   err_extra->isWrite = False;
}

__attribute__((unused))
static Bool eq_AcAddrInfo ( VgRes res, AcAddrInfo* ai1, AcAddrInfo* ai2 )
{
   if (ai1->akind != Undescribed
       && ai2->akind != Undescribed
       && ai1->akind != ai2->akind)
      return False;
   if (ai1->akind == Freed || ai1->akind == Mallocd) {
      if (ai1->blksize != ai2->blksize)
         return False;
      if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
         return False;
   }
   return True;
}

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different. */

Bool SK_(eq_SkinError) ( VgRes res,
                         SkinError* e1, SkinError* e2 )
{
   AddrCheckError* e1_extra = e1->extra;
   AddrCheckError* e2_extra = e2->extra;

   switch (e1->ekind) {
      case CoreMemErr:
         if (e1_extra->isWrite != e2_extra->isWrite)   return False;
         if (e2->ekind != CoreMemErr)                  return False;
         if (e1->string == e2->string)                 return True;
         if (0 == VG_(strcmp)(e1->string, e2->string)) return True;
         return False;

      case UserErr:
      case ParamErr:
         if (e1_extra->isWrite != e2_extra->isWrite)
            return False;
         if (e1->ekind == ParamErr
             && 0 != VG_(strcmp)(e1->string, e2->string))
            return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors.  Not even AddrErr
            below does that.  So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      default:
         VG_(printf)("Error:\n  unknown AddrCheck error code %d\n", e1->ekind);
         VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
   }
}

static void pp_AcAddrInfo ( Addr a, AcAddrInfo* ai )
{
   switch (ai->akind) {
      case Stack:
         VG_(message)(Vg_UserMsg,
                      "   Address 0x%x is on thread %d's stack",
                      a, ai->stack_tid);
         break;
      case Unknown:
         if (ai->maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               "   Address 0x%x is just below %%esp.  Possibly a bug in GCC/G++",
               a);
            VG_(message)(Vg_UserMsg,
               "   v 2.96 or 3.0.X.  To suppress, use: --workaround-gcc296-bugs=yes");
         } else {
            VG_(message)(Vg_UserMsg,
                         "   Address 0x%x is not stack'd, malloc'd or free'd", a);
         }
         break;
      case Freed: case Mallocd: {
         UInt   delta;
         UChar* relative;
         if (ai->rwoffset < 0) {
            delta    = (UInt)(- ai->rwoffset);
            relative = "before";
         } else if (ai->rwoffset >= ai->blksize) {
            delta    = ai->rwoffset - ai->blksize;
            relative = "after";
         } else {
            delta    = ai->rwoffset;
            relative = "inside";
         }
         VG_(message)(Vg_UserMsg,
                      "   Address 0x%x is %d bytes %s a block of size %d %s",
                      a, delta, relative,
                      ai->blksize,
                      ai->akind==Mallocd ? "alloc'd"
                         : ai->akind==Freed ? "free'd"
                                            : "client-defined");
         VG_(pp_ExeContext)(ai->lastchange);
         break;
      }
      default:
         VG_(skin_panic)("pp_AcAddrInfo");
   }
}

void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
{
   AddrCheckError* err_extra = err->extra;

   switch (err->ekind) {
      case CoreMemErr:
         /* AddrCheck tracks only addressability, so the read and
            write cases produce the same message. */
         VG_(message)(Vg_UserMsg,
                      "%s contains unaddressable byte(s)", err->string );
         pp_ExeContext();
         break;

      case AddrErr:
         switch (err_extra->axskind) {
            case ReadAxs:
            case WriteAxs:
               /* These two aren't actually differentiated ever. */
               VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
                            err_extra->size );
               break;
            case ExecAxs:
               VG_(message)(Vg_UserMsg, "Jump to the invalid address "
                                        "stated on the next line");
               break;
            default:
               VG_(skin_panic)("pp_SkinError(axskind)");
         }
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case FreeErr:
         VG_(message)(Vg_UserMsg, "Invalid free() / delete / delete[]");
         /* fall through */
      case FreeMismatchErr:
         if (err->ekind == FreeMismatchErr)
            VG_(message)(Vg_UserMsg,
                         "Mismatched free() / delete / delete []");
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case ParamErr:
         if (err_extra->isWrite) {
            VG_(message)(Vg_UserMsg,
                         "Syscall param %s contains unaddressable byte(s)",
                         err->string );
         } else {
            VG_(message)(Vg_UserMsg,
                         "Syscall param %s contains uninitialised or "
                         "unaddressable byte(s)",
                         err->string);
         }
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case UserErr:
         if (err_extra->isWrite) {
            VG_(message)(Vg_UserMsg,
               "Unaddressable byte(s) found during client check request");
         } else {
            VG_(message)(Vg_UserMsg,
               "Uninitialised or "
               "unaddressable byte(s) found during client check request");
         }
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      default:
         VG_(printf)("Error:\n  unknown AddrCheck error code %d\n", err->ekind);
         VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* Describe an address as best you can, for error messages,
   putting the result in ai. */

static void describe_addr ( Addr a, AcAddrInfo* ai )
{
   ShadowChunk* sc;
   ThreadId     tid;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'. */

   /* Closure for searching thread stacks */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(ShadowChunk *sh_ch)
   {
      return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size );
   }

   /* Perhaps it's on a thread's stack? */
   tid = VG_(any_matching_thread_stack)(addr_is_in_bounds);
   if (tid != VG_INVALID_THREADID) {
      ai->akind     = Stack;
      ai->stack_tid = tid;
      return;
   }
   /* Search for a recently freed block which might bracket it. */
   sc = SK_(any_matching_freed_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Freed;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)(sc->data);
      ai->lastchange = (ExeContext*)sc->skin_extra[0];
      return;
   }
   /* Search for a currently malloc'd block which might bracket it. */
   sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Mallocd;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)(sc->data);
      ai->lastchange = (ExeContext*)sc->skin_extra[0];
      return;
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}


/* Creates a copy of the err_extra, updates the copy with address info if
   necessary, sticks the copy into the SkinError. */
void SK_(dup_extra_and_update)(SkinError* err)
{
   AddrCheckError* err_extra;

   err_extra  = VG_(malloc)(sizeof(AddrCheckError));
   *err_extra = *((AddrCheckError*)err->extra);

   if (err_extra->addrinfo.akind == Undescribed)
      describe_addr ( err->addr, &(err_extra->addrinfo) );

   err->extra = err_extra;
}

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
Bool VG_(is_just_below_ESP)( Addr esp, Addr aa )
{
   if ((UInt)esp > (UInt)aa
       && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}

static
void sk_record_address_error ( Addr a, Int size, Bool isWrite )
{
   AddrCheckError err_extra;
   Bool           just_below_esp;

   just_below_esp
      = VG_(is_just_below_ESP)( VG_(get_stack_pointer)(), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (SK_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   clear_AddrCheckError( &err_extra );
   err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
   err_extra.size    = size;
   err_extra.addrinfo.akind     = Undescribed;
   err_extra.addrinfo.maybe_gcc = just_below_esp;
   VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
}

/* These ones are called from non-generated code */

/* This is for memory errors in pthread functions, as opposed to pthread API
   errors which are found by the core. */
void SK_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
{
   AddrCheckError err_extra;

   clear_AddrCheckError( &err_extra );
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
}

void SK_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
                               Char* msg )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);
   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite        = isWrite;
   VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
}

void SK_(record_jump_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.axskind        = ExecAxs;
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_free_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_freemismatch_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite        = isWrite;
   VG_(maybe_record_error)( tst, UserErr, a, /*s*/NULL, &err_extra );
}


/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
                      && VG_(strcmp)((s1),(s2))==0)

Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
{
   if      (STREQ(name, "Param"))   *skind = ParamSupp;
   else if (STREQ(name, "CoreMem")) *skind = CoreMemSupp;
   else if (STREQ(name, "Addr1"))   *skind = Addr1Supp;
   else if (STREQ(name, "Addr2"))   *skind = Addr2Supp;
   else if (STREQ(name, "Addr4"))   *skind = Addr4Supp;
   else if (STREQ(name, "Addr8"))   *skind = Addr8Supp;
   else if (STREQ(name, "Free"))    *skind = FreeSupp;
   else
      return False;

   return True;
}
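
/* A sketch of what a matching suppressions-file entry could look
   like, given the kinds recognised above.  The "AddrCheck:" prefix
   and the overall layout are assumptions based on the usual core
   suppression syntax, not something this file defines:

      {
         ignore-4-byte-access-in-foo
         AddrCheck:Addr4
         fun:foo
      }
*/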

Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
                                        SkinSupp *s )
{
   Bool eof;

   if (s->skind == ParamSupp) {
      eof = VG_(get_line) ( fd, buf, nBuf );
      if (eof) return False;
      s->string = VG_(strdup)(buf);
   }
   return True;
}

extern Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
{
   UInt su_size;
   AddrCheckError* err_extra = err->extra;

   switch (su->skind) {
      case ParamSupp:
         return (err->ekind == ParamErr && STREQ(su->string, err->string));

      case CoreMemSupp:
         return (err->ekind == CoreMemErr && STREQ(su->string, err->string));

      case Addr1Supp: su_size = 1; goto addr_case;
      case Addr2Supp: su_size = 2; goto addr_case;
      case Addr4Supp: su_size = 4; goto addr_case;
      case Addr8Supp: su_size = 8; goto addr_case;
      addr_case:
         return (err->ekind == AddrErr && err_extra->size == su_size);

      case FreeSupp:
         return (err->ekind == FreeErr || err->ekind == FreeMismatchErr);

      default:
         VG_(printf)("Error:\n"
                     "  unknown AddrCheck suppression type %d\n", su->skind);
         VG_(skin_panic)("unknown suppression type in "
                         "SK_(error_matches_suppression)");
   }
}

#undef STREQ


/*--------------------------------------------------------------------*/
/*--- Part of the AddrCheck skin: Maintain bitmaps of memory,     ---*/
/*--- tracking the accessibility (A) of each byte.                ---*/
/*--------------------------------------------------------------------*/

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

Bool  SK_(clo_partial_loads_ok)       = True;
Int   SK_(clo_freelist_vol)           = 1000000;
Bool  SK_(clo_leak_check)             = False;
VgRes SK_(clo_leak_resolution)        = Vg_LowRes;
Bool  SK_(clo_show_reachable)         = False;
Bool  SK_(clo_workaround_gcc296_bugs) = False;
Bool  SK_(clo_cleanup)                = True;
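
/* For instance (a sketch only, assuming the usual core option
   plumbing and that this skin accepts the same option names as
   MemCheck):

      valgrind --skin=addrcheck --leak-check=yes --freelist-vol=2000000 prog

   would flip SK_(clo_leak_check) to True and raise the freed-blocks
   queue volume to 2000000 bytes. */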

/*------------------------------------------------------------*/
/*--- Profiling events                                     ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      VgpCheckMem = VgpFini+1,
      VgpSetMem
   }
   VgpSkinCC;

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility permissions for
   the 65536 bytes indexed by the lower 16 bits of the address.  Each
   byte is represented by one bit, indicating accessibility.  So each
   second-level map contains 8192 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of
   size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible' for
   all bytes.  Entries in the primary map for which the entire 64k is
   not in use at all point at this distinguished map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

      f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
         = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
         = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits are nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address. */
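
/* Worked example of the scheme above (illustrative only).  For a
   byte access at a = 0x40218A37:

      primary index       = a >> 16            = 0x4021
      byte within 2nd map = (a & 0xFFFF) >> 3  = 0x1146
      bit within byte     = a & 7              = 7

   For the combined alignment-and-address check on 4-byte accesses,
   f(addr) is computed as rotateRight16(addr) masked to 18 bits (see
   SK_(helperc_ACCESS4) below):

      a = 0x40218A34  (aligned: low two bits 00)
         f(a) = 0x04021   -- lands in the normal bottom quarter
      a = 0x40218A35  (misaligned: low two bits 01)
         f(a) = 0x14021   -- lands in the top 3/4, hence the
                             distinguished (all-invalid) secondary map
*/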


/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++)
      event_ctr[i] = 0;
}

static void done_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#define PROF_EVENT(ev) /* */

#endif

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   10   alloc_secondary_map

   20   get_abit
   21   get_vbyte
   22   set_abit
   23   set_vbyte
   24   get_abits4_ALIGNED
   25   get_vbytes4_ALIGNED

   30   set_address_range_perms
   31   set_address_range_perms(lower byte loop)
   32   set_address_range_perms(quadword loop)
   33   set_address_range_perms(upper byte loop)

   35   make_noaccess
   36   make_writable
   37   make_readable

   40   copy_address_range_state
   41   copy_address_range_state(byte loop)
   42   check_writable
   43   check_writable(byte loop)
   44   check_readable
   45   check_readable(byte loop)
   46   check_readable_asciiz
   47   check_readable_asciiz(byte loop)

   50   make_aligned_word_NOACCESS
   51   make_aligned_word_WRITABLE

   60   helperc_LOADV4
   61   helperc_STOREV4
   62   helperc_LOADV2
   63   helperc_STOREV2
   64   helperc_LOADV1
   65   helperc_STOREV1

   70   rim_rd_V4_SLOWLY
   71   rim_wr_V4_SLOWLY
   72   rim_rd_V2_SLOWLY
   73   rim_wr_V2_SLOWLY
   74   rim_rd_V1_SLOWLY
   75   rim_wr_V1_SLOWLY

   80   fpu_read
   81   fpu_read aligned 4
   82   fpu_read aligned 8
   83   fpu_read 2
   84   fpu_read 10

   85   fpu_write
   86   fpu_write aligned 4
   87   fpu_write aligned 8
   88   fpu_write 2
   89   fpu_write 10

   90   fpu_read_check_SLOWLY
   91   fpu_read_check_SLOWLY(byte loop)
   92   fpu_write_check_SLOWLY
   93   fpu_write_check_SLOWLY(byte loop)

   100  is_plausible_stack_addr
   101  handle_esp_assignment
   102  handle_esp_assignment(-4)
   103  handle_esp_assignment(+4)
   104  handle_esp_assignment(-12)
   105  handle_esp_assignment(-8)
   106  handle_esp_assignment(+16)
   107  handle_esp_assignment(+12)
   108  handle_esp_assignment(0)
   109  handle_esp_assignment(+8)
   110  handle_esp_assignment(-16)
   111  handle_esp_assignment(+20)
   112  handle_esp_assignment(-20)
   113  handle_esp_assignment(+24)
   114  handle_esp_assignment(-24)

   120  vg_handle_esp_assignment_SLOWLY
   121  vg_handle_esp_assignment_SLOWLY(normal; move down)
   122  vg_handle_esp_assignment_SLOWLY(normal; move up)
   123  vg_handle_esp_assignment_SLOWLY(normal)
   124  vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
*/

/*------------------------------------------------------------*/
/*--- Function declarations.                               ---*/
/*------------------------------------------------------------*/

static void vgmext_ACCESS4_SLOWLY ( Addr a );
static void vgmext_ACCESS2_SLOWLY ( Addr a );
static void vgmext_ACCESS1_SLOWLY ( Addr a );
static void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );

/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[8192];
   }
   AcSecMap;

static AcSecMap* primary_map[ /*65536*/ 262144 ];
static AcSecMap  distinguished_secondary_map;

#define IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                               \
   do {                                                            \
      if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {        \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);  \
         /* VG_(printf)("new 2map because of %p\n", addr); */      \
      }                                                            \
   } while(0)

#define BITARR_SET(aaa_p,iii_p)                  \
   do {                                          \
      UInt   iii = (UInt)iii_p;                  \
      UChar* aaa = (UChar*)aaa_p;                \
      aaa[iii >> 3] |= (1 << (iii & 7));         \
   } while (0)

#define BITARR_CLEAR(aaa_p,iii_p)                \
   do {                                          \
      UInt   iii = (UInt)iii_p;                  \
      UChar* aaa = (UChar*)aaa_p;                \
      aaa[iii >> 3] &= ~(1 << (iii & 7));        \
   } while (0)

#define BITARR_TEST(aaa_p,iii_p)                    \
      (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ]  \
             & (1 << (((UInt)iii_p) & 7))))


#define VGM_BIT_VALID      0
#define VGM_BIT_INVALID    1

#define VGM_NIBBLE_VALID   0
#define VGM_NIBBLE_INVALID 0xF

#define VGM_BYTE_VALID     0
#define VGM_BYTE_INVALID   0xFF

#define VGM_WORD_VALID     0
#define VGM_WORD_INVALID   0xFFFFFFFF

#define VGM_EFLAGS_VALID   0xFFFFFFFE
#define VGM_EFLAGS_INVALID 0xFFFFFFFF   /* not used */

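/* Worked example of the A-bit encoding (illustrative only): each
   abits[] byte covers 8 client bytes, with bit value 0 = valid
   (addressible) and 1 = invalid.  So, for a secondary map sm,

      set_abit(a, VGM_BIT_INVALID)

   (defined below) effectively does

      sm->abits[(a & 0xFFFF) >> 3] |= (1 << (a & 7));

   and a map whose every abits[] byte is VGM_BYTE_INVALID (0xFF)
   marks its whole 64k chunk as unaddressible. */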

static void init_shadow_memory ( void )
{
   Int i;

   for (i = 0; i < 8192; i++)   /* Invalid address */
      distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = 65536; i < 262144; i++)
      primary_map[i] = &distinguished_secondary_map;
}

void SK_(post_clo_init) ( void )
{
}

void SK_(fini) ( void )
{
   VG_(print_malloc_stats)();

   if (VG_(clo_verbosity) == 1) {
      if (!SK_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
             "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();

   done_prof_mem();
}

/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
                                       Char* caller )
{
   AcSecMap* map;
   UInt      i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access and invalid value. */

   /* It just happens that a AcSecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. */
   sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID;   /* Invalid address */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   AcSecMap* sm     = primary_map[a >> 16];
   UInt      sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
   if (IS_DISTINGUISHED_SM(sm))
      VG_(message)(Vg_DebugMsg,
                   "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static __inline__ void set_abit ( Addr a, UChar abit )
{
   AcSecMap* sm;
   UInt      sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}



/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit )
{
   UChar     abyte8;
   UInt      sm_off;
   AcSecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d",
                   len, example_a_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      a   += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}

/* Set permissions for address ranges ... */

void SK_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID );
}

void SK_(make_accessible) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("SK_(make_accessible)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID );
}

/* Block-copy permissions (needed for implementing realloc()). */

static void copy_address_range_state ( Addr src, Addr dst, UInt len )
{
   UInt i;

   DEBUG("copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit = get_abit ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
   }
}


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;

   PROF_EVENT(44);
   DEBUG("SK_(check_readable)\n");
   for (i = 0; i < len; i++) {
      abit = get_abit(a);
      PROF_EVENT(45);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
{
   UChar abit;
   PROF_EVENT(46);
   DEBUG("SK_(check_readable_asciiz)\n");
   while (True) {
      PROF_EVENT(47);
      abit = get_abit(a);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return True;
      a++;
   }
}


/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

/* Setting permissions for aligned words.  This supports fast stack
   operations. */

static void make_noaccess_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(50);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_noaccess_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         invalid (1s). */
      sm->abits[sm_off >> 3] |= mask;
   }
   VGP_POPCC(VgpSetMem);
}

static void make_writable_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(51);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_writable_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         valid (0s). */
      sm->abits[sm_off >> 3] &= ~mask;
   }
   VGP_POPCC(VgpSetMem);
}


static
void check_is_writable ( CorePart part, ThreadState* tst,
                         Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_writable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
         case Vg_CoreSysCall:
            SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
            break;

         case Vg_CorePThread:
         case Vg_CoreSignal:
            SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
            break;

         default:
            VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable ( CorePart part, ThreadState* tst,
                         Char* s, Addr base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_readable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
         case Vg_CoreSysCall:
            SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
            break;

         case Vg_CorePThread:
            SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
            break;

         /* If we're being asked to jump to a silly address, record an error
            message before potentially crashing the entire system. */
         case Vg_CoreTranslate:
            SK_(record_jump_error)( tst, bad_addr );
            break;

         default:
            VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
                                Char* s, Addr str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
   if (!ok) {
      SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void addrcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   SK_(make_accessible)(a, len);
}

static
void addrcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   SK_(make_accessible)(a, len);
}

static
void addrcheck_set_perms (Addr a, UInt len,
                          Bool rr, Bool ww, Bool xx)
{
   DEBUG("addrcheck_set_perms(%p, %u, rr=%u ww=%u, xx=%u)\n",
         a, len, rr, ww, xx);
   if (rr || ww || xx) {
      SK_(make_accessible)(a, len);
   } else {
      SK_(make_noaccess)(a, len);
   }
}


/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.       ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}


static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}


/* Check accesses of size 1, 2 or 4, and emit an address error if
   needed. */

/* SK_(helperc_ACCESS{1,2,4}) handle the common case fast.
   Under all other circumstances, they defer to the relevant _SLOWLY
   function, which can handle all situations.
*/
__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS4_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x3FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   UChar     abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible.  So just return. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS4_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS2_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x1FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(62);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS2_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS1) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS1_SLOWLY(a);
#  else
   UInt      sec_no = shiftRight16(a);
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(64);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS1_SLOWLY(a);
   }
#  endif
}


/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- SK_(helperc_ACCESS{1,2,4}) can't manage.             ---*/
/*------------------------------------------------------------*/

static void vgmext_ACCESS4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      return;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!SK_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      sk_record_address_error( a, 4, False );
      return;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(SK_(clo_partial_loads_ok));
   return;
}

static void vgmext_ACCESS2_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(72);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      sk_record_address_error( a, 2, False );
   }
}

static void vgmext_ACCESS1_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(74);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      sk_record_address_error( a, 1, False );
   }
}


/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */

__attribute__ ((regparm(2)))
void SK_(fpu_ACCESS_check) ( Addr addr, Int size )
{
   /* Ensure the accessed area is addressible.  (Unlike MemCheck, this
      skin has no notion of validity, so there are no value errors to
      report here.)

      Try to be reasonably fast on the common case; wimp out and defer
      to fpu_ACCESS_check_SLOWLY for everything else. */

   AcSecMap* sm;
   UInt      sm_off, a_off;
   Addr      addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   fpu_ACCESS_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      return;
     slow4:
      fpu_ACCESS_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      /* Both halves properly aligned and addressible. */
      return;
     slow8:
      fpu_ACCESS_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      fpu_ACCESS_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 10) {
      PROF_EVENT(84);
      fpu_ACCESS_check_SLOWLY ( addr, 10 );
      return;
   }

   if (size == 28 || size == 108) {
      PROF_EVENT(84); /* XXX assign correct event number */
      fpu_ACCESS_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
#  endif
}


/* ---------------------------------------------------------------------
   Slow, general cases for FPU access checks.
   ------------------------------------------------------------------ */

void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Bool aerr = False;
   PROF_EVENT(90);
   for (i = 0; i < size; i++) {
      PROF_EVENT(91);
      if (get_abit(addr+i) != VGM_BIT_VALID)
         aerr = True;
   }

   if (aerr) {
      sk_record_address_error( addr, size, False );
   }
}


/*------------------------------------------------------------*/
/*--- Shadow chunks info                                   ---*/
/*------------------------------------------------------------*/

static __inline__
void set_where( ShadowChunk* sc, ExeContext* ec )
{
   sc->skin_extra[0] = (UInt)ec;
}

static __inline__
ExeContext *get_where( ShadowChunk* sc )
{
   return (ExeContext*)sc->skin_extra[0];
}

void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   set_where( sc, VG_(get_ExeContext) ( tst ) );
}

/*------------------------------------------------------------*/
/*--- Postponing free()ing                                 ---*/
/*------------------------------------------------------------*/

/* Holds blocks after freeing. */
static ShadowChunk* vg_freed_list_start  = NULL;
static ShadowChunk* vg_freed_list_end    = NULL;
static Int          vg_freed_list_volume = 0;

static __attribute__ ((unused))
       Int count_freelist ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n++;
   return n;
}

static __attribute__ ((unused))
       void freelist_sanity ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   /* VG_(printf)("freelist sanity\n"); */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n += sc->size;
   sk_assert(n == vg_freed_list_volume);
}

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (vg_freed_list_end == NULL) {
      sk_assert(vg_freed_list_start == NULL);
      vg_freed_list_end    = vg_freed_list_start = sc;
      vg_freed_list_volume = sc->size;
   } else {
      sk_assert(vg_freed_list_end->next == NULL);
      vg_freed_list_end->next = sc;
      vg_freed_list_end       = sc;
      vg_freed_list_volume += sc->size;
   }
   sc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(vg_freed_list_start != NULL);
      sk_assert(vg_freed_list_end != NULL);

      sc1 = vg_freed_list_start;
      vg_freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
      sk_assert(vg_freed_list_volume >= 0);

      if (vg_freed_list_start == vg_freed_list_end) {
         vg_freed_list_start = vg_freed_list_end = NULL;
      } else {
         vg_freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
ShadowChunk* SK_(any_matching_freed_ShadowChunks)
                        ( Bool (*p) ( ShadowChunk* ))
{
   ShadowChunk* sc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      if (p(sc))
         return sc;

   return NULL;
}

void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
{
   /* Record where freed */
   set_where( sc, VG_(get_ExeContext) ( tst ) );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
}


/*------------------------------------------------------------*/
/*--- Our instrumenter                                     ---*/
/*------------------------------------------------------------*/

UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
{
/* Use this rather than eg. -1 because it's a UInt. */
#define INVALID_DATA_SIZE   999999

   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_addr, t_size;

   cb = VG_(alloc_UCodeBlock)();
   cb->nextTemp = cb_in->nextTemp;

   for (i = 0; i < cb_in->used; i++) {

      t_addr = t_size = INVALID_TEMPREG;
      u_in = &cb_in->instrs[i];

      switch (u_in->opcode) {
         case NOP:  case LOCK:  case CALLM_E:  case CALLM_S:
            break;

         /* For memory-ref instrs, copy the data_addr into a temporary
            to be passed to the access-check helper at the end of the
            instruction. */
1761 case LOAD:
1762 t_addr = u_in->val1;
1763 goto do_LOAD_or_STORE;
1764 case STORE: t_addr = u_in->val2;
1765 goto do_LOAD_or_STORE;
1766 do_LOAD_or_STORE:
1767 uInstr1(cb, CCALL, 0, TempReg, t_addr);
1768 switch (u_in->size) {
njn4ba5a792002-09-30 10:23:54 +00001769 case 4: uCCall(cb, (Addr)&SK_(helperc_ACCESS4), 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001770 break;
njn4ba5a792002-09-30 10:23:54 +00001771 case 2: uCCall(cb, (Addr)&SK_(helperc_ACCESS2), 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001772 break;
njn4ba5a792002-09-30 10:23:54 +00001773 case 1: uCCall(cb, (Addr)&SK_(helperc_ACCESS1), 1, 1, False );
njn25e49d8e72002-09-23 09:36:25 +00001774 break;
1775 default:
njne427a662002-10-02 11:08:25 +00001776 VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
njn25e49d8e72002-09-23 09:36:25 +00001777 }
njn4ba5a792002-09-30 10:23:54 +00001778 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001779 break;
1780
1781 case FPU_R:
1782 case FPU_W:
1783 t_addr = u_in->val2;
1784 t_size = newTemp(cb);
1785 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1786 uLiteral(cb, u_in->size);
1787 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
njn4ba5a792002-09-30 10:23:54 +00001788 uCCall(cb, (Addr)&SK_(fpu_ACCESS_check), 2, 2, False );
1789 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001790 break;
1791
1792 default:
njn4ba5a792002-09-30 10:23:54 +00001793 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001794 break;
1795 }
1796 }
1797
njn4ba5a792002-09-30 10:23:54 +00001798 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001799 return cb;
1800}
1801
1802
njn25e49d8e72002-09-23 09:36:25 +00001803/*------------------------------------------------------------*/
1804/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1805/*------------------------------------------------------------*/
1806
sewardja4495682002-10-21 07:29:59 +00001807/* For the memory leak detector, say whether an entire 64k chunk of
1808 address space is possibly in use, or not. If in doubt return
1809 True.
njn25e49d8e72002-09-23 09:36:25 +00001810*/
sewardja4495682002-10-21 07:29:59 +00001811static
1812Bool ac_is_valid_64k_chunk ( UInt chunk_number )
njn25e49d8e72002-09-23 09:36:25 +00001813{
sewardja4495682002-10-21 07:29:59 +00001814 sk_assert(chunk_number >= 0 && chunk_number < 65536);
1815 if (IS_DISTINGUISHED_SM(primary_map[chunk_number])) {
1816 /* Definitely not in use. */
1817 return False;
1818 } else {
1819 return True;
njn25e49d8e72002-09-23 09:36:25 +00001820 }
1821}
1822
1823
sewardja4495682002-10-21 07:29:59 +00001824/* For the memory leak detector, say whether or not a given word
1825 address is to be regarded as valid. */
1826static
1827Bool ac_is_valid_address ( Addr a )
1828{
1829 UChar abits;
1830 sk_assert(IS_ALIGNED4_ADDR(a));
1831 abits = get_abits4_ALIGNED(a);
1832 if (abits == VGM_NIBBLE_VALID) {
1833 return True;
1834 } else {
1835 return False;
1836 }
1837}


/* Leak detector for this skin.  We don't actually do anything, merely
   run the generic leak detector with suitable parameters for this
   skin. */
void SK_(detect_memory_leaks) ( void )
{
   VG_(generic_detect_memory_leaks) (
      ac_is_valid_64k_chunk,
      ac_is_valid_address,
      get_where,
      SK_(clo_leak_resolution),
      SK_(clo_show_reachable)
   );
}


/* ---------------------------------------------------------------------
   Sanity check machinery (permanently engaged).
   ------------------------------------------------------------------ */

/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failures of
   the following checks do not per se indicate an internal consistency
   problem, but they are so likely to that we really want to know
   about it if so. */

Bool SK_(cheap_sanity_check) ( void )
{
   if (IS_DISTINGUISHED_SM(primary_map[0]) &&
       IS_DISTINGUISHED_SM(primary_map[65535]))
      return True;
   else
      return False;
}
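
/* (primary_map[0] covers addresses 0x0 .. 0xFFFF and primary_map[65535]
   covers 0xFFFF0000 .. 0xFFFFFFFF, i.e. the first and last sixteen
   4KB pages of the address space.) */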

Bool SK_(expensive_sanity_check) ( void )
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
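   /* (8192 bytes of abits == 65536 A bits: one bit for each byte of
      the 64KB chunk that a secondary map describes.) */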
   for (i = 0; i < 8192; i++)
      if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
         return False;

   /* Make sure that the upper 3/4 of the primary map hasn't
      been messed with. */
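   /* (Entries 65536 .. 262143 lie above what a 32-bit address can
      index, presumably as an overflow safety margin; they must all
      still point at the distinguished secondary map.) */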
   for (i = 65536; i < 262144; i++)
      if (primary_map[i] != & distinguished_secondary_map)
         return False;

   return True;
}

/* ---------------------------------------------------------------------
   Debugging machinery (turn on to debug).  Something of a mess.
   ------------------------------------------------------------------ */

#if 0
/* Print the value tags on the 8 integer registers & flag reg. */

static void uint_to_bits ( UInt x, Char* str )
{
   Int i;
   Int w = 0;
   /* str must point to a space of at least 36 bytes. */
   for (i = 31; i >= 0; i--) {
      str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
      if (i == 24 || i == 16 || i == 8)
         str[w++] = ' ';
   }
   str[w++] = 0;
   sk_assert(w == 36);
}

/* Caution!  Not vthread-safe; looks in VG_(baseBlock), not the thread
   state table. */

static void vg_show_reg_tags ( void )
{
   Char buf1[36];
   Char buf2[36];
   UInt z_eax, z_ebx, z_ecx, z_edx,
        z_esi, z_edi, z_ebp, z_esp, z_eflags;

   z_eax    = VG_(baseBlock)[VGOFF_(sh_eax)];
   z_ebx    = VG_(baseBlock)[VGOFF_(sh_ebx)];
   z_ecx    = VG_(baseBlock)[VGOFF_(sh_ecx)];
   z_edx    = VG_(baseBlock)[VGOFF_(sh_edx)];
   z_esi    = VG_(baseBlock)[VGOFF_(sh_esi)];
   z_edi    = VG_(baseBlock)[VGOFF_(sh_edi)];
   z_ebp    = VG_(baseBlock)[VGOFF_(sh_ebp)];
   z_esp    = VG_(baseBlock)[VGOFF_(sh_esp)];
   z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];

   uint_to_bits(z_eflags, buf1);
   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);

   uint_to_bits(z_eax, buf1);
   uint_to_bits(z_ebx, buf2);
   VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);

   uint_to_bits(z_ecx, buf1);
   uint_to_bits(z_edx, buf2);
   VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);

   uint_to_bits(z_esi, buf1);
   uint_to_bits(z_edi, buf2);
   VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);

   uint_to_bits(z_ebp, buf1);
   uint_to_bits(z_esp, buf2);
   VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
}


/* For debugging only.  Scan the address space and touch all allegedly
   addressible words.  Useful for establishing where Valgrind's idea of
   addressibility has diverged from what the kernel believes. */

static
void zzzmemscan_notify_word ( Addr a, UInt w )
{
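   /* Deliberately a no-op: only the count of notified words, as
      returned by the scan, is of interest. */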
}

void zzzmemscan ( void )
{
   Int n_notifies
      = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
   VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
}
#endif


#if 0
static Int zzz = 0;

void show_bb ( Addr eip_next )
{
   VG_(printf)("[%4d] ", zzz);
   vg_show_reg_tags();
   VG_(translate) ( eip_next, NULL, NULL, NULL );
}
#endif /* 0 */

/*------------------------------------------------------------*/
/*--- Syscall wrappers                                     ---*/
/*------------------------------------------------------------*/
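
/* The cheap sanity check runs on both sides of every syscall: the
   pre-call verdict is threaded through as the opaque pre_result
   pointer and compared with the post-call verdict, so a syscall that
   tramples the shadow maps is caught straight away. */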

void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
{
   Int sane = SK_(cheap_sanity_check)();
   return (void*)sane;
}

void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
                         void* pre_result, Int res, Bool isBlocking )
{
   Int  sane_before_call = (Int)pre_result;
   Bool sane_after_call  = SK_(cheap_sanity_check)();

   if (sane_before_call && (!sane_after_call)) {
      VG_(message)(Vg_DebugMsg, "post-syscall: ");
      VG_(message)(Vg_DebugMsg,
                   "probable sanity check failure for syscall number %d\n",
                   syscallno );
      VG_(skin_panic)("aborting due to the above ... bye!");
   }
}


/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
{
   *gen_reg_value = VGM_WORD_VALID;
   *eflags_value  = VGM_EFLAGS_VALID;
}

Bool SK_(process_cmd_line_option)(Char* arg)
{
#  define STREQ(s1,s2)     (0==VG_(strcmp_ws)((s1),(s2)))
#  define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))

   if      (STREQ(arg, "--partial-loads-ok=yes"))
      SK_(clo_partial_loads_ok) = True;
   else if (STREQ(arg, "--partial-loads-ok=no"))
      SK_(clo_partial_loads_ok) = False;

   else if (STREQN(15, arg, "--freelist-vol=")) {
      SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
      if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
   }

   else if (STREQ(arg, "--leak-check=yes"))
      SK_(clo_leak_check) = True;
   else if (STREQ(arg, "--leak-check=no"))
      SK_(clo_leak_check) = False;

   else if (STREQ(arg, "--leak-resolution=low"))
      SK_(clo_leak_resolution) = Vg_LowRes;
   else if (STREQ(arg, "--leak-resolution=med"))
      SK_(clo_leak_resolution) = Vg_MedRes;
   else if (STREQ(arg, "--leak-resolution=high"))
      SK_(clo_leak_resolution) = Vg_HighRes;

   else if (STREQ(arg, "--show-reachable=yes"))
      SK_(clo_show_reachable) = True;
   else if (STREQ(arg, "--show-reachable=no"))
      SK_(clo_show_reachable) = False;

   else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
      SK_(clo_workaround_gcc296_bugs) = True;
   else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
      SK_(clo_workaround_gcc296_bugs) = False;

   else if (STREQ(arg, "--cleanup=yes"))
      SK_(clo_cleanup) = True;
   else if (STREQ(arg, "--cleanup=no"))
      SK_(clo_cleanup) = False;

   else
      return False;

   return True;

#  undef STREQ
#  undef STREQN
}
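
/* For reference, a hypothetical invocation exercising these options
   (the exact command-line syntax depends on the Valgrind version in
   use):

      valgrind --skin=addrcheck --leak-check=yes \
               --leak-resolution=med --show-reachable=yes ./myprog
*/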

Char* SK_(usage)(void)
{
   return
"    --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
"    --freelist-vol=<number>   volume of freed blocks queue [1000000]\n"
"    --leak-check=no|yes       search for memory leaks at exit? [no]\n"
"    --leak-resolution=low|med|high\n"
"                              amount of backtrace merging in leak check [low]\n"
"    --show-reachable=no|yes   show reachable blocks in leak check? [no]\n"
"    --workaround-gcc296-bugs=no|yes  self-explanatory [no]\n"
"    --check-addrVs=no|yes     experimental lighter-weight checking? [yes]\n"
"                              yes == Valgrind's original behaviour\n"
"\n"
"    --cleanup=no|yes          improve after instrumentation? [yes]\n";
}


/*------------------------------------------------------------*/
/*--- Setup                                                ---*/
/*------------------------------------------------------------*/

void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
{
   details->name             = "Addrcheck";
   details->version          = NULL;
   details->description      = "a fine-grained address checker";
   details->copyright_author =
      "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.";
   details->bug_reports_to   = "jseward@acm.org";

   needs->core_errors          = True;
   needs->skin_errors          = True;
   needs->libc_freeres         = True;
   needs->sizeof_shadow_block  = 1;
   needs->basic_block_discards = False;
   needs->shadow_regs          = False;
   needs->command_line_options = True;
   needs->client_requests      = True;
   needs->extended_UCode       = False;
   needs->syscall_wrapper      = True;
   needs->alternative_free     = True;
   needs->sanity_checks        = True;

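   /* AddrCheck tracks addressibility only: every "new memory" event
      just makes the range accessible and every "dead memory" event
      makes it inaccessible; there is no definedness state to keep. */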
   track->new_mem_startup       = & addrcheck_new_mem_startup;
   track->new_mem_heap          = & addrcheck_new_mem_heap;
   track->new_mem_stack         = & SK_(make_accessible);
   track->new_mem_stack_aligned = & make_writable_aligned;
   track->new_mem_stack_signal  = & SK_(make_accessible);
   track->new_mem_brk           = & SK_(make_accessible);
   track->new_mem_mmap          = & addrcheck_set_perms;

   track->copy_mem_heap         = & copy_address_range_state;
   track->copy_mem_remap        = & copy_address_range_state;
   track->change_mem_mprotect   = & addrcheck_set_perms;

   track->ban_mem_heap          = & SK_(make_noaccess);
   track->ban_mem_stack         = & SK_(make_noaccess);

   track->die_mem_heap          = & SK_(make_noaccess);
   track->die_mem_stack         = & SK_(make_noaccess);
   track->die_mem_stack_aligned = & make_noaccess_aligned;
   track->die_mem_stack_signal  = & SK_(make_noaccess);
   track->die_mem_brk           = & SK_(make_noaccess);
   track->die_mem_munmap        = & SK_(make_noaccess);

   track->bad_free              = & SK_(record_free_error);
   track->mismatched_free       = & SK_(record_freemismatch_error);

   track->pre_mem_read          = & check_is_readable;
   track->pre_mem_read_asciiz   = & check_is_readable_asciiz;
   track->pre_mem_write         = & check_is_writable;
   track->post_mem_write        = & SK_(make_accessible);

   VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS4));
   VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS2));
   VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS1));
   VG_(register_compact_helper)((Addr) & SK_(fpu_ACCESS_check));

   VGP_(register_profile_event) ( VgpSetMem,   "set-mem-perms" );
   VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );

   init_shadow_memory();
   init_prof_mem();
}

/*--------------------------------------------------------------------*/
/*--- end                                               ac_main.c ---*/
/*--------------------------------------------------------------------*/