
/*--------------------------------------------------------------------*/
/*--- The AddrCheck skin: like MemCheck, but only does address     ---*/
/*--- checking.  No definedness checking.                          ---*/
/*---                                                    ac_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of AddrCheck, a lightweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "ac_include.h"
//#include "vg_profile.c"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* This many bytes below %ESP are considered addressible if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024
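
/* Worked example, illustrative only (the addresses are made up): with
   --workaround-gcc296-bugs=yes and %esp == 0xBFFF1000, an otherwise
   unaddressible access at 0xBFFF0FFC (4 bytes below %esp, within the
   1024-byte slop) is silently ignored, whereas one at 0xBFFF0BFC
   (1028 bytes below) is still reported.  See VG_(is_just_below_ESP)
   below, which implements exactly this test. */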


typedef
   enum {
      /* Bad syscall params */
      ParamSupp,
      /* Memory errors in core (pthread ops, signal handling) */
      CoreMemSupp,
      /* Invalid read/write attempt at given size */
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp,
      /* Invalid or mismatching free */
      FreeSupp
   }
   AddrCheckSuppKind;

/* What kind of error it is. */
typedef
   enum { CoreMemErr,
          AddrErr,
          ParamErr, UserErr,  /* behaves like an anonymous ParamErr */
          FreeErr, FreeMismatchErr
   }
   AddrCheckErrorKind;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {
      /* AddrErr */
      AxsKind axskind;
      /* AddrErr */
      Int size;
      /* AddrErr, FreeErr, FreeMismatchErr, ParamErr, UserErr */
      AcAddrInfo addrinfo;
      /* ParamErr, UserErr, CoreMemErr */
      Bool isWrite;
   }
   AddrCheckError;

/*------------------------------------------------------------*/
/*--- Comparing and printing errors                        ---*/
/*------------------------------------------------------------*/

static __inline__
void clear_AcAddrInfo ( AcAddrInfo* ai )
{
   ai->akind      = Unknown;
   ai->blksize    = 0;
   ai->rwoffset   = 0;
   ai->lastchange = NULL;
   ai->stack_tid  = VG_INVALID_THREADID;
   ai->maybe_gcc  = False;
}

static __inline__
void clear_AddrCheckError ( AddrCheckError* err_extra )
{
   err_extra->axskind = ReadAxs;
   err_extra->size    = 0;
   clear_AcAddrInfo ( &err_extra->addrinfo );
   err_extra->isWrite = False;
}

__attribute__((unused))
static Bool eq_AcAddrInfo ( VgRes res, AcAddrInfo* ai1, AcAddrInfo* ai2 )
{
   if (ai1->akind != Undescribed
       && ai2->akind != Undescribed
       && ai1->akind != ai2->akind)
      return False;
   if (ai1->akind == Freed || ai1->akind == Mallocd) {
      if (ai1->blksize != ai2->blksize)
         return False;
      if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
         return False;
   }
   return True;
}

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different. */

Bool SK_(eq_SkinError) ( VgRes res,
                         SkinError* e1, SkinError* e2 )
{
   AddrCheckError* e1_extra = e1->extra;
   AddrCheckError* e2_extra = e2->extra;

   switch (e1->ekind) {
      case CoreMemErr:
         if (e2->ekind != CoreMemErr) return False;
         if (e1_extra->isWrite != e2_extra->isWrite) return False;
         if (e1->string == e2->string) return True;
         if (0 == VG_(strcmp)(e1->string, e2->string)) return True;
         return False;

      case UserErr:
      case ParamErr:
         if (e1_extra->isWrite != e2_extra->isWrite)
            return False;
         if (e1->ekind == ParamErr
             && 0 != VG_(strcmp)(e1->string, e2->string))
            return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors.  Not even AddrErr
            below does that.  So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      default:
         VG_(printf)("Error:\n  unknown AddrCheck error code %d\n", e1->ekind);
         VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
   }
}

static void pp_AcAddrInfo ( Addr a, AcAddrInfo* ai )
{
   switch (ai->akind) {
      case Stack:
         VG_(message)(Vg_UserMsg,
                      "   Address 0x%x is on thread %d's stack",
                      a, ai->stack_tid);
         break;
      case Unknown:
         if (ai->maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               "   Address 0x%x is just below %%esp.  Possibly a bug in GCC/G++",
               a);
            VG_(message)(Vg_UserMsg,
               "   v 2.96 or 3.0.X.  To suppress, use: --workaround-gcc296-bugs=yes");
         } else {
            VG_(message)(Vg_UserMsg,
               "   Address 0x%x is not stack'd, malloc'd or free'd", a);
         }
         break;
      case Freed: case Mallocd: {
         UInt delta;
         UChar* relative;
         if (ai->rwoffset < 0) {
            delta    = (UInt)(- ai->rwoffset);
            relative = "before";
         } else if (ai->rwoffset >= ai->blksize) {
            delta    = ai->rwoffset - ai->blksize;
            relative = "after";
         } else {
            delta    = ai->rwoffset;
            relative = "inside";
         }
         VG_(message)(Vg_UserMsg,
            "   Address 0x%x is %d bytes %s a block of size %d %s",
            a, delta, relative,
            ai->blksize,
            ai->akind==Mallocd ? "alloc'd"
               : ai->akind==Freed ? "free'd"
                                  : "client-defined");
         VG_(pp_ExeContext)(ai->lastchange);
         break;
      }
      default:
         VG_(skin_panic)("pp_AcAddrInfo");
   }
}

void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
{
   AddrCheckError* err_extra = err->extra;

   switch (err->ekind) {
      case CoreMemErr:
         /* AddrCheck tracks addressibility only, so reads and writes
            get the same message. */
         VG_(message)(Vg_UserMsg,
            "%s contains unaddressable byte(s)", err->string );
         pp_ExeContext();
         break;

      case AddrErr:
         switch (err_extra->axskind) {
            case ReadAxs:
            case WriteAxs:
               /* These two are never actually differentiated. */
               VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
                                        err_extra->size );
               break;
            case ExecAxs:
               VG_(message)(Vg_UserMsg, "Jump to the invalid address "
                                        "stated on the next line");
               break;
            default:
               VG_(skin_panic)("pp_SkinError(axskind)");
         }
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case FreeErr:
         VG_(message)(Vg_UserMsg, "Invalid free() / delete / delete[]");
         /* fall through */
      case FreeMismatchErr:
         if (err->ekind == FreeMismatchErr)
            VG_(message)(Vg_UserMsg,
                         "Mismatched free() / delete / delete []");
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case ParamErr:
         if (err_extra->isWrite) {
            VG_(message)(Vg_UserMsg,
               "Syscall param %s contains unaddressable byte(s)",
               err->string );
         } else {
            VG_(message)(Vg_UserMsg,
               "Syscall param %s contains uninitialised or "
               "unaddressable byte(s)",
               err->string);
         }
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      case UserErr:
         if (err_extra->isWrite) {
            VG_(message)(Vg_UserMsg,
               "Unaddressable byte(s) found during client check request");
         } else {
            VG_(message)(Vg_UserMsg,
               "Uninitialised or "
               "unaddressable byte(s) found during client check request");
         }
         pp_ExeContext();
         pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
         break;

      default:
         VG_(printf)("Error:\n  unknown AddrCheck error code %d\n", err->ekind);
         VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* Describe an address as best you can, for error messages,
   putting the result in ai. */

static void describe_addr ( Addr a, AcAddrInfo* ai )
{
   ShadowChunk* sc;
   ThreadId     tid;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'. */

   /* Closure for searching thread stacks */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(ShadowChunk *sh_ch)
   {
      return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size );
   }
   /* Perhaps it's on a thread's stack? */
   tid = VG_(any_matching_thread_stack)(addr_is_in_bounds);
   if (tid != VG_INVALID_THREADID) {
      ai->akind     = Stack;
      ai->stack_tid = tid;
      return;
   }
   /* Search for a recently freed block which might bracket it. */
   sc = SK_(any_matching_freed_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Freed;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)(sc->data);
      ai->lastchange = (ExeContext*)sc->skin_extra[0];
      return;
   }
   /* Search for a currently malloc'd block which might bracket it. */
   sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Mallocd;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)(sc->data);
      ai->lastchange = (ExeContext*)sc->skin_extra[0];
      return;
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}


/* Creates a copy of the err_extra, updates the copy with address info if
   necessary, sticks the copy into the SkinError. */
void SK_(dup_extra_and_update)(SkinError* err)
{
   AddrCheckError* err_extra;

   err_extra  = VG_(malloc)(sizeof(AddrCheckError));
   *err_extra = *((AddrCheckError*)err->extra);

   if (err_extra->addrinfo.akind == Undescribed)
      describe_addr ( err->addr, &(err_extra->addrinfo) );

   err->extra = err_extra;
}

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
Bool VG_(is_just_below_ESP)( Addr esp, Addr aa )
{
   if ((UInt)esp > (UInt)aa
       && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}

static
void sk_record_address_error ( Addr a, Int size, Bool isWrite )
{
   AddrCheckError err_extra;
   Bool           just_below_esp;

   just_below_esp
      = VG_(is_just_below_ESP)( VG_(get_stack_pointer)(), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (SK_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   clear_AddrCheckError( &err_extra );
   err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
   err_extra.size    = size;
   err_extra.addrinfo.akind     = Undescribed;
   err_extra.addrinfo.maybe_gcc = just_below_esp;
   VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
}

/* These ones are called from non-generated code */

/* This is for memory errors in pthread functions, as opposed to pthread API
   errors which are found by the core. */
void SK_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
{
   AddrCheckError err_extra;

   clear_AddrCheckError( &err_extra );
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
}

void SK_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
                               Char* msg )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);
   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
}

void SK_(record_jump_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.axskind = ExecAxs;
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_free_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_freemismatch_error) ( ThreadState* tst, Addr a )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
}

void SK_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite )
{
   AddrCheckError err_extra;

   sk_assert(NULL != tst);

   clear_AddrCheckError( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite        = isWrite;
   VG_(maybe_record_error)( tst, UserErr, a, /*s*/NULL, &err_extra );
}


/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
                      && VG_(strcmp)((s1),(s2))==0)

Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
{
   if      (STREQ(name, "Param"))   *skind = ParamSupp;
   else if (STREQ(name, "CoreMem")) *skind = CoreMemSupp;
   else if (STREQ(name, "Addr1"))   *skind = Addr1Supp;
   else if (STREQ(name, "Addr2"))   *skind = Addr2Supp;
   else if (STREQ(name, "Addr4"))   *skind = Addr4Supp;
   else if (STREQ(name, "Addr8"))   *skind = Addr8Supp;
   else if (STREQ(name, "Free"))    *skind = FreeSupp;
   else
      return False;

   return True;
}
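
/* Illustrative sketch only -- the suppression-file syntax is parsed by
   the core, not here, so treat the exact layout as an assumption.  An
   entry using the "Addr4" kind recognised above might look like:

      {
         my-libc-read-suppression
         Addr4
         fun:__libc_read
      }

   and, per SK_(read_extra_suppression_info) below, a "Param" entry
   carries one further line naming the syscall parameter. */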

Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
                                        SkinSupp *s )
{
   Bool eof;

   if (s->skind == ParamSupp) {
      eof = VG_(get_line) ( fd, buf, nBuf );
      if (eof) return False;
      s->string = VG_(strdup)(buf);
   }
   return True;
}

Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
{
   UInt su_size;
   AddrCheckError* err_extra = err->extra;

   switch (su->skind) {
      case ParamSupp:
         return (err->ekind == ParamErr && STREQ(su->string, err->string));

      case CoreMemSupp:
         return (err->ekind == CoreMemErr && STREQ(su->string, err->string));

      case Addr1Supp: su_size = 1; goto addr_case;
      case Addr2Supp: su_size = 2; goto addr_case;
      case Addr4Supp: su_size = 4; goto addr_case;
      case Addr8Supp: su_size = 8; goto addr_case;
      addr_case:
         /* The suppression matches only if the access size agrees. */
         return (err->ekind == AddrErr && err_extra->size == su_size);

      case FreeSupp:
         return (err->ekind == FreeErr || err->ekind == FreeMismatchErr);

      default:
         VG_(printf)("Error:\n"
                     "  unknown AddrCheck suppression type %d\n", su->skind);
         VG_(skin_panic)("unknown suppression type in "
                         "SK_(error_matches_suppression)");
   }
}

#undef STREQ


/*--------------------------------------------------------------------*/
/*--- Part of the AddrCheck skin: Maintain bitmaps of memory,     ---*/
/*--- tracking the accessibility (A) of each byte.                ---*/
/*--------------------------------------------------------------------*/

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

Bool  SK_(clo_partial_loads_ok)       = True;
Int   SK_(clo_freelist_vol)           = 1000000;
Bool  SK_(clo_leak_check)             = False;
VgRes SK_(clo_leak_resolution)        = Vg_LowRes;
Bool  SK_(clo_show_reachable)         = False;
Bool  SK_(clo_workaround_gcc296_bugs) = False;
Bool  SK_(clo_cleanup)                = True;

/*------------------------------------------------------------*/
/*--- Profiling events                                     ---*/
/*------------------------------------------------------------*/

typedef
   enum {
      VgpCheckMem = VgpFini+1,
      VgpSetMem
   }
   VgpSkinCC;

/*------------------------------------------------------------*/
/*--- Low-level support for memory checking.               ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map, which
   records the state of all memory in the process.  The memory map is
   organised like this:

   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by one bit, indicating
   accessibility.  So each second-level map contains 8192 bytes.  This
   two-level arrangement conveniently divides the 4G address space
   into 64k lumps, each of size 64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since most of the 4G of address
   space will not be in use -- ie, not mapped at all -- there is a
   distinguished secondary map, which indicates `not addressible and
   not valid' for all bytes.  Entries in the primary map for which
   the entire 64k is not in use at all point at this distinguished
   map.

   [...] lots of stuff deleted due to out of date-ness

   As a final optimisation, the alignment and address checks for
   4-byte loads and stores are combined in a neat way.  The primary
   map is extended to have 262144 entries (2^18), rather than 2^16.
   The top 3/4 of these entries are permanently set to the
   distinguished secondary map.  For a 4-byte load/store, the
   top-level map is indexed not with (addr >> 16) but instead f(addr),
   where

    f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
        = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX  or
        = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX

   ie the lowest two bits are placed above the 16 high address bits.
   If either of these two bits is nonzero, the address is misaligned;
   this will select a secondary map from the upper 3/4 of the primary
   map.  Because this is always the distinguished secondary map, a
   (bogus) address check failure will result.  The failure handling
   code can then figure out whether this is a genuine addr check
   failure or whether it is a possibly-legitimate access at a
   misaligned address.  */
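
/* Worked example of f(addr), illustrative only (the concrete address is
   made up): for the aligned address 0x40102A84 (low two bits 00),
   rotating the low 16 bits above the high 16 gives primary-map index
   0x4010 -- an ordinary entry in the lower quarter of the map.  For the
   misaligned 0x40102A85 (low two bits 01), bit 16 of the index becomes
   set, giving 0x14010 -- in the upper 3/4, hence the distinguished
   secondary map and a (deliberate) slow-path failure.  rotateRight16()
   and SK_(helperc_ACCESS4) below implement exactly this. */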


/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++)
      event_ctr[i] = 0;
}

static void done_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#define PROF_EVENT(ev) /* */

#endif

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   10   alloc_secondary_map

   20   get_abit
   21   get_vbyte
   22   set_abit
   23   set_vbyte
   24   get_abits4_ALIGNED
   25   get_vbytes4_ALIGNED

   30   set_address_range_perms
   31   set_address_range_perms(lower byte loop)
   32   set_address_range_perms(quadword loop)
   33   set_address_range_perms(upper byte loop)

   35   make_noaccess
   36   make_writable
   37   make_readable

   40   copy_address_range_state
   41   copy_address_range_state(byte loop)
   42   check_writable
   43   check_writable(byte loop)
   44   check_readable
   45   check_readable(byte loop)
   46   check_readable_asciiz
   47   check_readable_asciiz(byte loop)

   50   make_aligned_word_NOACCESS
   51   make_aligned_word_WRITABLE

   60   helperc_LOADV4
   61   helperc_STOREV4
   62   helperc_LOADV2
   63   helperc_STOREV2
   64   helperc_LOADV1
   65   helperc_STOREV1

   70   rim_rd_V4_SLOWLY
   71   rim_wr_V4_SLOWLY
   72   rim_rd_V2_SLOWLY
   73   rim_wr_V2_SLOWLY
   74   rim_rd_V1_SLOWLY
   75   rim_wr_V1_SLOWLY

   80   fpu_read
   81   fpu_read aligned 4
   82   fpu_read aligned 8
   83   fpu_read 2
   84   fpu_read 10

   85   fpu_write
   86   fpu_write aligned 4
   87   fpu_write aligned 8
   88   fpu_write 2
   89   fpu_write 10

   90   fpu_read_check_SLOWLY
   91   fpu_read_check_SLOWLY(byte loop)
   92   fpu_write_check_SLOWLY
   93   fpu_write_check_SLOWLY(byte loop)

   100  is_plausible_stack_addr
   101  handle_esp_assignment
   102  handle_esp_assignment(-4)
   103  handle_esp_assignment(+4)
   104  handle_esp_assignment(-12)
   105  handle_esp_assignment(-8)
   106  handle_esp_assignment(+16)
   107  handle_esp_assignment(+12)
   108  handle_esp_assignment(0)
   109  handle_esp_assignment(+8)
   110  handle_esp_assignment(-16)
   111  handle_esp_assignment(+20)
   112  handle_esp_assignment(-20)
   113  handle_esp_assignment(+24)
   114  handle_esp_assignment(-24)

   120  vg_handle_esp_assignment_SLOWLY
   121  vg_handle_esp_assignment_SLOWLY(normal; move down)
   122  vg_handle_esp_assignment_SLOWLY(normal; move up)
   123  vg_handle_esp_assignment_SLOWLY(normal)
   124  vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
*/

/*------------------------------------------------------------*/
/*--- Function declarations.                               ---*/
/*------------------------------------------------------------*/

static void vgmext_ACCESS4_SLOWLY ( Addr a );
static void vgmext_ACCESS2_SLOWLY ( Addr a );
static void vgmext_ACCESS1_SLOWLY ( Addr a );
static void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );

/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct {
      UChar abits[8192];
   }
   AcSecMap;

static AcSecMap* primary_map[ /*65536*/ 262144 ];
static AcSecMap  distinguished_secondary_map;

#define IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                              \
   do {                                                           \
      if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {       \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
         /* VG_(printf)("new 2map because of %p\n", addr); */     \
      }                                                           \
   } while(0)

#define BITARR_SET(aaa_p,iii_p)                  \
   do {                                          \
      UInt   iii = (UInt)iii_p;                  \
      UChar* aaa = (UChar*)aaa_p;                \
      aaa[iii >> 3] |= (1 << (iii & 7));         \
   } while (0)

#define BITARR_CLEAR(aaa_p,iii_p)                \
   do {                                          \
      UInt   iii = (UInt)iii_p;                  \
      UChar* aaa = (UChar*)aaa_p;                \
      aaa[iii >> 3] &= ~(1 << (iii & 7));        \
   } while (0)

#define BITARR_TEST(aaa_p,iii_p)                 \
      (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ]      \
               & (1 << (((UInt)iii_p) & 7))))

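/* Usage sketch, illustrative only: given UChar abits[8192],
   BITARR_SET(abits, 19) sets bit 3 of abits[2] (19 >> 3 == 2,
   19 & 7 == 3), marking byte 19 of the 64k chunk; BITARR_TEST(abits, 19)
   then yields nonzero, and BITARR_CLEAR(abits, 19) undoes it.  Note that
   a set bit means "invalid" here, per VGM_BIT_INVALID below. */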

#define VGM_BIT_VALID       0
#define VGM_BIT_INVALID     1

#define VGM_NIBBLE_VALID    0
#define VGM_NIBBLE_INVALID  0xF

#define VGM_BYTE_VALID      0
#define VGM_BYTE_INVALID    0xFF

#define VGM_WORD_VALID      0
#define VGM_WORD_INVALID    0xFFFFFFFF

#define VGM_EFLAGS_VALID    0xFFFFFFFE
#define VGM_EFLAGS_INVALID  0xFFFFFFFF   /* not used */


static void init_shadow_memory ( void )
{
   Int i;

   for (i = 0; i < 8192; i++)             /* Invalid address */
      distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;

   /* These ones should never change; it's a bug in Valgrind if they do. */
   for (i = 65536; i < 262144; i++)
      primary_map[i] = &distinguished_secondary_map;
}

void SK_(post_clo_init) ( void )
{
}

void SK_(fini) ( void )
{
   VG_(print_malloc_stats)();

   if (VG_(clo_verbosity) == 1) {
      if (!SK_(clo_leak_check))
         VG_(message)(Vg_UserMsg,
            "For a detailed leak analysis, rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg,
                   "For counts of detected errors, rerun with: -v");
   }
   if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();

   done_prof_mem();
}

/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map. */

static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
                                       Char* caller )
{
   AcSecMap* map;
   UInt      i;
   PROF_EVENT(10);

   /* Mark all bytes as invalid access and invalid value. */

   /* It just happens that an AcSecMap occupies exactly 2 pages --
      although this isn't important, so the following assert is
      spurious. */
   sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );

   for (i = 0; i < 8192; i++)
      map->abits[i] = VGM_BYTE_INVALID;   /* Invalid address */

   /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
   return map;
}


/* Basic reading/writing of the bitmaps, for byte-sized accesses. */

static __inline__ UChar get_abit ( Addr a )
{
   AcSecMap* sm     = primary_map[a >> 16];
   UInt      sm_off = a & 0xFFFF;
   PROF_EVENT(20);
#  if 0
      if (IS_DISTINGUISHED_SM(sm))
         VG_(message)(Vg_DebugMsg,
                      "accessed distinguished 2ndary (A)map! 0x%x\n", a);
#  endif
   return BITARR_TEST(sm->abits, sm_off)
             ? VGM_BIT_INVALID : VGM_BIT_VALID;
}

static __inline__ void set_abit ( Addr a, UChar abit )
{
   AcSecMap* sm;
   UInt      sm_off;
   PROF_EVENT(22);
   ENSURE_MAPPABLE(a, "set_abit");
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   if (abit)
      BITARR_SET(sm->abits, sm_off);
   else
      BITARR_CLEAR(sm->abits, sm_off);
}


/* Reading/writing of the bitmaps, for aligned word-sized accesses. */

static __inline__ UChar get_abits4_ALIGNED ( Addr a )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     abits8;
   PROF_EVENT(24);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
#  endif
   sm     = primary_map[a >> 16];
   sm_off = a & 0xFFFF;
   abits8 = sm->abits[sm_off >> 3];
   abits8 >>= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
   abits8 &= 0x0F;
   return abits8;
}
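
/* Worked example, illustrative only: for a == 0x5000000C (4-aligned),
   sm_off is 0xC, so the relevant A bits sit in sm->abits[1]; since
   (a & 4) == 4, the shift and the 0x0F mask extract bits 4..7 --
   exactly the nibble covering bytes 0xC..0xF of this 64k chunk. */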


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, UInt len,
                                      UInt example_a_bit )
{
   UChar     abyte8;
   UInt      sm_off;
   AcSecMap* sm;

   PROF_EVENT(30);

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000) {
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range perms: "
                   "large range %u, a %d",
                   len, example_a_bit );
   }

   VGP_PUSHCC(VgpSetMem);

   /* Requests to change permissions of huge address ranges may
      indicate bugs in our machinery.  30,000,000 is arbitrary, but so
      far all legitimate requests have fallen beneath that size. */
   /* 4 Mar 02: this is just stupid; get rid of it. */
   /* sk_assert(len < 30000000); */

   /* Check the permissions make sense. */
   sk_assert(example_a_bit == VGM_BIT_VALID
             || example_a_bit == VGM_BIT_INVALID);

   /* In order that we can charge through the address space at 8
      bytes/main-loop iteration, make up some perms. */
   abyte8 = (example_a_bit << 7)
            | (example_a_bit << 6)
            | (example_a_bit << 5)
            | (example_a_bit << 4)
            | (example_a_bit << 3)
            | (example_a_bit << 2)
            | (example_a_bit << 1)
            | (example_a_bit << 0);

#  ifdef VG_DEBUG_MEMORY
   /* Do it ... */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

#  else
   /* Slowly do parts preceding 8-byte alignment. */
   while (True) {
      PROF_EVENT(31);
      if (len == 0) break;
      if ((a % 8) == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0);

   /* Once aligned, go fast. */
   while (True) {
      PROF_EVENT(32);
      if (len < 8) break;
      ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
      sm = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      sm->abits[sm_off >> 3] = abyte8;
      a += 8;
      len -= 8;
   }

   if (len == 0) {
      VGP_POPCC(VgpSetMem);
      return;
   }
   sk_assert((a % 8) == 0 && len > 0 && len < 8);

   /* Finish the upper fragment. */
   while (True) {
      PROF_EVENT(33);
      if (len == 0) break;
      set_abit ( a, example_a_bit );
      a++;
      len--;
   }
#  endif

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSetMem);
}

/* Set permissions for address ranges ... */

void SK_(make_noaccess) ( Addr a, UInt len )
{
   PROF_EVENT(35);
   DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_INVALID );
}

void SK_(make_accessible) ( Addr a, UInt len )
{
   PROF_EVENT(36);
   DEBUG("SK_(make_accessible)(%p, %x)\n", a, len);
   set_address_range_perms ( a, len, VGM_BIT_VALID );
}

/* Block-copy permissions (needed for implementing realloc()). */

static void copy_address_range_state ( Addr src, Addr dst, UInt len )
{
   UInt i;

   DEBUG("copy_address_range_state\n");

   PROF_EVENT(40);
   for (i = 0; i < len; i++) {
      UChar abit = get_abit ( src+i );
      PROF_EVENT(41);
      set_abit ( dst+i, abit );
   }
}


/* Check permissions for address range.  If inadequate permissions
   exist, *bad_addr is set to the offending address, so the caller can
   know what it is. */

Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;
   PROF_EVENT(42);
   for (i = 0; i < len; i++) {
      PROF_EVENT(43);
      abit = get_abit(a);
      if (abit == VGM_BIT_INVALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}

Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
{
   UInt  i;
   UChar abit;

   PROF_EVENT(44);
   DEBUG("SK_(check_readable)\n");
   for (i = 0; i < len; i++) {
      abit = get_abit(a);
      PROF_EVENT(45);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      a++;
   }
   return True;
}


/* Check a zero-terminated ascii string.  Tricky -- don't want to
   examine the actual bytes, to find the end, until we're sure it is
   safe to do so. */

Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
{
   UChar abit;
   PROF_EVENT(46);
   DEBUG("SK_(check_readable_asciiz)\n");
   while (True) {
      PROF_EVENT(47);
      abit = get_abit(a);
      if (abit != VGM_BIT_VALID) {
         if (bad_addr != NULL) *bad_addr = a;
         return False;
      }
      /* Ok, a is safe to read. */
      if (* ((UChar*)a) == 0) return True;
      a++;
   }
}

/*------------------------------------------------------------*/
/*--- Memory event handlers                                ---*/
/*------------------------------------------------------------*/

/* Setting permissions for aligned words.  This supports fast stack
   operations. */

static void make_noaccess_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(50);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_noaccess_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         invalid (1s). */
      sm->abits[sm_off >> 3] |= mask;
   }
   VGP_POPCC(VgpSetMem);
}

static void make_writable_aligned ( Addr a, UInt len )
{
   AcSecMap* sm;
   UInt      sm_off;
   UChar     mask;
   Addr      a_past_end = a + len;

   VGP_PUSHCC(VgpSetMem);

   PROF_EVENT(51);
#  ifdef VG_DEBUG_MEMORY
   sk_assert(IS_ALIGNED4_ADDR(a));
   sk_assert(IS_ALIGNED4_ADDR(len));
#  endif

   for ( ; a < a_past_end; a += 4) {
      ENSURE_MAPPABLE(a, "make_writable_aligned");
      sm     = primary_map[a >> 16];
      sm_off = a & 0xFFFF;
      mask = 0x0F;
      mask <<= (a & 4 /* 100b */);   /* a & 4 is either 0 or 4 */
      /* mask now contains 1s where we wish to make address bits
         valid (0s). */
      sm->abits[sm_off >> 3] &= ~mask;
   }
   VGP_POPCC(VgpSetMem);
}


static
void check_is_writable ( CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_writable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
         case Vg_CoreSysCall:
            SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
            break;

         case Vg_CorePThread:
         case Vg_CoreSignal:
            SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
            break;

         default:
            VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
      }
   }

   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable ( CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   Bool ok;
   Addr bad_addr;

   VGP_PUSHCC(VgpCheckMem);

   /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
                               base,base+size-1); */
   ok = SK_(check_readable) ( base, size, &bad_addr );
   if (!ok) {
      switch (part) {
         case Vg_CoreSysCall:
            SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
            break;

         case Vg_CorePThread:
            SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
            break;

         /* If we're being asked to jump to a silly address, record an error
            message before potentially crashing the entire system. */
         case Vg_CoreTranslate:
            SK_(record_jump_error)( tst, bad_addr );
            break;

         default:
            VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
      }
   }
   VGP_POPCC(VgpCheckMem);
}

static
void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
                                Char* s, UInt str )
{
   Bool ok = True;
   Addr bad_addr;
   /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */

   VGP_PUSHCC(VgpCheckMem);

   sk_assert(part == Vg_CoreSysCall);
   ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
   if (!ok) {
      SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
   }

   VGP_POPCC(VgpCheckMem);
}

static
void addrcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
   SK_(make_accessible)(a, len);
}

static
void addrcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   SK_(make_accessible)(a, len);
}

static
void addrcheck_set_perms (Addr a, UInt len,
                          Bool nn, Bool rr, Bool ww, Bool xx)
{
   DEBUG("addrcheck_set_perms(%p, %u, nn=%u, rr=%u ww=%u, xx=%u)\n",
         a, len, nn, rr, ww, xx);
   if (rr || ww || xx) {
      SK_(make_accessible)(a, len);
   } else {
      SK_(make_noaccess)(a, len);
   }
}

/*------------------------------------------------------------*/
/*--- Functions called directly from generated code.       ---*/
/*------------------------------------------------------------*/

static __inline__ UInt rotateRight16 ( UInt x )
{
   /* Amazingly, gcc turns this into a single rotate insn. */
   return (x >> 16) | (x << 16);
}


static __inline__ UInt shiftRight16 ( UInt x )
{
   return x >> 16;
}


/* Read/write 1/2/4 sized V bytes, and emit an address error if
   needed. */

/* SK_(helperc_ACCESS{1,2,4}) handle the common case fast.
   Under all other circumstances, they defer to the relevant _SLOWLY
   function, which can handle all situations.
*/
__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS4) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS4_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x3FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   UChar     abits  = sm->abits[a_off];
   abits >>= (a & 4);
   abits &= 15;
   PROF_EVENT(60);
   if (abits == VGM_NIBBLE_VALID) {
      /* Handle common case quickly: a is suitably aligned, is mapped,
         and is addressible.  So just return. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS4_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS2) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS2_SLOWLY(a);
#  else
   UInt      sec_no = rotateRight16(a) & 0x1FFFF;
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(62);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS2_SLOWLY(a);
   }
#  endif
}

__attribute__ ((regparm(1)))
void SK_(helperc_ACCESS1) ( Addr a )
{
#  ifdef VG_DEBUG_MEMORY
   return vgmext_ACCESS1_SLOWLY(a);
#  else
   UInt      sec_no = shiftRight16(a);
   AcSecMap* sm     = primary_map[sec_no];
   UInt      a_off  = (a & 0xFFFF) >> 3;
   PROF_EVENT(64);
   if (sm->abits[a_off] == VGM_BYTE_VALID) {
      /* Handle common case quickly. */
      return;
   } else {
      /* Slow but general case. */
      vgmext_ACCESS1_SLOWLY(a);
   }
#  endif
}

/*------------------------------------------------------------*/
/*--- Fallback functions to handle cases that the above    ---*/
/*--- VG_(helperc_ACCESS{1,2,4}) can't manage.             ---*/
/*------------------------------------------------------------*/

static void vgmext_ACCESS4_SLOWLY ( Addr a )
{
   Bool a0ok, a1ok, a2ok, a3ok;

   PROF_EVENT(70);

   /* First establish independently the addressibility of the 4 bytes
      involved. */
   a0ok = get_abit(a+0) == VGM_BIT_VALID;
   a1ok = get_abit(a+1) == VGM_BIT_VALID;
   a2ok = get_abit(a+2) == VGM_BIT_VALID;
   a3ok = get_abit(a+3) == VGM_BIT_VALID;

   /* Now distinguish 3 cases */

   /* Case 1: the address is completely valid, so:
      - no addressing error
   */
   if (a0ok && a1ok && a2ok && a3ok) {
      return;
   }

   /* Case 2: the address is completely invalid.
      - emit addressing error
   */
   /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
   if (!SK_(clo_partial_loads_ok)
       || ((a & 3) != 0)
       || (!a0ok && !a1ok && !a2ok && !a3ok)) {
      sk_record_address_error( a, 4, False );
      return;
   }

   /* Case 3: the address is partially valid.
      - no addressing error
      Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
      (which is the default), and the address is 4-aligned.
      If not, Case 2 will have applied.
   */
   sk_assert(SK_(clo_partial_loads_ok));
   return;
}

static void vgmext_ACCESS2_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(72);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
   if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      sk_record_address_error( a, 2, False );
   }
}

static void vgmext_ACCESS1_SLOWLY ( Addr a )
{
   /* Check the address for validity. */
   Bool aerr = False;
   PROF_EVENT(74);

   if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;

   /* If an address error has happened, report it. */
   if (aerr) {
      sk_record_address_error( a, 1, False );
   }
}


/* ---------------------------------------------------------------------
   FPU load and store checks, called from generated code.
   ------------------------------------------------------------------ */

__attribute__ ((regparm(2)))
void SK_(fpu_ACCESS_check) ( Addr addr, Int size )
{
   /* Ensure the read area is both addressible and valid (ie,
      readable).  If there's an address error, don't report a value
      error too; but if there isn't an address error, check for a
      value error.

      Try to be reasonably fast on the common case; wimp out and defer
      to fpu_ACCESS_check_SLOWLY for everything else.  */

   AcSecMap* sm;
   UInt      sm_off, a_off;
   Addr      addr4;

   PROF_EVENT(80);

#  ifdef VG_DEBUG_MEMORY
   fpu_ACCESS_check_SLOWLY ( addr, size );
#  else

   if (size == 4) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
      PROF_EVENT(81);
      /* Properly aligned. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
      /* Properly aligned and addressible. */
      return;
     slow4:
      fpu_ACCESS_check_SLOWLY ( addr, 4 );
      return;
   }

   if (size == 8) {
      if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
      PROF_EVENT(82);
      /* Properly aligned.  Do it in two halves. */
      addr4 = addr + 4;
      /* First half. */
      sm     = primary_map[addr >> 16];
      sm_off = addr & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* First half properly aligned and addressible. */
      /* Second half. */
      sm     = primary_map[addr4 >> 16];
      sm_off = addr4 & 0xFFFF;
      a_off  = sm_off >> 3;
      if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
      /* Second half properly aligned and addressible. */
      /* Both halves properly aligned and addressible. */
      return;
     slow8:
      fpu_ACCESS_check_SLOWLY ( addr, 8 );
      return;
   }

   /* Can't be bothered to huff'n'puff to make these (allegedly) rare
      cases go quickly. */
   if (size == 2) {
      PROF_EVENT(83);
      fpu_ACCESS_check_SLOWLY ( addr, 2 );
      return;
   }

   if (size == 10) {
      PROF_EVENT(84);
      fpu_ACCESS_check_SLOWLY ( addr, 10 );
      return;
   }

   if (size == 28 || size == 108) {
      PROF_EVENT(84); /* XXX assign correct event number */
      fpu_ACCESS_check_SLOWLY ( addr, size );
      return;
   }

   VG_(printf)("size is %d\n", size);
   VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
#  endif
}


/* ---------------------------------------------------------------------
   Slow, general cases for FPU access checks.
   ------------------------------------------------------------------ */

static void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
{
   Int  i;
   Bool aerr = False;
   PROF_EVENT(90);
   for (i = 0; i < size; i++) {
      PROF_EVENT(91);
      if (get_abit(addr+i) != VGM_BIT_VALID)
         aerr = True;
   }

   if (aerr) {
      sk_record_address_error( addr, size, False );
   }
}

/*------------------------------------------------------------*/
/*--- Shadow chunks info                                   ---*/
/*------------------------------------------------------------*/

static __inline__
void set_where( ShadowChunk* sc, ExeContext* ec )
{
   sc->skin_extra[0] = (UInt)ec;
}

static __inline__
ExeContext* get_where( ShadowChunk* sc )
{
   return (ExeContext*)sc->skin_extra[0];
}

void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   set_where( sc, VG_(get_ExeContext) ( tst ) );
}

/*------------------------------------------------------------*/
/*--- Postponing free()ing                                 ---*/
/*------------------------------------------------------------*/

/* Holds blocks after freeing. */
static ShadowChunk* vg_freed_list_start  = NULL;
static ShadowChunk* vg_freed_list_end    = NULL;
static Int          vg_freed_list_volume = 0;

static __attribute__ ((unused))
       Int count_freelist ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n++;
   return n;
}

static __attribute__ ((unused))
       void freelist_sanity ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   /* VG_(printf)("freelist sanity\n"); */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      n += sc->size;
   sk_assert(n == vg_freed_list_volume);
}

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (vg_freed_list_end == NULL) {
      sk_assert(vg_freed_list_start == NULL);
      vg_freed_list_end = vg_freed_list_start = sc;
      vg_freed_list_volume = sc->size;
   } else {
      sk_assert(vg_freed_list_end->next == NULL);
      vg_freed_list_end->next = sc;
      vg_freed_list_end = sc;
      vg_freed_list_volume += sc->size;
   }
   sc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below SK_(clo_freelist_vol). */

   while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(vg_freed_list_start != NULL);
      sk_assert(vg_freed_list_end != NULL);

      sc1 = vg_freed_list_start;
      vg_freed_list_volume -= sc1->size;
      /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
      sk_assert(vg_freed_list_volume >= 0);

      if (vg_freed_list_start == vg_freed_list_end) {
         vg_freed_list_start = vg_freed_list_end = NULL;
      } else {
         vg_freed_list_start = sc1->next;
      }
      sc1->next = NULL; /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
ShadowChunk* SK_(any_matching_freed_ShadowChunks)
                        ( Bool (*p) ( ShadowChunk* ))
{
   ShadowChunk* sc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
      if (p(sc))
         return sc;

   return NULL;
}

void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
{
   /* Record where freed */
   set_where( sc, VG_(get_ExeContext) ( tst ) );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
}

/*------------------------------------------------------------*/
/*--- Our instrumenter                                     ---*/
/*------------------------------------------------------------*/

#define uInstr1   VG_(new_UInstr1)
#define uInstr2   VG_(new_UInstr2)
#define uLiteral  VG_(set_lit_field)
#define uCCall    VG_(set_ccall_fields)
#define newTemp   VG_(get_new_temp)

UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
{
/* Use this rather than eg. -1 because it's a UInt. */
#define INVALID_DATA_SIZE   999999

   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_addr, t_size;

   cb = VG_(alloc_UCodeBlock)();
   cb->nextTemp = cb_in->nextTemp;

   for (i = 0; i < cb_in->used; i++) {

      t_addr = t_size = INVALID_TEMPREG;
      u_in = &cb_in->instrs[i];

      switch (u_in->opcode) {
         case NOP:  case CALLM_E:  case CALLM_S:
            break;

         /* For memory-ref instrs, copy the data_addr into a temporary to
          * be passed to the access-check helper at the end of the
          * instruction.
          */
         case LOAD:
            t_addr = u_in->val1;
            goto do_LOAD_or_STORE;
         case STORE:
            t_addr = u_in->val2;
            goto do_LOAD_or_STORE;
         do_LOAD_or_STORE:
            uInstr1(cb, CCALL, 0, TempReg, t_addr);
            switch (u_in->size) {
               case 4: uCCall(cb, (Addr)&SK_(helperc_ACCESS4), 1, 1, False );
                  break;
               case 2: uCCall(cb, (Addr)&SK_(helperc_ACCESS2), 1, 1, False );
                  break;
               case 1: uCCall(cb, (Addr)&SK_(helperc_ACCESS1), 1, 1, False );
                  break;
               default:
                  VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         case FPU_R:
         case FPU_W:
            t_addr = u_in->val2;
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
            uCCall(cb, (Addr)&SK_(fpu_ACCESS_check), 2, 2, False );
            VG_(copy_UInstr)(cb, u_in);
            break;

         default:
            VG_(copy_UInstr)(cb, u_in);
            break;
      }
   }

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}
1805
1806
1807
1808/*------------------------------------------------------------*/
1809/*--- Low-level address-space scanning, for the leak ---*/
1810/*--- detector. ---*/
1811/*------------------------------------------------------------*/
1812
1813static
1814jmp_buf memscan_jmpbuf;
1815
1816static
1817void vg_scan_all_valid_memory_sighandler ( Int sigNo )
1818{
1819 __builtin_longjmp(memscan_jmpbuf, 1);
1820}
1821
1822/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
1823 space and pass the addresses and values of all addressible,
1824 defined, aligned words to notify_word. This is the basis for the
1825 leak detector. Returns the number of calls made to notify_word. */
1826UInt VG_(scan_all_valid_memory) ( void (*notify_word)( Addr, UInt ) )
1827{
1828 /* All volatile, because some gccs seem paranoid about longjmp(). */
1829 volatile UInt res, numPages, page, primaryMapNo, nWordsNotified;
1830 volatile Addr pageBase, addr;
1831 volatile AcSecMap* sm;
1832 volatile UChar abits;
1833 volatile UInt page_first_word;
1834
1835 vki_ksigaction sigbus_saved;
1836 vki_ksigaction sigbus_new;
1837 vki_ksigaction sigsegv_saved;
1838 vki_ksigaction sigsegv_new;
1839 vki_ksigset_t blockmask_saved;
1840 vki_ksigset_t unblockmask_new;
1841
1842 /* Temporarily install a new sigsegv and sigbus handler, and make
1843 sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the
1844 first two can never be blocked anyway?) */
1845
1846 sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1847 sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1848 sigbus_new.ksa_restorer = NULL;
1849 res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
njne427a662002-10-02 11:08:25 +00001850 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001851
1852 sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1853 sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1854 sigsegv_new.ksa_restorer = NULL;
1855 res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
njne427a662002-10-02 11:08:25 +00001856 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001857
1858 res = VG_(ksigemptyset)( &unblockmask_new );
1859 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
1860 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
1861 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
njne427a662002-10-02 11:08:25 +00001862 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001863
1864 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
njne427a662002-10-02 11:08:25 +00001865 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001866
1867 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
njne427a662002-10-02 11:08:25 +00001868 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001869
1870 res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
njne427a662002-10-02 11:08:25 +00001871 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001872
1873 /* The signal handlers are installed. Actually do the memory scan. */
1874 numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
njne427a662002-10-02 11:08:25 +00001875 sk_assert(numPages == 1048576);
1876 sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));
njn25e49d8e72002-09-23 09:36:25 +00001877
1878 nWordsNotified = 0;
1879
1880 for (page = 0; page < numPages; page++) {
1881 pageBase = page << VKI_BYTES_PER_PAGE_BITS;
1882 primaryMapNo = pageBase >> 16;
1883 sm = primary_map[primaryMapNo];
1884 if (IS_DISTINGUISHED_SM(sm)) continue;
1885 if (__builtin_setjmp(memscan_jmpbuf) == 0) {
1886 /* try this ... */
1887 page_first_word = * (volatile UInt*)pageBase;
1888 /* we get here if we didn't get a fault */
1889 /* Scan the page */
1890 for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
1891 abits = get_abits4_ALIGNED(addr);
1892 if (abits == VGM_NIBBLE_VALID) {
1893 nWordsNotified++;
1894 notify_word ( addr, *(UInt*)addr );
1895 }
1896 }
1897 } else {
1898 /* We get here if reading the first word of the page caused a
1899 fault, which in turn caused the signal handler to longjmp.
1900 Ignore this page. */
1901 if (0)
1902 VG_(printf)(
1903 "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
1904 (void*)pageBase
1905 );
1906 }
1907 }
1908
1909 /* Restore signal state to whatever it was before. */
1910 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
njne427a662002-10-02 11:08:25 +00001911 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001912
1913 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
njne427a662002-10-02 11:08:25 +00001914 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001915
1916 res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
njne427a662002-10-02 11:08:25 +00001917 sk_assert(res == 0);
njn25e49d8e72002-09-23 09:36:25 +00001918
1919 return nWordsNotified;
1920}
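
/* A minimal usage sketch for VG_(scan_all_valid_memory), in the same
   spirit as zzzmemscan in the debugging section near the end of this
   file.  The names here are hypothetical, and the code is #if 0'd
   out like the other demo code; it counts the addressible, aligned
   words which currently hold zero. */
#if 0
static UInt vg_n_zero_words = 0;

static void vg_count_zero_word ( Addr a, UInt w )
{
   if (w == 0)
      vg_n_zero_words++;
}

static void vg_count_zero_words_demo ( void )
{
   UInt n_words = VG_(scan_all_valid_memory)( &vg_count_zero_word );
   VG_(printf)("%d of %d words are zero\n",
               (Int)vg_n_zero_words, (Int)n_words);
}
#endif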
1921
1922
1923/*------------------------------------------------------------*/
1924/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1925/*------------------------------------------------------------*/
1926
1927/* A block is either
1928 -- Proper-ly reached; a pointer to its start has been found
1929 -- Interior-ly reached; only an interior pointer to it has been found
1930 -- Unreached; so far, no pointers to any part of it have been found.
1931*/
1932typedef
1933 enum { Unreached, Interior, Proper }
1934 Reachedness;
1935
1936/* A block record, used for generating err msgs. */
1937typedef
1938 struct _LossRecord {
1939 struct _LossRecord* next;
1940 /* Where these lost blocks were allocated. */
1941 ExeContext* allocated_at;
1942 /* Their reachability. */
1943 Reachedness loss_mode;
1944 /* Number of blocks and total # bytes involved. */
1945 UInt total_bytes;
1946 UInt num_blocks;
1947 }
1948 LossRecord;
1949
1950
1951/* Find the i such that ptr points at or inside the block described by
1952 shadows[i]. Return -1 if none found. This assumes that shadows[]
1953 has been sorted on the ->data field. */
1954
1955#ifdef VG_DEBUG_LEAKCHECK
1956/* Used to sanity-check the fast binary-search mechanism. */
1957static Int find_shadow_for_OLD ( Addr ptr,
1958 ShadowChunk** shadows,
1959 Int n_shadows )
1960
1961{
1962 Int i;
1963 Addr a_lo, a_hi;
1964 PROF_EVENT(70);
1965 for (i = 0; i < n_shadows; i++) {
1966 PROF_EVENT(71);
1967 a_lo = shadows[i]->data;
1968 a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
1969 if (a_lo <= ptr && ptr <= a_hi)
1970 return i;
1971 }
1972 return -1;
1973}
1974#endif
1975
1976
1977static Int find_shadow_for ( Addr ptr,
1978 ShadowChunk** shadows,
1979 Int n_shadows )
1980{
1981 Addr a_mid_lo, a_mid_hi;
1982 Int lo, mid, hi, retVal;
1983 PROF_EVENT(70);
1984 /* VG_(printf)("find shadow for %p = ", ptr); */
1985 retVal = -1;
1986 lo = 0;
1987 hi = n_shadows-1;
1988 while (True) {
1989 PROF_EVENT(71);
1990
1991 /* invariant: current unsearched space is from lo to hi,
1992 inclusive. */
1993 if (lo > hi) break; /* not found */
1994
1995 mid = (lo + hi) / 2;
1996 a_mid_lo = shadows[mid]->data;
1997 a_mid_hi = ((Addr)shadows[mid]->data) + shadows[mid]->size - 1;
1998
1999 if (ptr < a_mid_lo) {
2000 hi = mid-1;
2001 continue;
2002 }
2003 if (ptr > a_mid_hi) {
2004 lo = mid+1;
2005 continue;
2006 }
njne427a662002-10-02 11:08:25 +00002007 sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
njn25e49d8e72002-09-23 09:36:25 +00002008 retVal = mid;
2009 break;
2010 }
2011
2012# ifdef VG_DEBUG_LEAKCHECK
njne427a662002-10-02 11:08:25 +00002013 sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
njn25e49d8e72002-09-23 09:36:25 +00002014# endif
2015 /* VG_(printf)("%d\n", retVal); */
2016 return retVal;
2017}
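
/* Worked example: if shadows[] holds two sorted blocks, one spanning
   [0x8000 .. 0x80FF] and one spanning [0x9000 .. 0x900F], then
   find_shadow_for(0x9004, ...) returns 1 (an interior address of the
   second block), find_shadow_for(0x8000, ...) returns 0, and
   find_shadow_for(0x8500, ...) returns -1, since 0x8500 lies inside
   neither block. */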
2018
2019
2020
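/* Sort shadows[0 .. n_shadows-1] into increasing order of their
   ->data fields, as find_shadow_for above requires.  This is an
   in-place shellsort using the h -> 3h+1 increment sequence
   (1, 4, 13, 40, ...). */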
2021static void sort_malloc_shadows ( ShadowChunk** shadows, UInt n_shadows )
2022{
2023 Int incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
2024 9841, 29524, 88573, 265720,
2025 797161, 2391484 };
2026 Int lo = 0;
2027 Int hi = n_shadows-1;
2028 Int i, j, h, bigN, hp;
2029 ShadowChunk* v;
2030
2031 PROF_EVENT(72);
2032 bigN = hi - lo + 1; if (bigN < 2) return;
2033 hp = 0; while (incs[hp] < bigN) hp++; hp--;
2034
2035 for (; hp >= 0; hp--) {
2036 PROF_EVENT(73);
2037 h = incs[hp];
2038 i = lo + h;
2039 while (1) {
2040 PROF_EVENT(74);
2041 if (i > hi) break;
2042 v = shadows[i];
2043 j = i;
2044 while (shadows[j-h]->data > v->data) {
2045 PROF_EVENT(75);
2046 shadows[j] = shadows[j-h];
2047 j = j - h;
2048 if (j <= (lo + h - 1)) break;
2049 }
2050 shadows[j] = v;
2051 i++;
2052 }
2053 }
2054}
2055
2056/* Globals, for the callback used by SK_(detect_memory_leaks). */
2057
2058static ShadowChunk** vglc_shadows;
2059static Int vglc_n_shadows;
2060static Reachedness* vglc_reachedness;
2061static Addr vglc_min_mallocd_addr;
2062static Addr vglc_max_mallocd_addr;
2063
2064static
2065void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
2066{
2067 Int sh_no;
2068 Addr ptr;
2069
2070 /* Rule out some known causes of bogus pointers. Mostly these do
2071 not cause much trouble because only a few false pointers can
2072 ever lurk in these places. This mainly stops it reporting that
2073 blocks are still reachable in stupid test programs like this
2074
2075 int main (void) { char* a = malloc(100); return 0; }
2076
2077 which people seem inordinately fond of writing, for some reason.
2078
2079 Note that this is a complete kludge. It would be better to
2080 ignore any addresses corresponding to valgrind.so's .bss and
2081 .data segments, but I cannot think of a reliable way to identify
2082 where the .bss segment has been put. If you can, drop me a
2083 line.
2084 */
2085 if (VG_(within_stack)(a)) return;
2086 if (VG_(within_m_state_static)(a)) return;
2087 if (a == (Addr)(&vglc_min_mallocd_addr)) return;
2088 if (a == (Addr)(&vglc_max_mallocd_addr)) return;
2089
2090 /* OK, let's get on and do something Useful for a change. */
2091
2092 ptr = (Addr)word_at_a;
2093 if (ptr >= vglc_min_mallocd_addr && ptr <= vglc_max_mallocd_addr) {
2094 /* Might be legitimate; we'll have to investigate further. */
2095 sh_no = find_shadow_for ( ptr, vglc_shadows, vglc_n_shadows );
2096 if (sh_no != -1) {
2097 /* Found a block at/into which ptr points. */
njne427a662002-10-02 11:08:25 +00002098 sk_assert(sh_no >= 0 && sh_no < vglc_n_shadows);
2099 sk_assert(ptr < vglc_shadows[sh_no]->data
njn25e49d8e72002-09-23 09:36:25 +00002100 + vglc_shadows[sh_no]->size);
2101 /* Decide whether Proper-ly or Interior-ly reached. */
2102 if (ptr == vglc_shadows[sh_no]->data) {
2103 if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
2104 vglc_reachedness[sh_no] = Proper;
2105 } else {
2106 if (vglc_reachedness[sh_no] == Unreached)
2107 vglc_reachedness[sh_no] = Interior;
2108 }
2109 }
2110 }
2111}
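
/* Worked example of the classification above: suppose shadow 5 spans
   [0x804A000 .. 0x804A063].  A scanned word equal to 0x804A000 makes
   it Proper; a word equal to 0x804A010 makes it Interior, unless an
   earlier exact pointer already made it Proper. */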
2112
2113
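/* Top-level leak check.  In outline: fetch and sort the shadow
   chunks, mark each one Unreached, scan all addressible memory
   (classifying chunks via the callback above), report totals for the
   three categories, then common up records with equal reachedness
   and allocation points and print them smallest-first. */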
2114void SK_(detect_memory_leaks) ( void )
2115{
2116 Int i;
2117 Int blocks_leaked, bytes_leaked;
2118 Int blocks_dubious, bytes_dubious;
2119 Int blocks_reachable, bytes_reachable;
2120 Int n_lossrecords;
2121 UInt bytes_notified;
2122
2123 LossRecord* errlist;
2124 LossRecord* p;
2125
2126 PROF_EVENT(76);
2127
2128 /* VG_(get_malloc_shadows) allocates storage for shadows */
2129 vglc_shadows = VG_(get_malloc_shadows)( &vglc_n_shadows );
2130 if (vglc_n_shadows == 0) {
njne427a662002-10-02 11:08:25 +00002131 sk_assert(vglc_shadows == NULL);
njn25e49d8e72002-09-23 09:36:25 +00002132 VG_(message)(Vg_UserMsg,
2133 "No malloc'd blocks -- no leaks are possible.\n");
2134 return;
2135 }
2136
2137 VG_(message)(Vg_UserMsg,
2138 "searching for pointers to %d not-freed blocks.",
2139 vglc_n_shadows );
2140 sort_malloc_shadows ( vglc_shadows, vglc_n_shadows );
2141
2142 /* Sanity check; assert that the blocks are now in order and that
2143 they don't overlap. */
2144 for (i = 0; i < vglc_n_shadows-1; i++) {
njne427a662002-10-02 11:08:25 +00002145 sk_assert( ((Addr)vglc_shadows[i]->data)
njn25e49d8e72002-09-23 09:36:25 +00002146 < ((Addr)vglc_shadows[i+1]->data) );
njne427a662002-10-02 11:08:25 +00002147 sk_assert( ((Addr)vglc_shadows[i]->data) + vglc_shadows[i]->size
njn25e49d8e72002-09-23 09:36:25 +00002148 < ((Addr)vglc_shadows[i+1]->data) );
2149 }
2150
2151 vglc_min_mallocd_addr = ((Addr)vglc_shadows[0]->data);
2152 vglc_max_mallocd_addr = ((Addr)vglc_shadows[vglc_n_shadows-1]->data)
2153 + vglc_shadows[vglc_n_shadows-1]->size - 1;
2154
2155 vglc_reachedness
2156 = VG_(malloc)( vglc_n_shadows * sizeof(Reachedness) );
2157 for (i = 0; i < vglc_n_shadows; i++)
2158 vglc_reachedness[i] = Unreached;
2159
2160 /* Do the scan of memory. */
2161 bytes_notified
2162 = VG_(scan_all_valid_memory)( &vg_detect_memory_leaks_notify_addr )
2163 * VKI_BYTES_PER_WORD;
2164
2165 VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
2166
2167 blocks_leaked = bytes_leaked = 0;
2168 blocks_dubious = bytes_dubious = 0;
2169 blocks_reachable = bytes_reachable = 0;
2170
2171 for (i = 0; i < vglc_n_shadows; i++) {
2172 if (vglc_reachedness[i] == Unreached) {
2173 blocks_leaked++;
2174 bytes_leaked += vglc_shadows[i]->size;
2175 }
2176 else if (vglc_reachedness[i] == Interior) {
2177 blocks_dubious++;
2178 bytes_dubious += vglc_shadows[i]->size;
2179 }
2180 else if (vglc_reachedness[i] == Proper) {
2181 blocks_reachable++;
2182 bytes_reachable += vglc_shadows[i]->size;
2183 }
2184 }
2185
2186 VG_(message)(Vg_UserMsg, "");
2187 VG_(message)(Vg_UserMsg, "definitely lost: %d bytes in %d blocks.",
2188 bytes_leaked, blocks_leaked );
2189 VG_(message)(Vg_UserMsg, "possibly lost: %d bytes in %d blocks.",
2190 bytes_dubious, blocks_dubious );
2191 VG_(message)(Vg_UserMsg, "still reachable: %d bytes in %d blocks.",
2192 bytes_reachable, blocks_reachable );
2193
2194
2195 /* Common up the lost blocks so we can print sensible error
2196 messages. */
2197
2198 n_lossrecords = 0;
2199 errlist = NULL;
2200 for (i = 0; i < vglc_n_shadows; i++) {
2201
2202 /* 'where' stored in 'skin_extra' field */
2203 ExeContext* where = get_where ( vglc_shadows[i] );
2204
2205 for (p = errlist; p != NULL; p = p->next) {
2206 if (p->loss_mode == vglc_reachedness[i]
2207 && VG_(eq_ExeContext) ( SK_(clo_leak_resolution),
2208 p->allocated_at,
2209 where) ) {
2210 break;
2211 }
2212 }
2213 if (p != NULL) {
2214 p->num_blocks ++;
2215 p->total_bytes += vglc_shadows[i]->size;
2216 } else {
2217 n_lossrecords ++;
2218 p = VG_(malloc)(sizeof(LossRecord));
2219 p->loss_mode = vglc_reachedness[i];
2220 p->allocated_at = where;
2221 p->total_bytes = vglc_shadows[i]->size;
2222 p->num_blocks = 1;
2223 p->next = errlist;
2224 errlist = p;
2225 }
2226 }
2227
2228 for (i = 0; i < n_lossrecords; i++) {
2229 LossRecord* p_min = NULL;
2230 UInt n_min = 0xFFFFFFFF;
2231 for (p = errlist; p != NULL; p = p->next) {
2232 if (p->num_blocks > 0 && p->total_bytes < n_min) {
2233 n_min = p->total_bytes;
2234 p_min = p;
2235 }
2236 }
njne427a662002-10-02 11:08:25 +00002237 sk_assert(p_min != NULL);
njn25e49d8e72002-09-23 09:36:25 +00002238
2239 if ( (!SK_(clo_show_reachable)) && p_min->loss_mode == Proper) {
2240 p_min->num_blocks = 0;
2241 continue;
2242 }
2243
2244 VG_(message)(Vg_UserMsg, "");
2245 VG_(message)(
2246 Vg_UserMsg,
2247 "%d bytes in %d blocks are %s in loss record %d of %d",
2248 p_min->total_bytes, p_min->num_blocks,
2249 p_min->loss_mode==Unreached ? "definitely lost" :
2250 (p_min->loss_mode==Interior ? "possibly lost"
2251 : "still reachable"),
2252 i+1, n_lossrecords
2253 );
2254 VG_(pp_ExeContext)(p_min->allocated_at);
2255 p_min->num_blocks = 0;
2256 }
2257
2258 VG_(message)(Vg_UserMsg, "");
2259 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
2260 VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
2261 bytes_leaked, blocks_leaked );
2262 VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
2263 bytes_dubious, blocks_dubious );
2264 VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
2265 bytes_reachable, blocks_reachable );
2266 if (!SK_(clo_show_reachable)) {
2267 VG_(message)(Vg_UserMsg,
2268 "Reachable blocks (those to which a pointer was found) are not shown.");
2269 VG_(message)(Vg_UserMsg,
2270 "To see them, rerun with: --show-reachable=yes");
2271 }
2272 VG_(message)(Vg_UserMsg, "");
2273
2274 VG_(free) ( vglc_shadows );
2275 VG_(free) ( vglc_reachedness );
2276}
2277
2278
2279/* ---------------------------------------------------------------------
2280 Sanity check machinery (permanently engaged).
2281 ------------------------------------------------------------------ */
2282
2283/* Check that nobody has spuriously claimed that the first or last 16
2284 pages (64 KB) of address space have become accessible. Failure of
2285 the following does not per se indicate an internal consistency
2286 problem, but it is so likely to that we really want to know
2287 about it if so. */
2288
2289Bool SK_(cheap_sanity_check) ( void )
2290{
2291 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
2292 IS_DISTINGUISHED_SM(primary_map[65535]))
2293 return True;
2294 else
2295 return False;
2296}
2297
2298Bool SK_(expensive_sanity_check) ( void )
2299{
2300 Int i;
2301
2302 /* Make sure nobody changed the distinguished secondary. */
2303 for (i = 0; i < 8192; i++)
2304 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
2305 return False;
2306
2307 /* Make sure that the upper 3/4 of the primary map hasn't
2308 been messed with. */
2309 for (i = 65536; i < 262144; i++)
2310 if (primary_map[i] != & distinguished_secondary_map)
2311 return False;
2312
2313 return True;
2314}
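
/* Arithmetic behind the bounds used above: a 32-bit address shifted
   right by 16 gives an index in 0 .. 65535, and each secondary map
   covers 64KB (8192 abit bytes * 8 addresses per byte), so the first
   65536 entries span the entire 4GB space.  Indices 65536 .. 262143
   can never arise from a valid address, hence they must still point
   at the distinguished secondary. */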
2315
2316/* ---------------------------------------------------------------------
2317 Debugging machinery (turn on to debug). Something of a mess.
2318 ------------------------------------------------------------------ */
2319
2320#if 0
2321/* Print the value tags on the 8 integer registers & flag reg. */
2322
2323static void uint_to_bits ( UInt x, Char* str )
2324{
2325 Int i;
2326 Int w = 0;
2327 /* str must point to a space of at least 36 bytes. */
2328 for (i = 31; i >= 0; i--) {
2329 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
2330 if (i == 24 || i == 16 || i == 8)
2331 str[w++] = ' ';
2332 }
2333 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00002334 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00002335}
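
/* E.g. uint_to_bits(0x80000001, str) produces
   "10000000 00000000 00000000 00000001": 32 digits plus 3 spaces
   plus the terminating zero, hence the 36-byte requirement on str. */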
2336
2337/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
2338 state table. */
2339
2340static void vg_show_reg_tags ( void )
2341{
2342 Char buf1[36];
2343 Char buf2[36];
2344 UInt z_eax, z_ebx, z_ecx, z_edx,
2345 z_esi, z_edi, z_ebp, z_esp, z_eflags;
2346
2347 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
2348 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
2349 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
2350 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
2351 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
2352 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
2353 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
2354 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
2355 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
2356
2357 uint_to_bits(z_eflags, buf1);
2358 VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
2359
2360 uint_to_bits(z_eax, buf1);
2361 uint_to_bits(z_ebx, buf2);
2362 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
2363
2364 uint_to_bits(z_ecx, buf1);
2365 uint_to_bits(z_edx, buf2);
2366 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
2367
2368 uint_to_bits(z_esi, buf1);
2369 uint_to_bits(z_edi, buf2);
2370 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
2371
2372 uint_to_bits(z_ebp, buf1);
2373 uint_to_bits(z_esp, buf2);
2374 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
2375}
2376
2377
2378/* For debugging only. Scan the address space and touch all allegedly
2379 addressible words. Useful for establishing where Valgrind's idea of
2380 addressibility has diverged from what the kernel believes. */
2381
2382static
2383void zzzmemscan_notify_word ( Addr a, UInt w )
2384{
2385}
2386
2387void zzzmemscan ( void )
2388{
2389 Int n_notifies
2390 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
2391 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
2392}
2393#endif
2394
2395
2396
2397
2398#if 0
2399static Int zzz = 0;
2400
2401void show_bb ( Addr eip_next )
2402{
2403 VG_(printf)("[%4d] ", zzz);
2404 vg_show_reg_tags();
2405 VG_(translate) ( eip_next, NULL, NULL, NULL );
2406}
2407#endif /* 0 */
2408
2409/*------------------------------------------------------------*/
2410/*--- Syscall wrappers ---*/
2411/*------------------------------------------------------------*/
2412
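/* Bracket every syscall with the cheap sanity check: pre_syscall
   records whether the shadow map looked sane beforehand, and
   post_syscall re-checks it afterwards, so a syscall wrapper which
   corrupts addressibility information is caught immediately after
   the guilty syscall. */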
2413void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
2414{
2415 Int sane = SK_(cheap_sanity_check)();
2416 return (void*)sane;
2417}
2418
2419void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
2420 void* pre_result, Int res, Bool isBlocking )
2421{
2422 Int sane_before_call = (Int)pre_result;
2423 Bool sane_after_call = SK_(cheap_sanity_check)();
2424
2425 if ((Int)sane_before_call && (!sane_after_call)) {
2426 VG_(message)(Vg_DebugMsg, "post-syscall: ");
2427 VG_(message)(Vg_DebugMsg,
2428 "probable sanity check failure for syscall number %d\n",
2429 syscallno );
njne427a662002-10-02 11:08:25 +00002430 VG_(skin_panic)("aborting due to the above ... bye!");
njn25e49d8e72002-09-23 09:36:25 +00002431 }
2432}
2433
2434
2435/*------------------------------------------------------------*/
2436/*--- Setup ---*/
2437/*------------------------------------------------------------*/
2438
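/* AddrCheck does no definedness tracking, so newly written shadow
   registers are simply stamped with the all-valid values. */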
2439void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
2440{
2441 *gen_reg_value = VGM_WORD_VALID;
2442 *eflags_value = VGM_EFLAGS_VALID;
2443}
2444
2445Bool SK_(process_cmd_line_option)(Char* arg)
2446{
2447# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
2448# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
2449
2450 if (STREQ(arg, "--partial-loads-ok=yes"))
2451 SK_(clo_partial_loads_ok) = True;
2452 else if (STREQ(arg, "--partial-loads-ok=no"))
2453 SK_(clo_partial_loads_ok) = False;
2454
2455 else if (STREQN(15, arg, "--freelist-vol=")) {
2456 SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
2457 if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
2458 }
2459
2460 else if (STREQ(arg, "--leak-check=yes"))
2461 SK_(clo_leak_check) = True;
2462 else if (STREQ(arg, "--leak-check=no"))
2463 SK_(clo_leak_check) = False;
2464
2465 else if (STREQ(arg, "--leak-resolution=low"))
2466 SK_(clo_leak_resolution) = Vg_LowRes;
2467 else if (STREQ(arg, "--leak-resolution=med"))
2468 SK_(clo_leak_resolution) = Vg_MedRes;
2469 else if (STREQ(arg, "--leak-resolution=high"))
2470 SK_(clo_leak_resolution) = Vg_HighRes;
2471
2472 else if (STREQ(arg, "--show-reachable=yes"))
2473 SK_(clo_show_reachable) = True;
2474 else if (STREQ(arg, "--show-reachable=no"))
2475 SK_(clo_show_reachable) = False;
2476
2477 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
2478 SK_(clo_workaround_gcc296_bugs) = True;
2479 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
2480 SK_(clo_workaround_gcc296_bugs) = False;
2481
2482 else if (STREQ(arg, "--cleanup=yes"))
2483 SK_(clo_cleanup) = True;
2484 else if (STREQ(arg, "--cleanup=no"))
2485 SK_(clo_cleanup) = False;
2486
2487 else
2488 return False;
2489
2490 return True;
2491
2492#undef STREQ
2493#undef STREQN
2494}
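
/* A small, hypothetical illustration of the parser above, #if 0'd
   out like the other demo code in this file: demo_parse_options is
   not part of the skin, it just shows the expected behaviour. */
#if 0
static void demo_parse_options ( void )
{
   /* Recognised option: parsed, flag updated, True returned. */
   sk_assert( SK_(process_cmd_line_option)("--leak-check=yes") );
   sk_assert( SK_(clo_leak_check) == True );

   /* "--freelist-vol=500000" hits the STREQN(15, ...) case, so
      VG_(atoll) parses the digits at &arg[15]; negative values are
      clamped to zero. */
   SK_(process_cmd_line_option)("--freelist-vol=500000");
   sk_assert( SK_(clo_freelist_vol) == 500000 );

   /* Unrecognised options return False so the core can reject them. */
   sk_assert( ! SK_(process_cmd_line_option)("--no-such-option") );
}
#endif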
2495
2496Char* SK_(usage)(void)
2497{
2498 return
2499" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
2500" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
2501" --leak-check=no|yes search for memory leaks at exit? [no]\n"
2502" --leak-resolution=low|med|high\n"
2503" amount of bt merging in leak check [low]\n"
2504" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
2505" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
2506" --check-addrVs=no|yes experimental lighterweight checking? [yes]\n"
2507" yes == Valgrind's original behaviour\n"
2508"\n"
2509" --cleanup=no|yes improve after instrumentation? [yes]\n";
2510}
2511
2512
2513/*------------------------------------------------------------*/
2514/*--- Setup ---*/
2515/*------------------------------------------------------------*/
2516
njnd04b7c62002-10-03 14:05:52 +00002517void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
njn25e49d8e72002-09-23 09:36:25 +00002518{
njnd04b7c62002-10-03 14:05:52 +00002519 details->name = "addrcheck";
2520 details->version = NULL;
2521 details->description = "a fine-grained address checker";
2522 details->copyright_author =
2523 "Copyright (C) 2002, and GNU GPL'd, by Julian Seward.";
2524 details->bug_reports_to = "jseward@acm.org";
njn25e49d8e72002-09-23 09:36:25 +00002525
njnd04b7c62002-10-03 14:05:52 +00002526 needs->core_errors = True;
2527 needs->skin_errors = True;
2528 needs->libc_freeres = True;
2529 needs->sizeof_shadow_block = 1;
2530 needs->basic_block_discards = False;
2531 needs->shadow_regs = False;
2532 needs->command_line_options = True;
2533 needs->client_requests = True;
2534 needs->extended_UCode = False;
2535 needs->syscall_wrapper = True;
2536 needs->alternative_free = True;
2537 needs->sanity_checks = True;
njn25e49d8e72002-09-23 09:36:25 +00002538
njn25e49d8e72002-09-23 09:36:25 +00002539 track->new_mem_startup = & addrcheck_new_mem_startup;
2540 track->new_mem_heap = & addrcheck_new_mem_heap;
2541 track->new_mem_stack = & SK_(make_accessible);
2542 track->new_mem_stack_aligned = & make_writable_aligned;
2543 track->new_mem_stack_signal = & SK_(make_accessible);
2544 track->new_mem_brk = & SK_(make_accessible);
2545 track->new_mem_mmap = & addrcheck_set_perms;
2546
2547 track->copy_mem_heap = & copy_address_range_state;
2548 track->copy_mem_remap = & copy_address_range_state;
2549 track->change_mem_mprotect = & addrcheck_set_perms;
2550
2551 track->ban_mem_heap = & SK_(make_noaccess);
2552 track->ban_mem_stack = & SK_(make_noaccess);
2553
2554 track->die_mem_heap = & SK_(make_noaccess);
2555 track->die_mem_stack = & SK_(make_noaccess);
2556 track->die_mem_stack_aligned = & make_noaccess_aligned;
2557 track->die_mem_stack_signal = & SK_(make_noaccess);
2558 track->die_mem_brk = & SK_(make_noaccess);
2559 track->die_mem_munmap = & SK_(make_noaccess);
2560
2561 track->bad_free = & SK_(record_free_error);
2562 track->mismatched_free = & SK_(record_freemismatch_error);
2563
2564 track->pre_mem_read = & check_is_readable;
2565 track->pre_mem_read_asciiz = & check_is_readable_asciiz;
2566 track->pre_mem_write = & check_is_writable;
2567 track->post_mem_write = & SK_(make_accessible);
2568
njnd04b7c62002-10-03 14:05:52 +00002569 VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS4));
2570 VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS2));
2571 VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS1));
2572 VG_(register_compact_helper)((Addr) & SK_(fpu_ACCESS_check));
njn25e49d8e72002-09-23 09:36:25 +00002573
2574 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2575 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
njnd04b7c62002-10-03 14:05:52 +00002576
2577 init_shadow_memory();
2578 init_prof_mem();
njn25e49d8e72002-09-23 09:36:25 +00002579}
2580
2581/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002582/*--- end ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002583/*--------------------------------------------------------------------*/