1
2/*--------------------------------------------------------------------*/
3/*--- The AddrCheck skin: like MemCheck, but only does address ---*/
4/*--- checking. No definedness checking. ---*/
5/*--- ac_main.c ---*/
6/*--------------------------------------------------------------------*/
7
8/*
9 This file is part of AddrCheck, a lightweight Valgrind skin for
10 detecting memory errors.
11
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
33#include "ac_include.h"
34//#include "vg_profile.c"
35
36/*------------------------------------------------------------*/
37/*--- Defns ---*/
38/*------------------------------------------------------------*/
39
40/* This many bytes below %ESP are considered addressible if we're
41 doing the --workaround-gcc296-bugs hack. */
42#define VG_GCC296_BUG_STACK_SLOP 1024
43
44
45typedef
46 enum {
47 /* Bad syscall params */
48 ParamSupp,
49 /* Memory errors in core (pthread ops, signal handling) */
50 CoreMemSupp,
51 /* Invalid read/write attempt at given size */
52 Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp,
53 /* Invalid or mismatching free */
54 FreeSupp
55 }
56 AddrCheckSuppKind;
57
58/* What kind of error it is. */
59typedef
60 enum { CoreMemErr,
61 AddrErr,
62 ParamErr, UserErr, /* behaves like an anonymous ParamErr */
63 FreeErr, FreeMismatchErr
64 }
65 AddrCheckErrorKind;
66
67/* What kind of memory access is involved in the error? */
68typedef
69 enum { ReadAxs, WriteAxs, ExecAxs }
70 AxsKind;
71
72/* Extra context for memory errors */
73typedef
74 struct {
75 /* AddrErr */
76 AxsKind axskind;
77 /* AddrErr */
78 Int size;
79 /* AddrErr, FreeErr, FreeMismatchErr, ParamErr, UserErr */
80 AcAddrInfo addrinfo;
81 /* ParamErr, UserErr, CoreMemErr */
82 Bool isWrite;
83 }
84 AddrCheckError;
85
86/*------------------------------------------------------------*/
87/*--- Comparing and printing errors ---*/
88/*------------------------------------------------------------*/
89
90static __inline__
91void clear_AcAddrInfo ( AcAddrInfo* ai )
92{
93 ai->akind = Unknown;
94 ai->blksize = 0;
95 ai->rwoffset = 0;
96 ai->lastchange = NULL;
97 ai->stack_tid = VG_INVALID_THREADID;
98 ai->maybe_gcc = False;
99}
100
101static __inline__
102void clear_AddrCheckError ( AddrCheckError* err_extra )
103{
104 err_extra->axskind = ReadAxs;
105 err_extra->size = 0;
106 clear_AcAddrInfo ( &err_extra->addrinfo );
107 err_extra->isWrite = False;
108}
109
110__attribute__((unused))
111static Bool eq_AcAddrInfo ( VgRes res, AcAddrInfo* ai1, AcAddrInfo* ai2 )
112{
113 if (ai1->akind != Undescribed
114 && ai2->akind != Undescribed
115 && ai1->akind != ai2->akind)
116 return False;
117 if (ai1->akind == Freed || ai1->akind == Mallocd) {
118 if (ai1->blksize != ai2->blksize)
119 return False;
120 if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
121 return False;
122 }
123 return True;
124}
125
126/* Compare error contexts, to detect duplicates. Note that if they
127 are otherwise the same, the faulting addrs and associated rwoffsets
128 are allowed to be different. */
129
130Bool SK_(eq_SkinError) ( VgRes res,
131 SkinError* e1, SkinError* e2 )
132{
133 AddrCheckError* e1_extra = e1->extra;
134 AddrCheckError* e2_extra = e2->extra;
135
136 switch (e1->ekind) {
137 case CoreMemErr:
138 if (e1_extra->isWrite != e2_extra->isWrite) return False;
139 if (e2->ekind != CoreMemErr) return False;
140 if (e1->string == e2->string) return True;
141 if (0 == VG_(strcmp)(e1->string, e2->string)) return True;
142 return False;
143
144 case UserErr:
145 case ParamErr:
146 if (e1_extra->isWrite != e2_extra->isWrite)
147 return False;
148 if (e1->ekind == ParamErr
149 && 0 != VG_(strcmp)(e1->string, e2->string))
150 return False;
151 return True;
152
153 case FreeErr:
154 case FreeMismatchErr:
155 /* JRS 2002-Aug-26: comparing addrs seems overkill and can
156 cause excessive duplication of errors. Not even AddrErr
157 below does that. So don't compare either the .addr field
158 or the .addrinfo fields. */
159 /* if (e1->addr != e2->addr) return False; */
160 /* if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
161 return False;
162 */
163 return True;
164
165 case AddrErr:
166 /* if (e1_extra->axskind != e2_extra->axskind) return False; */
167 if (e1_extra->size != e2_extra->size) return False;
168 /*
169 if (!eq_AcAddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
170 return False;
171 */
172 return True;
173
174 default:
175 VG_(printf)("Error:\n unknown AddrCheck error code %d\n", e1->ekind);
176 VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
177 }
178}
179
180static void pp_AcAddrInfo ( Addr a, AcAddrInfo* ai )
181{
182 switch (ai->akind) {
183 case Stack:
184 VG_(message)(Vg_UserMsg,
185 " Address 0x%x is on thread %d's stack",
186 a, ai->stack_tid);
187 break;
188 case Unknown:
189 if (ai->maybe_gcc) {
190 VG_(message)(Vg_UserMsg,
191 " Address 0x%x is just below %%esp. Possibly a bug in GCC/G++",
192 a);
193 VG_(message)(Vg_UserMsg,
194 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
195 } else {
196 VG_(message)(Vg_UserMsg,
197 " Address 0x%x is not stack'd, malloc'd or free'd", a);
198 }
199 break;
200 case Freed: case Mallocd: {
201 UInt delta;
202 UChar* relative;
203 if (ai->rwoffset < 0) {
204 delta = (UInt)(- ai->rwoffset);
205 relative = "before";
206 } else if (ai->rwoffset >= ai->blksize) {
207 delta = ai->rwoffset - ai->blksize;
208 relative = "after";
209 } else {
210 delta = ai->rwoffset;
211 relative = "inside";
212 }
213 {
214 VG_(message)(Vg_UserMsg,
215 " Address 0x%x is %d bytes %s a block of size %d %s",
216 a, delta, relative,
217 ai->blksize,
218 ai->akind==Mallocd ? "alloc'd"
219 : ai->akind==Freed ? "free'd"
220 : "client-defined");
221 }
222 VG_(pp_ExeContext)(ai->lastchange);
223 break;
224 }
225 default:
226 VG_(skin_panic)("pp_AcAddrInfo");
227 }
228}
229
230void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
231{
232 AddrCheckError* err_extra = err->extra;
233
234 switch (err->ekind) {
235 case CoreMemErr:
236 if (err_extra->isWrite) {
237 VG_(message)(Vg_UserMsg,
238 "%s contains unaddressable byte(s)", err->string );
239 } else {
240 VG_(message)(Vg_UserMsg,
241 "%s contains unaddressable byte(s)", err->string );
242 }
243 pp_ExeContext();
244 break;
245
246 case AddrErr:
247 switch (err_extra->axskind) {
248 case ReadAxs:
249 case WriteAxs:
250 /* These two aren't actually differentiated ever. */
251 VG_(message)(Vg_UserMsg, "Invalid memory access of size %d",
252 err_extra->size );
253 break;
254 case ExecAxs:
255 VG_(message)(Vg_UserMsg, "Jump to the invalid address "
256 "stated on the next line");
257 break;
258 default:
259 VG_(skin_panic)("pp_SkinError(axskind)");
260 }
261 pp_ExeContext();
262 pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
263 break;
264
265 case FreeErr:
266 VG_(message)(Vg_UserMsg,"Invalid free() / delete / delete[]");
267 /* fall through */
268 case FreeMismatchErr:
269 if (err->ekind == FreeMismatchErr)
270 VG_(message)(Vg_UserMsg,
271 "Mismatched free() / delete / delete []");
272 pp_ExeContext();
273 pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
274 break;
275
276 case ParamErr:
277 if (err_extra->isWrite) {
278 VG_(message)(Vg_UserMsg,
279 "Syscall param %s contains unaddressable byte(s)",
280 err->string );
281 } else {
282 VG_(message)(Vg_UserMsg,
283 "Syscall param %s contains uninitialised or "
284 "unaddressable byte(s)",
285 err->string);
286 }
287 pp_ExeContext();
288 pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
289 break;
290
291 case UserErr:
292 if (err_extra->isWrite) {
293 VG_(message)(Vg_UserMsg,
294 "Unaddressable byte(s) found during client check request");
295 } else {
296 VG_(message)(Vg_UserMsg,
297 "Uninitialised or "
298 "unaddressable byte(s) found during client check request");
299 }
300 pp_ExeContext();
301 pp_AcAddrInfo(err->addr, &err_extra->addrinfo);
302 break;
303
304 default:
305 VG_(printf)("Error:\n unknown AddrCheck error code %d\n", err->ekind);
306 VG_(skin_panic)("unknown error code in SK_(pp_SkinError)");
307 }
308}
309
310/*------------------------------------------------------------*/
311/*--- Recording errors ---*/
312/*------------------------------------------------------------*/
313
314/* Describe an address as best you can, for error messages,
315 putting the result in ai. */
316
317static void describe_addr ( Addr a, AcAddrInfo* ai )
318{
319 ShadowChunk* sc;
320 ThreadId tid;
321
322 /* Nested functions, yeah. Need the lexical scoping of 'a'. */
323
324 /* Closure for searching thread stacks */
325 Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
326 {
327 return (stack_min <= a && a <= stack_max);
328 }
329 /* Closure for searching malloc'd and free'd lists */
330 Bool addr_is_in_block(ShadowChunk *sh_ch)
331 {
332 return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size );
333 }
334 /* Perhaps it's on a thread's stack? */
335 tid = VG_(any_matching_thread_stack)(addr_is_in_bounds);
336 if (tid != VG_INVALID_THREADID) {
337 ai->akind = Stack;
338 ai->stack_tid = tid;
339 return;
340 }
341 /* Search for a recently freed block which might bracket it. */
342 sc = SK_(any_matching_freed_ShadowChunks)(addr_is_in_block);
343 if (NULL != sc) {
344 ai->akind = Freed;
345 ai->blksize = sc->size;
346 ai->rwoffset = (Int)(a) - (Int)(sc->data);
347 ai->lastchange = (ExeContext*)sc->skin_extra[0];
348 return;
349 }
350 /* Search for a currently malloc'd block which might bracket it. */
351 sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
352 if (NULL != sc) {
353 ai->akind = Mallocd;
354 ai->blksize = sc->size;
355 ai->rwoffset = (Int)(a) - (Int)(sc->data);
356 ai->lastchange = (ExeContext*)sc->skin_extra[0];
357 return;
358 }
359 /* Clueless ... */
360 ai->akind = Unknown;
361 return;
362}
363
364
365/* Creates a copy of the err_extra, updates the copy with address info if
366 necessary, sticks the copy into the SkinError. */
367void SK_(dup_extra_and_update)(SkinError* err)
368{
369 AddrCheckError* err_extra;
370
371 err_extra = VG_(malloc)(sizeof(AddrCheckError));
372 *err_extra = *((AddrCheckError*)err->extra);
373
374 if (err_extra->addrinfo.akind == Undescribed)
375 describe_addr ( err->addr, &(err_extra->addrinfo) );
376
377 err->extra = err_extra;
378}
379
380/* Is this address within some small distance below %ESP? Used only
381 for the --workaround-gcc296-bugs kludge. */
382Bool VG_(is_just_below_ESP)( Addr esp, Addr aa )
383{
384 if ((UInt)esp > (UInt)aa
385 && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
386 return True;
387 else
388 return False;
389}
390
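/* Illustrative sketch (not from the original sources): the gcc 2.96 /
   3.0.X code-generation bug being worked around is an access made just
   below %esp before the stack pointer has actually been moved, e.g.

       movl %eax, -8(%esp)    # write below the current stack pointer
       subl $8, %esp          # only now is the space allocated

   Without the VG_GCC296_BUG_STACK_SLOP allowance above, AddrCheck
   would flag the first instruction as an invalid write. */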
391static
392void sk_record_address_error ( Addr a, Int size, Bool isWrite )
393{
394 AddrCheckError err_extra;
395 Bool just_below_esp;
396
397 just_below_esp
398 = VG_(is_just_below_ESP)( VG_(get_stack_pointer)(), a );
399
400 /* If this is caused by an access immediately below %ESP, and the
401 user asks nicely, we just ignore it. */
402 if (SK_(clo_workaround_gcc296_bugs) && just_below_esp)
403 return;
404
405 clear_AddrCheckError( &err_extra );
406 err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
407 err_extra.size = size;
408 err_extra.addrinfo.akind = Undescribed;
409 err_extra.addrinfo.maybe_gcc = just_below_esp;
410 VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
411}
412
413/* These ones are called from non-generated code */
414
415/* This is for memory errors in pthread functions, as opposed to pthread API
416 errors which are found by the core. */
417void SK_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
418{
419 AddrCheckError err_extra;
420
421 clear_AddrCheckError( &err_extra );
422 err_extra.isWrite = isWrite;
423 VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
424}
425
426void SK_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
427 Char* msg )
428{
429 AddrCheckError err_extra;
430
431 sk_assert(NULL != tst);
432 clear_AddrCheckError( &err_extra );
433 err_extra.addrinfo.akind = Undescribed;
434 err_extra.isWrite = isWrite;
435 VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
436}
437
438void SK_(record_jump_error) ( ThreadState* tst, Addr a )
439{
440 AddrCheckError err_extra;
441
442 sk_assert(NULL != tst);
443
444 clear_AddrCheckError( &err_extra );
445 err_extra.axskind = ExecAxs;
446 err_extra.addrinfo.akind = Undescribed;
447 VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
448}
449
450void SK_(record_free_error) ( ThreadState* tst, Addr a )
451{
452 AddrCheckError err_extra;
453
454 sk_assert(NULL != tst);
455
456 clear_AddrCheckError( &err_extra );
457 err_extra.addrinfo.akind = Undescribed;
458 VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
459}
460
461void SK_(record_freemismatch_error) ( ThreadState* tst, Addr a )
462{
463 AddrCheckError err_extra;
464
465 sk_assert(NULL != tst);
466
467 clear_AddrCheckError( &err_extra );
468 err_extra.addrinfo.akind = Undescribed;
469 VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
470}
471
472void SK_(record_user_error) ( ThreadState* tst, Addr a, Bool isWrite )
473{
474 AddrCheckError err_extra;
475
476 sk_assert(NULL != tst);
477
478 clear_AddrCheckError( &err_extra );
479 err_extra.addrinfo.akind = Undescribed;
480 err_extra.isWrite = isWrite;
481 VG_(maybe_record_error)( tst, UserErr, a, /*s*/NULL, &err_extra );
482}
483
484
485/*------------------------------------------------------------*/
486/*--- Suppressions ---*/
487/*------------------------------------------------------------*/
488
489#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
490 && VG_(strcmp)((s1),(s2))==0)
491
492Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
493{
494 if (STREQ(name, "Param")) *skind = ParamSupp;
495 else if (STREQ(name, "CoreMem")) *skind = CoreMemSupp;
496 else if (STREQ(name, "Addr1")) *skind = Addr1Supp;
497 else if (STREQ(name, "Addr2")) *skind = Addr2Supp;
498 else if (STREQ(name, "Addr4")) *skind = Addr4Supp;
499 else if (STREQ(name, "Addr8")) *skind = Addr8Supp;
500 else if (STREQ(name, "Free")) *skind = FreeSupp;
501 else
502 return False;
503
504 return True;
505}
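/* A minimal sketch of how these kinds are used from a suppressions
   file (illustrative only: the tool-name prefix and the fun:/obj:
   frame syntax are handled by the core's suppression parser, and the
   names below are made up):

       {
          ignore-4-byte-access-in-libfoo
          Addrcheck:Addr4
          fun:foo_internal
          obj:/usr/lib/libfoo.so.1
       }

   "Addr4" is recognised by SK_(recognised_suppression) above, and the
   access size is later compared against the error's size in
   SK_(error_matches_suppression). */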
506
507Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
508 SkinSupp *s )
509{
510 Bool eof;
511
512 if (s->skind == ParamSupp) {
513 eof = VG_(get_line) ( fd, buf, nBuf );
514 if (eof) return False;
515 s->string = VG_(strdup)(buf);
516 }
517 return True;
518}
519
520extern Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
521{
522 UInt su_size;
523 AddrCheckError* err_extra = err->extra;
524
525 switch (su->skind) {
526 case ParamSupp:
527 return (err->ekind == ParamErr && STREQ(su->string, err->string));
528
529 case CoreMemSupp:
530 return (err->ekind == CoreMemErr && STREQ(su->string, err->string));
531
532 case Addr1Supp: su_size = 1; goto addr_case;
533 case Addr2Supp: su_size = 2; goto addr_case;
534 case Addr4Supp: su_size = 4; goto addr_case;
535 case Addr8Supp: su_size = 8; goto addr_case;
536 addr_case:
537 return (err->ekind == AddrErr && err_extra->size == su_size);
538
539 case FreeSupp:
540 return (err->ekind == FreeErr || err->ekind == FreeMismatchErr);
541
542 default:
543 VG_(printf)("Error:\n"
544 " unknown AddrCheck suppression type %d\n", su->skind);
545 VG_(skin_panic)("unknown suppression type in "
546 "SK_(error_matches_suppression)");
547 }
548}
549
550# undef STREQ
551
552
553/*--------------------------------------------------------------------*/
554/*--- Part of the AddrCheck skin: Maintain bitmaps of memory, ---*/
555/*--- tracking the accessibility (A) of each byte. ---*/
556/*--------------------------------------------------------------------*/
557
558#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
559
560/*------------------------------------------------------------*/
561/*--- Command line options ---*/
562/*------------------------------------------------------------*/
563
564Bool SK_(clo_partial_loads_ok) = True;
565Int SK_(clo_freelist_vol) = 1000000;
566Bool SK_(clo_leak_check) = False;
567VgRes SK_(clo_leak_resolution) = Vg_LowRes;
568Bool SK_(clo_show_reachable) = False;
569Bool SK_(clo_workaround_gcc296_bugs) = False;
570Bool SK_(clo_cleanup) = True;
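/* Purely illustrative invocation exercising these options -- the
   --skin=addrcheck selector is an assumption about this era's core,
   while --leak-check=yes, --workaround-gcc296-bugs=yes and -v are the
   flags this file itself mentions in its user messages:

       valgrind --skin=addrcheck --leak-check=yes \
                --workaround-gcc296-bugs=yes -v ./myprog
*/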
571
572/*------------------------------------------------------------*/
573/*--- Profiling events ---*/
574/*------------------------------------------------------------*/
575
576typedef
577 enum {
578 VgpCheckMem = VgpFini+1,
579 VgpSetMem
580 }
581 VgpSkinCC;
582
583/*------------------------------------------------------------*/
584/*--- Low-level support for memory checking. ---*/
585/*------------------------------------------------------------*/
586
587/* All reads and writes are checked against a memory map, which
588 records the state of all memory in the process. The memory map is
589 organised like this:
590
591 The top 16 bits of an address are used to index into a top-level
592 map table, containing 65536 entries. Each entry is a pointer to a
593 second-level map, which records the accessibility
594 permissions for the 65536 bytes indexed by the lower 16 bits of the
595 address. Each byte is represented by one bit, indicating
596 accessibility. So each second-level map contains 8192 bytes. This
597 two-level arrangement conveniently divides the 4G address space
598 into 64k lumps, each size 64k bytes.
599
600 All entries in the primary (top-level) map must point to a valid
601 secondary (second-level) map. Since most of the 4G of address
602 space will not be in use -- ie, not mapped at all -- there is a
603 distinguished secondary map, which indicates `not addressible'
604 for all bytes. Entries in the primary map for
605 which the entire 64k is not in use at all point at this
606 distinguished map.
607
608 [...] lots of stuff deleted due to out of date-ness
609
610 As a final optimisation, the alignment and address checks for
611 4-byte loads and stores are combined in a neat way. The primary
612 map is extended to have 262144 entries (2^18), rather than 2^16.
613 The top 3/4 of these entries are permanently set to the
614 distinguished secondary map. For a 4-byte load/store, the
615 top-level map is indexed not with (addr >> 16) but instead f(addr),
616 where
617
618 f( XXXX XXXX XXXX XXXX ____ ____ ____ __YZ )
619 = ____ ____ ____ __YZ XXXX XXXX XXXX XXXX or
620 = ____ ____ ____ __ZY XXXX XXXX XXXX XXXX
621
622 ie the lowest two bits are placed above the 16 high address bits.
623 If either of these two bits are nonzero, the address is misaligned;
624 this will select a secondary map from the upper 3/4 of the primary
625 map. Because this is always the distinguished secondary map, a
626 (bogus) address check failure will result. The failure handling
627 code can then figure out whether this is a genuine addr check
628 failure or whether it is a possibly-legitimate access at a
629 misaligned address. */
630
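/* Worked example, added for clarity (not part of the original text):
   the A bit for the byte at address 0x8049F34 lives at

       primary index    = 0x8049F34 >> 16    = 0x0804
       secondary offset = 0x8049F34 & 0xFFFF = 0x9F34
       i.e. bit (0x9F34 & 7) == 4 of abits[0x9F34 >> 3] == abits[0x13E6]

   For the combined alignment-and-address check on 4-byte accesses
   described above, the primary index is instead
   rotateRight16(a) & 0x3FFFF: a 4-aligned address has its two lowest
   bits zero, so the index stays below 0x10000 and selects a normal
   secondary map, whereas a misaligned address gives an index >=
   0x10000, which always hits the distinguished (all-invalid) map and
   forces the slow path to sort out whether the access is really bad. */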
631
632/*------------------------------------------------------------*/
633/*--- Crude profiling machinery. ---*/
634/*------------------------------------------------------------*/
635
636#ifdef VG_PROFILE_MEMORY
637
638#define N_PROF_EVENTS 150
639
640static UInt event_ctr[N_PROF_EVENTS];
641
642static void init_prof_mem ( void )
643{
644 Int i;
645 for (i = 0; i < N_PROF_EVENTS; i++)
646 event_ctr[i] = 0;
647}
648
649static void done_prof_mem ( void )
650{
651 Int i;
652 for (i = 0; i < N_PROF_EVENTS; i++) {
653 if ((i % 10) == 0)
654 VG_(printf)("\n");
655 if (event_ctr[i] > 0)
656 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
657 }
658 VG_(printf)("\n");
659}
660
661#define PROF_EVENT(ev) \
662 do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
663 event_ctr[ev]++; \
664 } while (False);
665
666#else
667
668static void init_prof_mem ( void ) { }
669static void done_prof_mem ( void ) { }
670
671#define PROF_EVENT(ev) /* */
672
673#endif
674
675/* Event index. If just the name of the fn is given, this means the
676 number of calls to the fn. Otherwise it is the specified event.
677
678 10 alloc_secondary_map
679
680 20 get_abit
681 21 get_vbyte
682 22 set_abit
683 23 set_vbyte
684 24 get_abits4_ALIGNED
685 25 get_vbytes4_ALIGNED
686
687 30 set_address_range_perms
688 31 set_address_range_perms(lower byte loop)
689 32 set_address_range_perms(quadword loop)
690 33 set_address_range_perms(upper byte loop)
691
692 35 make_noaccess
693 36 make_writable
694 37 make_readable
695
696 40 copy_address_range_state
697 41 copy_address_range_state(byte loop)
698 42 check_writable
699 43 check_writable(byte loop)
700 44 check_readable
701 45 check_readable(byte loop)
702 46 check_readable_asciiz
703 47 check_readable_asciiz(byte loop)
704
705 50 make_aligned_word_NOACCESS
706 51 make_aligned_word_WRITABLE
707
708 60 helperc_LOADV4
709 61 helperc_STOREV4
710 62 helperc_LOADV2
711 63 helperc_STOREV2
712 64 helperc_LOADV1
713 65 helperc_STOREV1
714
715 70 rim_rd_V4_SLOWLY
716 71 rim_wr_V4_SLOWLY
717 72 rim_rd_V2_SLOWLY
718 73 rim_wr_V2_SLOWLY
719 74 rim_rd_V1_SLOWLY
720 75 rim_wr_V1_SLOWLY
721
722 80 fpu_read
723 81 fpu_read aligned 4
724 82 fpu_read aligned 8
725 83 fpu_read 2
726 84 fpu_read 10
727
728 85 fpu_write
729 86 fpu_write aligned 4
730 87 fpu_write aligned 8
731 88 fpu_write 2
732 89 fpu_write 10
733
734 90 fpu_read_check_SLOWLY
735 91 fpu_read_check_SLOWLY(byte loop)
736 92 fpu_write_check_SLOWLY
737 93 fpu_write_check_SLOWLY(byte loop)
738
739 100 is_plausible_stack_addr
740 101 handle_esp_assignment
741 102 handle_esp_assignment(-4)
742 103 handle_esp_assignment(+4)
743 104 handle_esp_assignment(-12)
744 105 handle_esp_assignment(-8)
745 106 handle_esp_assignment(+16)
746 107 handle_esp_assignment(+12)
747 108 handle_esp_assignment(0)
748 109 handle_esp_assignment(+8)
749 110 handle_esp_assignment(-16)
750 111 handle_esp_assignment(+20)
751 112 handle_esp_assignment(-20)
752 113 handle_esp_assignment(+24)
753 114 handle_esp_assignment(-24)
754
755 120 vg_handle_esp_assignment_SLOWLY
756 121 vg_handle_esp_assignment_SLOWLY(normal; move down)
757 122 vg_handle_esp_assignment_SLOWLY(normal; move up)
758 123 vg_handle_esp_assignment_SLOWLY(normal)
759 124 vg_handle_esp_assignment_SLOWLY(>= HUGE_DELTA)
760*/
761
762/*------------------------------------------------------------*/
763/*--- Function declarations. ---*/
764/*------------------------------------------------------------*/
765
766static void vgmext_ACCESS4_SLOWLY ( Addr a );
767static void vgmext_ACCESS2_SLOWLY ( Addr a );
768static void vgmext_ACCESS1_SLOWLY ( Addr a );
769static void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size );
770
771/*------------------------------------------------------------*/
772/*--- Data defns. ---*/
773/*------------------------------------------------------------*/
774
775typedef
776 struct {
777 UChar abits[8192];
778 }
779 AcSecMap;
780
781static AcSecMap* primary_map[ /*65536*/ 262144 ];
782static AcSecMap distinguished_secondary_map;
783
784#define IS_DISTINGUISHED_SM(smap) \
785 ((smap) == &distinguished_secondary_map)
786
787#define ENSURE_MAPPABLE(addr,caller) \
788 do { \
789 if (IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
790 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
791 /* VG_(printf)("new 2map because of %p\n", addr); */ \
792 } \
793 } while(0)
794
795#define BITARR_SET(aaa_p,iii_p) \
796 do { \
797 UInt iii = (UInt)iii_p; \
798 UChar* aaa = (UChar*)aaa_p; \
799 aaa[iii >> 3] |= (1 << (iii & 7)); \
800 } while (0)
801
802#define BITARR_CLEAR(aaa_p,iii_p) \
803 do { \
804 UInt iii = (UInt)iii_p; \
805 UChar* aaa = (UChar*)aaa_p; \
806 aaa[iii >> 3] &= ~(1 << (iii & 7)); \
807 } while (0)
808
809#define BITARR_TEST(aaa_p,iii_p) \
810 (0 != (((UChar*)aaa_p)[ ((UInt)iii_p) >> 3 ] \
811 & (1 << (((UInt)iii_p) & 7)))) \
812
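/* Example, for illustration: BITARR_SET(arr, 13) sets bit (13 & 7) == 5
   of arr[13 >> 3] == arr[1]; BITARR_TEST reads the same bit back. One
   UChar of the bitmap therefore covers the A bits of 8 client bytes. */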
813
814#define VGM_BIT_VALID 0
815#define VGM_BIT_INVALID 1
816
817#define VGM_NIBBLE_VALID 0
818#define VGM_NIBBLE_INVALID 0xF
819
820#define VGM_BYTE_VALID 0
821#define VGM_BYTE_INVALID 0xFF
822
823#define VGM_WORD_VALID 0
824#define VGM_WORD_INVALID 0xFFFFFFFF
825
826#define VGM_EFLAGS_VALID 0xFFFFFFFE
827#define VGM_EFLAGS_INVALID 0xFFFFFFFF /* not used */
828
829
830static void init_shadow_memory ( void )
831{
832 Int i;
833
834 for (i = 0; i < 8192; i++) /* Invalid address */
835 distinguished_secondary_map.abits[i] = VGM_BYTE_INVALID;
836
837 /* These entries gradually get overwritten as the used address
838 space expands. */
839 for (i = 0; i < 65536; i++)
840 primary_map[i] = &distinguished_secondary_map;
841
842 /* These ones should never change; it's a bug in Valgrind if they do. */
843 for (i = 65536; i < 262144; i++)
844 primary_map[i] = &distinguished_secondary_map;
845}
846
847void SK_(post_clo_init) ( void )
848{
849}
850
851void SK_(fini) ( void )
852{
853 VG_(print_malloc_stats)();
854
855 if (VG_(clo_verbosity) == 1) {
856 if (!SK_(clo_leak_check))
857 VG_(message)(Vg_UserMsg,
858 "For a detailed leak analysis, rerun with: --leak-check=yes");
859
860 VG_(message)(Vg_UserMsg,
861 "For counts of detected errors, rerun with: -v");
862 }
863 if (SK_(clo_leak_check)) SK_(detect_memory_leaks)();
864
865 done_prof_mem();
866}
867
868/*------------------------------------------------------------*/
869/*--- Basic bitmap management, reading and writing. ---*/
870/*------------------------------------------------------------*/
871
872/* Allocate and initialise a secondary map. */
873
874static AcSecMap* alloc_secondary_map ( __attribute__ ((unused))
875 Char* caller )
876{
877 AcSecMap* map;
878 UInt i;
879 PROF_EVENT(10);
880
881 /* Mark all bytes as not addressible. */
882
883 /* It just happens that an AcSecMap occupies exactly 2 pages --
884 although this isn't important, so the following assert is
885 spurious. */
886 sk_assert(0 == (sizeof(AcSecMap) % VKI_BYTES_PER_PAGE));
887 map = VG_(get_memory_from_mmap)( sizeof(AcSecMap), caller );
888
889 for (i = 0; i < 8192; i++)
890 map->abits[i] = VGM_BYTE_INVALID; /* Invalid address */
891
892 /* VG_(printf)("ALLOC_2MAP(%s)\n", caller ); */
893 return map;
894}
895
896
897/* Basic reading/writing of the bitmaps, for byte-sized accesses. */
898
899static __inline__ UChar get_abit ( Addr a )
900{
901 AcSecMap* sm = primary_map[a >> 16];
902 UInt sm_off = a & 0xFFFF;
903 PROF_EVENT(20);
904# if 0
905 if (IS_DISTINGUISHED_SM(sm))
906 VG_(message)(Vg_DebugMsg,
907 "accessed distinguished 2ndary (A)map! 0x%x\n", a);
908# endif
909 return BITARR_TEST(sm->abits, sm_off)
910 ? VGM_BIT_INVALID : VGM_BIT_VALID;
911}
912
913static __inline__ void set_abit ( Addr a, UChar abit )
914{
915 AcSecMap* sm;
916 UInt sm_off;
917 PROF_EVENT(22);
918 ENSURE_MAPPABLE(a, "set_abit");
919 sm = primary_map[a >> 16];
920 sm_off = a & 0xFFFF;
921 if (abit)
922 BITARR_SET(sm->abits, sm_off);
923 else
924 BITARR_CLEAR(sm->abits, sm_off);
925}
926
927
928/* Reading/writing of the bitmaps, for aligned word-sized accesses. */
929
930static __inline__ UChar get_abits4_ALIGNED ( Addr a )
931{
932 AcSecMap* sm;
933 UInt sm_off;
934 UChar abits8;
935 PROF_EVENT(24);
936# ifdef VG_DEBUG_MEMORY
937 sk_assert(IS_ALIGNED4_ADDR(a));
938# endif
939 sm = primary_map[a >> 16];
940 sm_off = a & 0xFFFF;
941 abits8 = sm->abits[sm_off >> 3];
942 abits8 >>= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
943 abits8 &= 0x0F;
944 return abits8;
945}
946
947
948
949/*------------------------------------------------------------*/
950/*--- Setting permissions over address ranges. ---*/
951/*------------------------------------------------------------*/
952
953static void set_address_range_perms ( Addr a, UInt len,
954 UInt example_a_bit )
955{
956 UChar abyte8;
957 UInt sm_off;
958 AcSecMap* sm;
959
960 PROF_EVENT(30);
961
962 if (len == 0)
963 return;
964
965 if (len > 100 * 1000 * 1000) {
966 VG_(message)(Vg_UserMsg,
967 "Warning: set address range perms: "
968 "large range %u, a %d",
969 len, example_a_bit );
970 }
971
972 VGP_PUSHCC(VgpSetMem);
973
974 /* Requests to change permissions of huge address ranges may
975 indicate bugs in our machinery. 30,000,000 is arbitrary, but so
976 far all legitimate requests have fallen beneath that size. */
977 /* 4 Mar 02: this is just stupid; get rid of it. */
978 /* sk_assert(len < 30000000); */
979
980 /* Check the permissions make sense. */
981 sk_assert(example_a_bit == VGM_BIT_VALID
982 || example_a_bit == VGM_BIT_INVALID);
983
984 /* In order that we can charge through the address space at 8
985 bytes/main-loop iteration, make up some perms. */
986 abyte8 = (example_a_bit << 7)
987 | (example_a_bit << 6)
988 | (example_a_bit << 5)
989 | (example_a_bit << 4)
990 | (example_a_bit << 3)
991 | (example_a_bit << 2)
992 | (example_a_bit << 1)
993 | (example_a_bit << 0);
994
995# ifdef VG_DEBUG_MEMORY
996 /* Do it ... */
997 while (True) {
998 PROF_EVENT(31);
999 if (len == 0) break;
1000 set_abit ( a, example_a_bit );
1001 /* (no V bits in AddrCheck -- only the A bit is set) */
1002 a++;
1003 len--;
1004 }
1005
1006# else
1007 /* Slowly do parts preceding 8-byte alignment. */
1008 while (True) {
1009 PROF_EVENT(31);
1010 if (len == 0) break;
1011 if ((a % 8) == 0) break;
1012 set_abit ( a, example_a_bit );
1013 a++;
1014 len--;
1015 }
1016
1017 if (len == 0) {
1018 VGP_POPCC(VgpSetMem);
1019 return;
1020 }
1021 sk_assert((a % 8) == 0 && len > 0);
1022
1023 /* Once aligned, go fast. */
1024 while (True) {
1025 PROF_EVENT(32);
1026 if (len < 8) break;
1027 ENSURE_MAPPABLE(a, "set_address_range_perms(fast)");
1028 sm = primary_map[a >> 16];
1029 sm_off = a & 0xFFFF;
1030 sm->abits[sm_off >> 3] = abyte8;
1031 a += 8;
1032 len -= 8;
1033 }
1034
1035 if (len == 0) {
1036 VGP_POPCC(VgpSetMem);
1037 return;
1038 }
1039 sk_assert((a % 8) == 0 && len > 0 && len < 8);
1040
1041 /* Finish the upper fragment. */
1042 while (True) {
1043 PROF_EVENT(33);
1044 if (len == 0) break;
1045 set_abit ( a, example_a_bit );
1046 a++;
1047 len--;
1048 }
1049# endif
1050
1051 /* Check that zero page and highest page have not been written to
1052 -- this could happen with buggy syscall wrappers. Today
1053 (2001-04-26) had precisely such a problem with __NR_setitimer. */
1054 sk_assert(SK_(cheap_sanity_check)());
1055 VGP_POPCC(VgpSetMem);
1056}
1057
1058/* Set permissions for address ranges ... */
1059
1060void SK_(make_noaccess) ( Addr a, UInt len )
1061{
1062 PROF_EVENT(35);
1063 DEBUG("SK_(make_noaccess)(%p, %x)\n", a, len);
1064 set_address_range_perms ( a, len, VGM_BIT_INVALID );
1065}
1066
1067void SK_(make_accessible) ( Addr a, UInt len )
1068{
1069 PROF_EVENT(36);
1070 DEBUG("SK_(make_accessible)(%p, %x)\n", a, len);
1071 set_address_range_perms ( a, len, VGM_BIT_VALID );
1072}
1073
1074/* Block-copy permissions (needed for implementing realloc()). */
1075
1076static void copy_address_range_state ( Addr src, Addr dst, UInt len )
1077{
1078 UInt i;
1079
1080 DEBUG("copy_address_range_state\n");
1081
1082 PROF_EVENT(40);
1083 for (i = 0; i < len; i++) {
1084 UChar abit = get_abit ( src+i );
1085 PROF_EVENT(41);
1086 set_abit ( dst+i, abit );
1087 }
1088}
1089
1090
1091/* Check permissions for address range. If inadequate permissions
1092 exist, *bad_addr is set to the offending address, so the caller can
1093 know what it is. */
1094
1095Bool SK_(check_writable) ( Addr a, UInt len, Addr* bad_addr )
1096{
1097 UInt i;
1098 UChar abit;
1099 PROF_EVENT(42);
1100 for (i = 0; i < len; i++) {
1101 PROF_EVENT(43);
1102 abit = get_abit(a);
1103 if (abit == VGM_BIT_INVALID) {
1104 if (bad_addr != NULL) *bad_addr = a;
1105 return False;
1106 }
1107 a++;
1108 }
1109 return True;
1110}
1111
1112Bool SK_(check_readable) ( Addr a, UInt len, Addr* bad_addr )
1113{
1114 UInt i;
1115 UChar abit;
1116
1117 PROF_EVENT(44);
1118 DEBUG("SK_(check_readable)\n");
1119 for (i = 0; i < len; i++) {
1120 abit = get_abit(a);
1121 PROF_EVENT(45);
1122 if (abit != VGM_BIT_VALID) {
1123 if (bad_addr != NULL) *bad_addr = a;
1124 return False;
1125 }
1126 a++;
1127 }
1128 return True;
1129}
1130
1131
1132/* Check a zero-terminated ascii string. Tricky -- don't want to
1133 examine the actual bytes, to find the end, until we're sure it is
1134 safe to do so. */
1135
1136Bool SK_(check_readable_asciiz) ( Addr a, Addr* bad_addr )
1137{
1138 UChar abit;
1139 PROF_EVENT(46);
1140 DEBUG("SK_(check_readable_asciiz)\n");
1141 while (True) {
1142 PROF_EVENT(47);
1143 abit = get_abit(a);
1144 if (abit != VGM_BIT_VALID) {
1145 if (bad_addr != NULL) *bad_addr = a;
1146 return False;
1147 }
1148 /* Ok, a is safe to read. */
1149 if (* ((UChar*)a) == 0) return True;
1150 a++;
1151 }
1152}
1153
1154
1155/*------------------------------------------------------------*/
1156/*--- Memory event handlers ---*/
1157/*------------------------------------------------------------*/
1158
1159/* Setting permissions for aligned words. This supports fast stack
1160 operations. */
1161
1162static void make_noaccess_aligned ( Addr a, UInt len )
1163{
1164 AcSecMap* sm;
1165 UInt sm_off;
1166 UChar mask;
1167 Addr a_past_end = a + len;
1168
1169 VGP_PUSHCC(VgpSetMem);
1170
1171 PROF_EVENT(50);
1172# ifdef VG_DEBUG_MEMORY
1173 sk_assert(IS_ALIGNED4_ADDR(a));
1174 sk_assert(IS_ALIGNED4_ADDR(len));
1175# endif
1176
1177 for ( ; a < a_past_end; a += 4) {
1178 ENSURE_MAPPABLE(a, "make_noaccess_aligned");
1179 sm = primary_map[a >> 16];
1180 sm_off = a & 0xFFFF;
1181 mask = 0x0F;
1182 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
1183 /* mask now contains 1s where we wish to make address bits
1184 invalid (1s). */
1185 sm->abits[sm_off >> 3] |= mask;
1186 }
1187 VGP_POPCC(VgpSetMem);
1188}
1189
1190static void make_writable_aligned ( Addr a, UInt len )
1191{
1192 AcSecMap* sm;
1193 UInt sm_off;
1194 UChar mask;
1195 Addr a_past_end = a + len;
1196
1197 VGP_PUSHCC(VgpSetMem);
1198
1199 PROF_EVENT(51);
1200# ifdef VG_DEBUG_MEMORY
1201 sk_assert(IS_ALIGNED4_ADDR(a));
1202 sk_assert(IS_ALIGNED4_ADDR(len));
1203# endif
1204
1205 for ( ; a < a_past_end; a += 4) {
1206 ENSURE_MAPPABLE(a, "make_writable_aligned");
1207 sm = primary_map[a >> 16];
1208 sm_off = a & 0xFFFF;
1209 mask = 0x0F;
1210 mask <<= (a & 4 /* 100b */); /* a & 4 is either 0 or 4 */
1211 /* mask now contains 1s where we wish to make address bits
1212 valid (0s). */
1213 sm->abits[sm_off >> 3] &= ~mask;
1214 }
1215 VGP_POPCC(VgpSetMem);
1216}
1217
1218
1219static
1220void check_is_writable ( CorePart part, ThreadState* tst,
1221 Char* s, UInt base, UInt size )
1222{
1223 Bool ok;
1224 Addr bad_addr;
1225
1226 VGP_PUSHCC(VgpCheckMem);
1227
1228 /* VG_(message)(Vg_DebugMsg,"check is writable: %x .. %x",
1229 base,base+size-1); */
1230 ok = SK_(check_writable) ( base, size, &bad_addr );
1231 if (!ok) {
1232 switch (part) {
1233 case Vg_CoreSysCall:
1234 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/True, s );
1235 break;
1236
1237 case Vg_CorePThread:
1238 case Vg_CoreSignal:
1239 SK_(record_core_mem_error)( tst, /*isWrite=*/True, s );
1240 break;
1241
1242 default:
1243 VG_(skin_panic)("check_is_writable: Unknown or unexpected CorePart");
1244 }
1245 }
1246
1247 VGP_POPCC(VgpCheckMem);
1248}
1249
1250static
1251void check_is_readable ( CorePart part, ThreadState* tst,
1252 Char* s, UInt base, UInt size )
1253{
1254 Bool ok;
1255 Addr bad_addr;
1256
1257 VGP_PUSHCC(VgpCheckMem);
1258
1259 /* VG_(message)(Vg_DebugMsg,"check is readable: %x .. %x",
1260 base,base+size-1); */
1261 ok = SK_(check_readable) ( base, size, &bad_addr );
1262 if (!ok) {
1263 switch (part) {
1264 case Vg_CoreSysCall:
1265 SK_(record_param_error) ( tst, bad_addr, /*isWrite =*/False, s );
1266 break;
1267
1268 case Vg_CorePThread:
1269 SK_(record_core_mem_error)( tst, /*isWrite=*/False, s );
1270 break;
1271
1272 /* If we're being asked to jump to a silly address, record an error
1273 message before potentially crashing the entire system. */
1274 case Vg_CoreTranslate:
1275 SK_(record_jump_error)( tst, bad_addr );
1276 break;
1277
1278 default:
1279 VG_(skin_panic)("check_is_readable: Unknown or unexpected CorePart");
1280 }
1281 }
1282 VGP_POPCC(VgpCheckMem);
1283}
1284
1285static
1286void check_is_readable_asciiz ( CorePart part, ThreadState* tst,
1287 Char* s, UInt str )
1288{
1289 Bool ok = True;
1290 Addr bad_addr;
1291 /* VG_(message)(Vg_DebugMsg,"check is readable asciiz: 0x%x",str); */
1292
1293 VGP_PUSHCC(VgpCheckMem);
1294
1295 sk_assert(part == Vg_CoreSysCall);
1296 ok = SK_(check_readable_asciiz) ( (Addr)str, &bad_addr );
1297 if (!ok) {
1298 SK_(record_param_error) ( tst, bad_addr, /*is_writable =*/False, s );
1299 }
1300
1301 VGP_POPCC(VgpCheckMem);
1302}
1303
1304static
1305void addrcheck_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
1306{
1307 // JJJ: this ignores the permissions and just makes it readable, like the
1308 // old code did, AFAICT
1309 DEBUG("new_mem_startup(%p, %u, rr=%u, ww=%u, xx=%u)\n", a,len,rr,ww,xx);
1310 SK_(make_accessible)(a, len);
1311}
1312
1313static
1314void addrcheck_new_mem_heap ( Addr a, UInt len, Bool is_inited )
1315{
1316 SK_(make_accessible)(a, len);
1317}
1318
1319static
1320void addrcheck_set_perms (Addr a, UInt len,
1321 Bool nn, Bool rr, Bool ww, Bool xx)
1322{
1323 DEBUG("addrcheck_set_perms(%p, %u, nn=%u, rr=%u ww=%u, xx=%u)\n",
1324 a, len, nn, rr, ww, xx);
1325 if (rr || ww || xx) {
1326 SK_(make_accessible)(a, len);
1327 } else {
1328 SK_(make_noaccess)(a, len);
1329 }
1330}
1331
1332
1333/*------------------------------------------------------------*/
1334/*--- Functions called directly from generated code. ---*/
1335/*------------------------------------------------------------*/
1336
1337static __inline__ UInt rotateRight16 ( UInt x )
1338{
1339 /* Amazingly, gcc turns this into a single rotate insn. */
1340 return (x >> 16) | (x << 16);
1341}
1342
1343
1344static __inline__ UInt shiftRight16 ( UInt x )
1345{
1346 return x >> 16;
1347}
1348
1349
1350/* Check addressibility of 1/2/4 sized accesses, and emit an address error if
1351 needed. */
1352
1353/* SK_(helperc_ACCESS{1,2,4}) handle the common case fast.
1354 Under all other circumstances, it defers to the relevant _SLOWLY
1355 function, which can handle all situations.
1356*/
1357__attribute__ ((regparm(1)))
1358void SK_(helperc_ACCESS4) ( Addr a )
1359{
1360# ifdef VG_DEBUG_MEMORY
1361 return vgmext_ACCESS4_SLOWLY(a);
1362# else
1363 UInt sec_no = rotateRight16(a) & 0x3FFFF;
1364 AcSecMap* sm = primary_map[sec_no];
1365 UInt a_off = (a & 0xFFFF) >> 3;
1366 UChar abits = sm->abits[a_off];
1367 abits >>= (a & 4);
1368 abits &= 15;
1369 PROF_EVENT(60);
1370 if (abits == VGM_NIBBLE_VALID) {
1371 /* Handle common case quickly: a is suitably aligned, is mapped,
1372 and is addressible. So just return. */
1373 return;
1374 } else {
1375 /* Slow but general case. */
1376 vgmext_ACCESS4_SLOWLY(a);
1377 }
1378# endif
1379}
1380
1381__attribute__ ((regparm(1)))
1382void SK_(helperc_ACCESS2) ( Addr a )
1383{
1384# ifdef VG_DEBUG_MEMORY
1385 return vgmext_ACCESS2_SLOWLY(a);
1386# else
1387 UInt sec_no = rotateRight16(a) & 0x1FFFF;
1388 AcSecMap* sm = primary_map[sec_no];
1389 UInt a_off = (a & 0xFFFF) >> 3;
1390 PROF_EVENT(62);
1391 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1392 /* Handle common case quickly. */
1393 return;
1394 } else {
1395 /* Slow but general case. */
1396 vgmext_ACCESS2_SLOWLY(a);
1397 }
1398# endif
1399}
1400
1401__attribute__ ((regparm(1)))
1402void SK_(helperc_ACCESS1) ( Addr a )
1403{
1404# ifdef VG_DEBUG_MEMORY
1405 return vgmext_ACCESS1_SLOWLY(a);
1406# else
1407 UInt sec_no = shiftRight16(a);
1408 AcSecMap* sm = primary_map[sec_no];
1409 UInt a_off = (a & 0xFFFF) >> 3;
1410 PROF_EVENT(64);
1411 if (sm->abits[a_off] == VGM_BYTE_VALID) {
1412 /* Handle common case quickly. */
1413 return;
1414 } else {
1415 /* Slow but general case. */
1416 vgmext_ACCESS1_SLOWLY(a);
1417 }
1418# endif
1419}
1420
1421
1422/*------------------------------------------------------------*/
1423/*--- Fallback functions to handle cases that the above ---*/
1424/*--- VG_(helperc_ACCESS{1,2,4}) can't manage. ---*/
1425/*------------------------------------------------------------*/
1426
1427static void vgmext_ACCESS4_SLOWLY ( Addr a )
1428{
1429 Bool a0ok, a1ok, a2ok, a3ok;
1430
1431 PROF_EVENT(70);
1432
1433 /* First establish independently the addressibility of the 4 bytes
1434 involved. */
1435 a0ok = get_abit(a+0) == VGM_BIT_VALID;
1436 a1ok = get_abit(a+1) == VGM_BIT_VALID;
1437 a2ok = get_abit(a+2) == VGM_BIT_VALID;
1438 a3ok = get_abit(a+3) == VGM_BIT_VALID;
1439
1440 /* Now distinguish 3 cases */
1441
1442 /* Case 1: the address is completely valid, so:
1443 - no addressing error
1444 */
1445 if (a0ok && a1ok && a2ok && a3ok) {
1446 return;
1447 }
1448
1449 /* Case 2: the address is completely invalid.
1450 - emit addressing error
1451 */
1452 /* VG_(printf)("%p (%d %d %d %d)\n", a, a0ok, a1ok, a2ok, a3ok); */
1453 if (!SK_(clo_partial_loads_ok)
1454 || ((a & 3) != 0)
1455 || (!a0ok && !a1ok && !a2ok && !a3ok)) {
1456 sk_record_address_error( a, 4, False );
1457 return;
1458 }
1459
1460 /* Case 3: the address is partially valid.
1461 - no addressing error
1462 Case 3 is only allowed if SK_(clo_partial_loads_ok) is True
1463 (which is the default), and the address is 4-aligned.
1464 If not, Case 2 will have applied.
1465 */
1466 sk_assert(SK_(clo_partial_loads_ok));
1467 {
1468 return;
1469 }
1470}
1471
1472static void vgmext_ACCESS2_SLOWLY ( Addr a )
1473{
1474 /* Check the address for validity. */
1475 Bool aerr = False;
1476 PROF_EVENT(72);
1477
1478 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1479 if (get_abit(a+1) != VGM_BIT_VALID) aerr = True;
1480
1481 /* If an address error has happened, report it. */
1482 if (aerr) {
1483 sk_record_address_error( a, 2, False );
1484 }
1485}
1486
1487static void vgmext_ACCESS1_SLOWLY ( Addr a )
1488{
1489 /* Check the address for validity. */
1490 Bool aerr = False;
1491 PROF_EVENT(74);
1492
1493 if (get_abit(a+0) != VGM_BIT_VALID) aerr = True;
1494
1495 /* If an address error has happened, report it. */
1496 if (aerr) {
1497 sk_record_address_error( a, 1, False );
1498 }
1499}
1500
1501
1502/* ---------------------------------------------------------------------
1503 FPU load and store checks, called from generated code.
1504 ------------------------------------------------------------------ */
1505
1506__attribute__ ((regparm(2)))
1507void SK_(fpu_ACCESS_check) ( Addr addr, Int size )
1508{
1509 /* Ensure the accessed area is addressible. AddrCheck tracks no
1510 definedness (V) bits, so unlike MemCheck only address errors
1511 can be reported here.
1512
1513
1514 Try to be reasonably fast on the common case; wimp out and defer
1515 to fpu_ACCESS_check_SLOWLY for everything else. */
1516
1517 AcSecMap* sm;
1518 UInt sm_off, a_off;
1519 Addr addr4;
1520
1521 PROF_EVENT(80);
1522
1523# ifdef VG_DEBUG_MEMORY
1524 fpu_ACCESS_check_SLOWLY ( addr, size );
1525# else
1526
1527 if (size == 4) {
1528 if (!IS_ALIGNED4_ADDR(addr)) goto slow4;
1529 PROF_EVENT(81);
1530 /* Properly aligned. */
1531 sm = primary_map[addr >> 16];
1532 sm_off = addr & 0xFFFF;
1533 a_off = sm_off >> 3;
1534 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow4;
1535 /* Properly aligned and addressible. */
1536 return;
1537 slow4:
1538 fpu_ACCESS_check_SLOWLY ( addr, 4 );
1539 return;
1540 }
1541
1542 if (size == 8) {
1543 if (!IS_ALIGNED4_ADDR(addr)) goto slow8;
1544 PROF_EVENT(82);
1545 /* Properly aligned. Do it in two halves. */
1546 addr4 = addr + 4;
1547 /* First half. */
1548 sm = primary_map[addr >> 16];
1549 sm_off = addr & 0xFFFF;
1550 a_off = sm_off >> 3;
1551 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1552 /* First half properly aligned and addressible. */
1553 /* Second half. */
1554 sm = primary_map[addr4 >> 16];
1555 sm_off = addr4 & 0xFFFF;
1556 a_off = sm_off >> 3;
1557 if (sm->abits[a_off] != VGM_BYTE_VALID) goto slow8;
1558 /* Second half properly aligned and addressible. */
1559 /* Both halves properly aligned and addressible. */
1560 return;
1561 slow8:
1562 fpu_ACCESS_check_SLOWLY ( addr, 8 );
1563 return;
1564 }
1565
1566 /* Can't be bothered to huff'n'puff to make these (allegedly) rare
1567 cases go quickly. */
1568 if (size == 2) {
1569 PROF_EVENT(83);
1570 fpu_ACCESS_check_SLOWLY ( addr, 2 );
1571 return;
1572 }
1573
1574 if (size == 10) {
1575 PROF_EVENT(84);
1576 fpu_ACCESS_check_SLOWLY ( addr, 10 );
1577 return;
1578 }
1579
1580 if (size == 28 || size == 108) {
1581 PROF_EVENT(84); /* XXX assign correct event number */
1582 fpu_ACCESS_check_SLOWLY ( addr, size );
1583 return;
1584 }
1585
1586 VG_(printf)("size is %d\n", size);
1587 VG_(skin_panic)("fpu_ACCESS_check: unhandled size");
1588# endif
1589}
1590
1591
1592/* ---------------------------------------------------------------------
1593 Slow, general cases for FPU access checks.
1594 ------------------------------------------------------------------ */
1595
1596void fpu_ACCESS_check_SLOWLY ( Addr addr, Int size )
1597{
1598 Int i;
1599 Bool aerr = False;
1600 PROF_EVENT(90);
1601 for (i = 0; i < size; i++) {
1602 PROF_EVENT(91);
1603 if (get_abit(addr+i) != VGM_BIT_VALID)
1604 aerr = True;
1605 }
1606
1607 if (aerr) {
1608 sk_record_address_error( addr, size, False );
1609 }
1610}
1611
1612
1613/*------------------------------------------------------------*/
1614/*--- Shadow chunks info ---*/
1615/*------------------------------------------------------------*/
1616
1617static __inline__
1618void set_where( ShadowChunk* sc, ExeContext* ec )
1619{
1620 sc->skin_extra[0] = (UInt)ec;
1621}
1622
1623static __inline__
1624ExeContext *get_where( ShadowChunk* sc )
1625{
1626 return (ExeContext*)sc->skin_extra[0];
1627}
1628
1629void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
1630{
1631 set_where( sc, VG_(get_ExeContext) ( tst ) );
1632}
1633
1634/*------------------------------------------------------------*/
1635/*--- Postponing free()ing ---*/
1636/*------------------------------------------------------------*/
1637
1638/* Holds blocks after freeing. */
1639static ShadowChunk* vg_freed_list_start = NULL;
1640static ShadowChunk* vg_freed_list_end = NULL;
1641static Int vg_freed_list_volume = 0;
1642
1643static __attribute__ ((unused))
1644 Int count_freelist ( void )
1645{
1646 ShadowChunk* sc;
1647 Int n = 0;
1648 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1649 n++;
1650 return n;
1651}
1652
1653static __attribute__ ((unused))
1654 void freelist_sanity ( void )
1655{
1656 ShadowChunk* sc;
1657 Int n = 0;
1658 /* VG_(printf)("freelist sanity\n"); */
1659 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1660 n += sc->size;
1661 sk_assert(n == vg_freed_list_volume);
1662}
1663
1664/* Put a shadow chunk on the freed blocks queue, possibly freeing up
1665 some of the oldest blocks in the queue at the same time. */
1666static void add_to_freed_queue ( ShadowChunk* sc )
1667{
1668 ShadowChunk* sc1;
1669
1670 /* Put it at the end of the freed list */
1671 if (vg_freed_list_end == NULL) {
1672 sk_assert(vg_freed_list_start == NULL);
1673 vg_freed_list_end = vg_freed_list_start = sc;
1674 vg_freed_list_volume = sc->size;
1675 } else {
1676 sk_assert(vg_freed_list_end->next == NULL);
1677 vg_freed_list_end->next = sc;
1678 vg_freed_list_end = sc;
1679 vg_freed_list_volume += sc->size;
1680 }
1681 sc->next = NULL;
1682
1683 /* Release enough of the oldest blocks to bring the free queue
1684 volume below vg_clo_freelist_vol. */
1685
1686 while (vg_freed_list_volume > SK_(clo_freelist_vol)) {
1687 /* freelist_sanity(); */
1688 sk_assert(vg_freed_list_start != NULL);
1689 sk_assert(vg_freed_list_end != NULL);
1690
1691 sc1 = vg_freed_list_start;
1692 vg_freed_list_volume -= sc1->size;
1693 /* VG_(printf)("volume now %d\n", vg_freed_list_volume); */
1694 sk_assert(vg_freed_list_volume >= 0);
1695
1696 if (vg_freed_list_start == vg_freed_list_end) {
1697 vg_freed_list_start = vg_freed_list_end = NULL;
1698 } else {
1699 vg_freed_list_start = sc1->next;
1700 }
1701 sc1->next = NULL; /* just paranoia */
1702 VG_(free_ShadowChunk) ( sc1 );
1703 }
1704}
1705
1706/* Return the first shadow chunk satisfying the predicate p. */
1707ShadowChunk* SK_(any_matching_freed_ShadowChunks)
1708 ( Bool (*p) ( ShadowChunk* ))
1709{
1710 ShadowChunk* sc;
1711
1712 /* No point looking through freed blocks if we're not keeping
1713 them around for a while... */
1714 for (sc = vg_freed_list_start; sc != NULL; sc = sc->next)
1715 if (p(sc))
1716 return sc;
1717
1718 return NULL;
1719}
1720
1721void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
1722{
1723 /* Record where freed */
1724 set_where( sc, VG_(get_ExeContext) ( tst ) );
1725
1726 /* Put it out of harm's way for a while. */
1727 add_to_freed_queue ( sc );
1728}
1729
1730
1731/*------------------------------------------------------------*/
1732/*--- Our instrumenter ---*/
1733/*------------------------------------------------------------*/
1734
1735#define uInstr1 VG_(new_UInstr1)
1736#define uInstr2 VG_(new_UInstr2)
1737#define uLiteral VG_(set_lit_field)
1738#define uCCall VG_(set_ccall_fields)
1739#define newTemp VG_(get_new_temp)
1740
1741UCodeBlock* SK_(instrument)(UCodeBlock* cb_in, Addr orig_addr)
1742{
1743/* Use this rather than eg. -1 because it's a UInt. */
1744#define INVALID_DATA_SIZE 999999
1745
1746 UCodeBlock* cb;
1747 Int i;
1748 UInstr* u_in;
1749 Int t_addr, t_size;
1750
1751 cb = VG_(alloc_UCodeBlock)();
1752 cb->nextTemp = cb_in->nextTemp;
1753
1754 for (i = 0; i < cb_in->used; i++) {
1755
1756 t_addr = t_size = INVALID_TEMPREG;
1757 u_in = &cb_in->instrs[i];
1758
1759 switch (u_in->opcode) {
1760 case NOP: case CALLM_E: case CALLM_S:
1761 break;
1762
1763 /* For memory-ref instrs, copy the data_addr into a temporary to be
1764 * passed to the cachesim_* helper at the end of the instruction.
1765 */
1766 case LOAD:
1767 t_addr = u_in->val1;
1768 goto do_LOAD_or_STORE;
1769 case STORE: t_addr = u_in->val2;
1770 goto do_LOAD_or_STORE;
1771 do_LOAD_or_STORE:
1772 uInstr1(cb, CCALL, 0, TempReg, t_addr);
1773 switch (u_in->size) {
1774 case 4: uCCall(cb, (Addr)&SK_(helperc_ACCESS4), 1, 1, False );
1775 break;
1776 case 2: uCCall(cb, (Addr)&SK_(helperc_ACCESS2), 1, 1, False );
1777 break;
1778 case 1: uCCall(cb, (Addr)&SK_(helperc_ACCESS1), 1, 1, False );
1779 break;
1780 default:
1781 VG_(skin_panic)("addrcheck::SK_(instrument):LOAD/STORE");
1782 }
1783 VG_(copy_UInstr)(cb, u_in);
1784 break;
1785
1786 case FPU_R:
1787 case FPU_W:
1788 t_addr = u_in->val2;
1789 t_size = newTemp(cb);
1790 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1791 uLiteral(cb, u_in->size);
1792 uInstr2(cb, CCALL, 0, TempReg, t_addr, TempReg, t_size);
1793 uCCall(cb, (Addr)&SK_(fpu_ACCESS_check), 2, 2, False );
1794 VG_(copy_UInstr)(cb, u_in);
1795 break;
1796
1797 default:
1798 VG_(copy_UInstr)(cb, u_in);
1799 break;
1800 }
1801 }
1802
1803 VG_(free_UCodeBlock)(cb_in);
1804 return cb;
1805}
1806
1807
1808
1809/*------------------------------------------------------------*/
1810/*--- Low-level address-space scanning, for the leak ---*/
1811/*--- detector. ---*/
1812/*------------------------------------------------------------*/
1813
1814static
1815jmp_buf memscan_jmpbuf;
1816
1817static
1818void vg_scan_all_valid_memory_sighandler ( Int sigNo )
1819{
1820 __builtin_longjmp(memscan_jmpbuf, 1);
1821}
1822
1823/* Safely (avoiding SIGSEGV / SIGBUS) scan the entire valid address
1824 space and pass the addresses and values of all addressible,
1825 aligned words to notify_word. This is the basis for the
1826 leak detector. Returns the number of calls made to notify_word. */
1827UInt VG_(scan_all_valid_memory) ( void (*notify_word)( Addr, UInt ) )
1828{
1829 /* All volatile, because some gccs seem paranoid about longjmp(). */
1830 volatile UInt res, numPages, page, primaryMapNo, nWordsNotified;
1831 volatile Addr pageBase, addr;
1832 volatile AcSecMap* sm;
1833 volatile UChar abits;
1834 volatile UInt page_first_word;
1835
1836 vki_ksigaction sigbus_saved;
1837 vki_ksigaction sigbus_new;
1838 vki_ksigaction sigsegv_saved;
1839 vki_ksigaction sigsegv_new;
1840 vki_ksigset_t blockmask_saved;
1841 vki_ksigset_t unblockmask_new;
1842
1843 /* Temporarily install a new sigsegv and sigbus handler, and make
1844 sure SIGBUS, SIGSEGV and SIGTERM are unblocked. (Perhaps the
1845 first two can never be blocked anyway?) */
1846
1847 sigbus_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1848 sigbus_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1849 sigbus_new.ksa_restorer = NULL;
1850 res = VG_(ksigemptyset)( &sigbus_new.ksa_mask );
1851 sk_assert(res == 0);
1852
1853 sigsegv_new.ksa_handler = vg_scan_all_valid_memory_sighandler;
1854 sigsegv_new.ksa_flags = VKI_SA_ONSTACK | VKI_SA_RESTART;
1855 sigsegv_new.ksa_restorer = NULL;
1856 res = VG_(ksigemptyset)( &sigsegv_new.ksa_mask );
njne427a662002-10-02 11:08:25 +00001857 sk_assert(res == 0+0);
njn25e49d8e72002-09-23 09:36:25 +00001858
1859 res = VG_(ksigemptyset)( &unblockmask_new );
1860 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGBUS );
1861 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGSEGV );
1862 res |= VG_(ksigaddset)( &unblockmask_new, VKI_SIGTERM );
njne427a662002-10-02 11:08:25 +00001863 sk_assert(res == 0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001864
1865 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_new, &sigbus_saved );
njne427a662002-10-02 11:08:25 +00001866 sk_assert(res == 0+0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001867
1868 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_new, &sigsegv_saved );
njne427a662002-10-02 11:08:25 +00001869 sk_assert(res == 0+0+0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001870
1871 res = VG_(ksigprocmask)( VKI_SIG_UNBLOCK, &unblockmask_new, &blockmask_saved );
njne427a662002-10-02 11:08:25 +00001872 sk_assert(res == 0+0+0+0+0+0);
njn25e49d8e72002-09-23 09:36:25 +00001873
1874 /* The signal handlers are installed. Actually do the memory scan. */
1875 numPages = 1 << (32-VKI_BYTES_PER_PAGE_BITS);
njne427a662002-10-02 11:08:25 +00001876 sk_assert(numPages == 1048576);
1877 sk_assert(4096 == (1 << VKI_BYTES_PER_PAGE_BITS));
njn25e49d8e72002-09-23 09:36:25 +00001878
1879 nWordsNotified = 0;
1880
1881 for (page = 0; page < numPages; page++) {
1882 pageBase = page << VKI_BYTES_PER_PAGE_BITS;
1883 primaryMapNo = pageBase >> 16;
1884 sm = primary_map[primaryMapNo];
1885 if (IS_DISTINGUISHED_SM(sm)) continue;
1886 if (__builtin_setjmp(memscan_jmpbuf) == 0) {
1887 /* try this ... */
1888 page_first_word = * (volatile UInt*)pageBase;
1889 /* we get here if we didn't get a fault */
1890 /* Scan the page */
1891 for (addr = pageBase; addr < pageBase+VKI_BYTES_PER_PAGE; addr += 4) {
1892 abits = get_abits4_ALIGNED(addr);
1893 if (abits == VGM_NIBBLE_VALID) {
1894 nWordsNotified++;
1895 notify_word ( addr, *(UInt*)addr );
1896 }
1897 }
1898 } else {
1899 /* We get here if reading the first word of the page caused a
1900 fault, which in turn caused the signal handler to longjmp.
1901 Ignore this page. */
1902 if (0)
1903 VG_(printf)(
1904 "vg_scan_all_valid_memory_sighandler: ignoring page at %p\n",
1905 (void*)pageBase
1906 );
1907 }
1908 }
1909
1910 /* Restore signal state to whatever it was before. */
1911 res = VG_(ksigaction)( VKI_SIGBUS, &sigbus_saved, NULL );
njne427a662002-10-02 11:08:25 +00001912 sk_assert(res == 0 +0);
njn25e49d8e72002-09-23 09:36:25 +00001913
1914 res = VG_(ksigaction)( VKI_SIGSEGV, &sigsegv_saved, NULL );
njne427a662002-10-02 11:08:25 +00001915 sk_assert(res == 0 +0 +0);
njn25e49d8e72002-09-23 09:36:25 +00001916
1917 res = VG_(ksigprocmask)( VKI_SIG_SETMASK, &blockmask_saved, NULL );
njne427a662002-10-02 11:08:25 +00001918 sk_assert(res == 0 +0 +0 +0);
njn25e49d8e72002-09-23 09:36:25 +00001919
1920 return nWordsNotified;
1921}
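/* A minimal standalone sketch of the probe-before-read pattern used
   above, written against plain POSIX signal/setjmp calls rather than
   Valgrind's VG_(ksigaction) / __builtin_setjmp wrappers.  Illustrative
   only; not part of AddrCheck. */
#if 0
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf probe_env;

static void probe_handler ( int signo )
{
   siglongjmp(probe_env, 1);
}

/* Returns 1 and fills *value if the word at addr can be read without
   faulting; returns 0 otherwise. */
static int can_read_word ( const unsigned int* addr, unsigned int* value )
{
   struct sigaction sa_new, sa_segv_old, sa_bus_old;
   volatile int ok = 0;

   sa_new.sa_handler = probe_handler;
   sa_new.sa_flags   = 0;
   sigemptyset(&sa_new.sa_mask);
   sigaction(SIGSEGV, &sa_new, &sa_segv_old);
   sigaction(SIGBUS,  &sa_new, &sa_bus_old);

   if (sigsetjmp(probe_env, 1) == 0) {
      *value = *(volatile const unsigned int*)addr;   /* may fault */
      ok = 1;                                         /* ...it didn't */
   }

   /* Restore the previous handlers whether or not we faulted. */
   sigaction(SIGSEGV, &sa_segv_old, NULL);
   sigaction(SIGBUS,  &sa_bus_old,  NULL);
   return ok;
}
#endif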
1922
1923
1924/*------------------------------------------------------------*/
1925/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
1926/*------------------------------------------------------------*/
1927
1928/* A block is either
1929 -- Proper-ly reached; a pointer to its start has been found
1930 -- Interior-ly reached; only an interior pointer to it has been found
1931 -- Unreached; so far, no pointers to any part of it have been found.
1932*/
1933typedef
1934 enum { Unreached, Interior, Proper }
1935 Reachedness;
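/* Worked example (illustrative): for a 100-byte block starting at
   0x40001000, finding the value 0x40001000 anywhere in scanned memory
   makes the block Proper, finding only e.g. 0x40001020 makes it
   Interior, and finding neither leaves it Unreached. */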
1936
1937/* A block record, used for generating err msgs. */
1938typedef
1939 struct _LossRecord {
1940 struct _LossRecord* next;
1941 /* Where these lost blocks were allocated. */
1942 ExeContext* allocated_at;
1943 /* Their reachability. */
1944 Reachedness loss_mode;
1945 /* Number of blocks and total # bytes involved. */
1946 UInt total_bytes;
1947 UInt num_blocks;
1948 }
1949 LossRecord;
1950
1951
1952/* Find the i such that ptr points at or inside the block described by
1953 shadows[i]. Return -1 if none found. This assumes that shadows[]
1954 has been sorted on the ->data field. */
1955
1956#ifdef VG_DEBUG_LEAKCHECK
1957/* Used to sanity-check the fast binary-search mechanism. */
1958static Int find_shadow_for_OLD ( Addr ptr,
1959 ShadowChunk** shadows,
1960 Int n_shadows )
1961
1962{
1963 Int i;
1964 Addr a_lo, a_hi;
1965 PROF_EVENT(70);
1966 for (i = 0; i < n_shadows; i++) {
1967 PROF_EVENT(71);
1968 a_lo = shadows[i]->data;
1969 a_hi = ((Addr)shadows[i]->data) + shadows[i]->size - 1;
1970 if (a_lo <= ptr && ptr <= a_hi)
1971 return i;
1972 }
1973 return -1;
1974}
1975#endif
1976
1977
1978static Int find_shadow_for ( Addr ptr,
1979 ShadowChunk** shadows,
1980 Int n_shadows )
1981{
1982 Addr a_mid_lo, a_mid_hi;
1983 Int lo, mid, hi, retVal;
1984 PROF_EVENT(70);
1985 /* VG_(printf)("find shadow for %p = ", ptr); */
1986 retVal = -1;
1987 lo = 0;
1988 hi = n_shadows-1;
1989 while (True) {
1990 PROF_EVENT(71);
1991
1992 /* invariant: current unsearched space is from lo to hi,
1993 inclusive. */
1994 if (lo > hi) break; /* not found */
1995
1996 mid = (lo + hi) / 2;
1997 a_mid_lo = shadows[mid]->data;
1998 a_mid_hi = ((Addr)shadows[mid]->data) + shadows[mid]->size - 1;
1999
2000 if (ptr < a_mid_lo) {
2001 hi = mid-1;
2002 continue;
2003 }
2004 if (ptr > a_mid_hi) {
2005 lo = mid+1;
2006 continue;
2007 }
njne427a662002-10-02 11:08:25 +00002008 sk_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
njn25e49d8e72002-09-23 09:36:25 +00002009 retVal = mid;
2010 break;
2011 }
2012
2013# ifdef VG_DEBUG_LEAKCHECK
njne427a662002-10-02 11:08:25 +00002014 sk_assert(retVal == find_shadow_for_OLD ( ptr, shadows, n_shadows ));
njn25e49d8e72002-09-23 09:36:25 +00002015# endif
2016 /* VG_(printf)("%d\n", retVal); */
2017 return retVal;
2018}
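/* A self-contained sketch of the same search over a toy (start, size)
   record instead of ShadowChunk; as above, it assumes the array is
   sorted by start address, blocks do not overlap, and every size is at
   least 1.  Illustrative only. */
#if 0
typedef struct { unsigned long start; unsigned long size; } Blk;

static int find_block_for ( unsigned long ptr, const Blk* blks, int n )
{
   int lo = 0, hi = n - 1;
   while (lo <= hi) {
      int mid = lo + (hi - lo) / 2;
      unsigned long b_lo = blks[mid].start;
      unsigned long b_hi = blks[mid].start + blks[mid].size - 1;
      if (ptr < b_lo)       hi = mid - 1;   /* wholly below this block */
      else if (ptr > b_hi)  lo = mid + 1;   /* wholly above this block */
      else                  return mid;     /* at or inside this block */
   }
   return -1;                               /* no block contains ptr   */
}
#endif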
2019
2020
2021
2022static void sort_malloc_shadows ( ShadowChunk** shadows, UInt n_shadows )
2023{
2024 Int incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
2025 9841, 29524, 88573, 265720,
2026 797161, 2391484 };
2027 Int lo = 0;
2028 Int hi = n_shadows-1;
2029 Int i, j, h, bigN, hp;
2030 ShadowChunk* v;
2031
2032 PROF_EVENT(72);
2033 bigN = hi - lo + 1; if (bigN < 2) return;
2034 hp = 0; while (incs[hp] < bigN) hp++; hp--;
2035
2036 for (; hp >= 0; hp--) {
2037 PROF_EVENT(73);
2038 h = incs[hp];
2039 i = lo + h;
2040 while (1) {
2041 PROF_EVENT(74);
2042 if (i > hi) break;
2043 v = shadows[i];
2044 j = i;
2045 while (shadows[j-h]->data > v->data) {
2046 PROF_EVENT(75);
2047 shadows[j] = shadows[j-h];
2048 j = j - h;
2049 if (j <= (lo + h - 1)) break;
2050 }
2051 shadows[j] = v;
2052 i++;
2053 }
2054 }
2055}
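/* The gap table above is the 3h+1 sequence (1, 4, 13, 40, ...) commonly
   attributed to Knuth, which keeps this Shell sort comfortably
   sub-quadratic for realistic block counts.  A generic sketch of the
   same scheme over a plain array of addresses, with the table walk
   bounded explicitly; illustrative only. */
#if 0
static void shellsort_addrs ( unsigned long* a, int n )
{
   static const int incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
                                 9841, 29524, 88573, 265720,
                                 797161, 2391484 };
   int hp, h, i, j;
   unsigned long v;

   if (n < 2) return;
   hp = 0;
   while (hp < 14 && incs[hp] < n) hp++;
   hp--;

   for (; hp >= 0; hp--) {
      h = incs[hp];
      for (i = h; i < n; i++) {
         v = a[i];
         for (j = i; j >= h && a[j-h] > v; j -= h)
            a[j] = a[j-h];                  /* shift larger elements up */
         a[j] = v;
      }
   }
}
#endif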
2056
2057/* Globals, for the callback used by SK_(detect_memory_leaks). */
2058
2059static ShadowChunk** vglc_shadows;
2060static Int vglc_n_shadows;
2061static Reachedness* vglc_reachedness;
2062static Addr vglc_min_mallocd_addr;
2063static Addr vglc_max_mallocd_addr;
2064
2065static
2066void vg_detect_memory_leaks_notify_addr ( Addr a, UInt word_at_a )
2067{
2068 Int sh_no;
2069 Addr ptr;
2070
2071 /* Rule out some known causes of bogus pointers. Mostly these do
2072 not cause much trouble because only a few false pointers can
2073 ever lurk in these places. This mainly stops it reporting that
2074 blocks are still reachable in stupid test programs like this
2075
2076 int main (void) { char* a = malloc(100); return 0; }
2077
2078 which people seem inordinately fond of writing, for some reason.
2079
2080 Note that this is a complete kludge. It would be better to
2081 ignore any addresses corresponding to valgrind.so's .bss and
2082 .data segments, but I cannot think of a reliable way to identify
2083 where the .bss segment has been put. If you can, drop me a
2084 line.
2085 */
2086 if (VG_(within_stack)(a)) return;
2087 if (VG_(within_m_state_static)(a)) return;
2088 if (a == (Addr)(&vglc_min_mallocd_addr)) return;
2089 if (a == (Addr)(&vglc_max_mallocd_addr)) return;
2090
2091 /* OK, let's get on and do something Useful for a change. */
2092
2093 ptr = (Addr)word_at_a;
2094 if (ptr >= vglc_min_mallocd_addr && ptr <= vglc_max_mallocd_addr) {
2095 /* Might be legitimate; we'll have to investigate further. */
2096 sh_no = find_shadow_for ( ptr, vglc_shadows, vglc_n_shadows );
2097 if (sh_no != -1) {
2098 /* Found a block at/into which ptr points. */
njne427a662002-10-02 11:08:25 +00002099 sk_assert(sh_no >= 0 && sh_no < vglc_n_shadows);
2100 sk_assert(ptr < vglc_shadows[sh_no]->data
njn25e49d8e72002-09-23 09:36:25 +00002101 + vglc_shadows[sh_no]->size);
2102 /* Decide whether Proper-ly or Interior-ly reached. */
2103 if (ptr == vglc_shadows[sh_no]->data) {
2104 if (0) VG_(printf)("pointer at %p to %p\n", a, word_at_a );
2105 vglc_reachedness[sh_no] = Proper;
2106 } else {
2107 if (vglc_reachedness[sh_no] == Unreached)
2108 vglc_reachedness[sh_no] = Interior;
2109 }
2110 }
2111 }
2112}
2113
2114
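/* The leak check proper: gather the shadow chunks for every not-freed
   block, sort them by address, sweep all addressible aligned words of
   the client (via VG_(scan_all_valid_memory) and the callback above)
   looking for pointers into those blocks, and finally fold blocks with
   the same allocation point and reachability into loss records for
   reporting. */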
2115void SK_(detect_memory_leaks) ( void )
2116{
2117 Int i;
2118 Int blocks_leaked, bytes_leaked;
2119 Int blocks_dubious, bytes_dubious;
2120 Int blocks_reachable, bytes_reachable;
2121 Int n_lossrecords;
2122 UInt bytes_notified;
2123
2124 LossRecord* errlist;
2125 LossRecord* p;
2126
2127 PROF_EVENT(76);
2128
2129 /* VG_(get_malloc_shadows) allocates storage for shadows */
2130 vglc_shadows = VG_(get_malloc_shadows)( &vglc_n_shadows );
2131 if (vglc_n_shadows == 0) {
njne427a662002-10-02 11:08:25 +00002132 sk_assert(vglc_shadows == NULL);
njn25e49d8e72002-09-23 09:36:25 +00002133 VG_(message)(Vg_UserMsg,
2134 "No malloc'd blocks -- no leaks are possible.\n");
2135 return;
2136 }
2137
2138 VG_(message)(Vg_UserMsg,
2139 "searching for pointers to %d not-freed blocks.",
2140 vglc_n_shadows );
2141 sort_malloc_shadows ( vglc_shadows, vglc_n_shadows );
2142
2143 /* Sanity check; assert that the blocks are now in order and that
2144 they don't overlap. */
2145 for (i = 0; i < vglc_n_shadows-1; i++) {
njne427a662002-10-02 11:08:25 +00002146 sk_assert( ((Addr)vglc_shadows[i]->data)
njn25e49d8e72002-09-23 09:36:25 +00002147 < ((Addr)vglc_shadows[i+1]->data) );
njne427a662002-10-02 11:08:25 +00002148 sk_assert( ((Addr)vglc_shadows[i]->data) + vglc_shadows[i]->size
njn25e49d8e72002-09-23 09:36:25 +00002149 < ((Addr)vglc_shadows[i+1]->data) );
2150 }
2151
2152 vglc_min_mallocd_addr = ((Addr)vglc_shadows[0]->data);
2153 vglc_max_mallocd_addr = ((Addr)vglc_shadows[vglc_n_shadows-1]->data)
2154 + vglc_shadows[vglc_n_shadows-1]->size - 1;
2155
2156 vglc_reachedness
2157 = VG_(malloc)( vglc_n_shadows * sizeof(Reachedness) );
2158 for (i = 0; i < vglc_n_shadows; i++)
2159 vglc_reachedness[i] = Unreached;
2160
2161 /* Do the scan of memory. */
2162 bytes_notified
2163 = VG_(scan_all_valid_memory)( &vg_detect_memory_leaks_notify_addr )
2164 * VKI_BYTES_PER_WORD;
2165
2166 VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
2167
2168 blocks_leaked = bytes_leaked = 0;
2169 blocks_dubious = bytes_dubious = 0;
2170 blocks_reachable = bytes_reachable = 0;
2171
2172 for (i = 0; i < vglc_n_shadows; i++) {
2173 if (vglc_reachedness[i] == Unreached) {
2174 blocks_leaked++;
2175 bytes_leaked += vglc_shadows[i]->size;
2176 }
2177 else if (vglc_reachedness[i] == Interior) {
2178 blocks_dubious++;
2179 bytes_dubious += vglc_shadows[i]->size;
2180 }
2181 else if (vglc_reachedness[i] == Proper) {
2182 blocks_reachable++;
2183 bytes_reachable += vglc_shadows[i]->size;
2184 }
2185 }
2186
2187 VG_(message)(Vg_UserMsg, "");
2188 VG_(message)(Vg_UserMsg, "definitely lost: %d bytes in %d blocks.",
2189 bytes_leaked, blocks_leaked );
2190 VG_(message)(Vg_UserMsg, "possibly lost: %d bytes in %d blocks.",
2191 bytes_dubious, blocks_dubious );
2192 VG_(message)(Vg_UserMsg, "still reachable: %d bytes in %d blocks.",
2193 bytes_reachable, blocks_reachable );
2194
2195
2196 /* Common up the lost blocks so we can print sensible error
2197 messages. */
2198
2199 n_lossrecords = 0;
2200 errlist = NULL;
2201 for (i = 0; i < vglc_n_shadows; i++) {
2202
2203 /* 'where' stored in 'skin_extra' field */
2204 ExeContext* where = get_where ( vglc_shadows[i] );
2205
2206 for (p = errlist; p != NULL; p = p->next) {
2207 if (p->loss_mode == vglc_reachedness[i]
2208 && VG_(eq_ExeContext) ( SK_(clo_leak_resolution),
2209 p->allocated_at,
2210 where) ) {
2211 break;
2212 }
2213 }
2214 if (p != NULL) {
2215 p->num_blocks ++;
2216 p->total_bytes += vglc_shadows[i]->size;
2217 } else {
2218 n_lossrecords ++;
2219 p = VG_(malloc)(sizeof(LossRecord));
2220 p->loss_mode = vglc_reachedness[i];
2221 p->allocated_at = where;
2222 p->total_bytes = vglc_shadows[i]->size;
2223 p->num_blocks = 1;
2224 p->next = errlist;
2225 errlist = p;
2226 }
2227 }
2228
2229 for (i = 0; i < n_lossrecords; i++) {
2230 LossRecord* p_min = NULL;
2231 UInt n_min = 0xFFFFFFFF;
2232 for (p = errlist; p != NULL; p = p->next) {
2233 if (p->num_blocks > 0 && p->total_bytes < n_min) {
2234 n_min = p->total_bytes;
2235 p_min = p;
2236 }
2237 }
njne427a662002-10-02 11:08:25 +00002238 sk_assert(p_min != NULL);
njn25e49d8e72002-09-23 09:36:25 +00002239
2240 if ( (!SK_(clo_show_reachable)) && p_min->loss_mode == Proper) {
2241 p_min->num_blocks = 0;
2242 continue;
2243 }
2244
2245 VG_(message)(Vg_UserMsg, "");
2246 VG_(message)(
2247 Vg_UserMsg,
2248 "%d bytes in %d blocks are %s in loss record %d of %d",
2249 p_min->total_bytes, p_min->num_blocks,
2250 p_min->loss_mode==Unreached ? "definitely lost" :
2251 (p_min->loss_mode==Interior ? "possibly lost"
2252 : "still reachable"),
2253 i+1, n_lossrecords
2254 );
2255 VG_(pp_ExeContext)(p_min->allocated_at);
2256 p_min->num_blocks = 0;
2257 }
2258
2259 VG_(message)(Vg_UserMsg, "");
2260 VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
2261 VG_(message)(Vg_UserMsg, " definitely lost: %d bytes in %d blocks.",
2262 bytes_leaked, blocks_leaked );
2263 VG_(message)(Vg_UserMsg, " possibly lost: %d bytes in %d blocks.",
2264 bytes_dubious, blocks_dubious );
2265 VG_(message)(Vg_UserMsg, " still reachable: %d bytes in %d blocks.",
2266 bytes_reachable, blocks_reachable );
2267 if (!SK_(clo_show_reachable)) {
2268 VG_(message)(Vg_UserMsg,
2269 "Reachable blocks (those to which a pointer was found) are not shown.");
2270 VG_(message)(Vg_UserMsg,
2271 "To see them, rerun with: --show-reachable=yes");
2272 }
2273 VG_(message)(Vg_UserMsg, "");
2274
2275 VG_(free) ( vglc_shadows );
2276 VG_(free) ( vglc_reachedness );
2277}
2278
2279
2280/* ---------------------------------------------------------------------
2281 Sanity check machinery (permanently engaged).
2282 ------------------------------------------------------------------ */
2283
2284/* Check that nobody has spuriously claimed that the first or last 16
2285 pages (64 KB) of address space have become accessible. Failure of
2286   the following does not per se indicate an internal consistency
2287   problem, but it is so likely to that we really want to know
2288 about it if so. */
2289
2290Bool SK_(cheap_sanity_check) ( void )
2291{
2292 if (IS_DISTINGUISHED_SM(primary_map[0]) &&
2293 IS_DISTINGUISHED_SM(primary_map[65535]))
2294 return True;
2295 else
2296 return False;
2297}
2298
2299Bool SK_(expensive_sanity_check) ( void )
2300{
2301 Int i;
2302
2303 /* Make sure nobody changed the distinguished secondary. */
2304 for (i = 0; i < 8192; i++)
2305 if (distinguished_secondary_map.abits[i] != VGM_BYTE_INVALID)
2306 return False;
2307
2308 /* Make sure that the upper 3/4 of the primary map hasn't
2309 been messed with. */
2310 for (i = 65536; i < 262144; i++)
2311 if (primary_map[i] != & distinguished_secondary_map)
2312 return False;
2313
2314 return True;
2315}
2316
2317/* ---------------------------------------------------------------------
2318 Debugging machinery (turn on to debug). Something of a mess.
2319 ------------------------------------------------------------------ */
2320
2321#if 0
2322/* Print the value tags on the 8 integer registers & flag reg. */
2323
2324static void uint_to_bits ( UInt x, Char* str )
2325{
2326 Int i;
2327 Int w = 0;
2328 /* str must point to a space of at least 36 bytes. */
2329 for (i = 31; i >= 0; i--) {
2330 str[w++] = (x & ( ((UInt)1) << i)) ? '1' : '0';
2331 if (i == 24 || i == 16 || i == 8)
2332 str[w++] = ' ';
2333 }
2334 str[w++] = 0;
njne427a662002-10-02 11:08:25 +00002335 sk_assert(w == 36);
njn25e49d8e72002-09-23 09:36:25 +00002336}
2337
2338/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread
2339 state table. */
2340
2341static void vg_show_reg_tags ( void )
2342{
2343 Char buf1[36];
2344 Char buf2[36];
2345 UInt z_eax, z_ebx, z_ecx, z_edx,
2346 z_esi, z_edi, z_ebp, z_esp, z_eflags;
2347
2348 z_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
2349 z_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
2350 z_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
2351 z_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
2352 z_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
2353 z_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
2354 z_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
2355 z_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
2356 z_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
2357
2358 uint_to_bits(z_eflags, buf1);
2359   VG_(message)(Vg_DebugMsg, "efl %s\n", buf1);
2360
2361 uint_to_bits(z_eax, buf1);
2362 uint_to_bits(z_ebx, buf2);
2363 VG_(message)(Vg_DebugMsg, "eax %s ebx %s\n", buf1, buf2);
2364
2365 uint_to_bits(z_ecx, buf1);
2366 uint_to_bits(z_edx, buf2);
2367 VG_(message)(Vg_DebugMsg, "ecx %s edx %s\n", buf1, buf2);
2368
2369 uint_to_bits(z_esi, buf1);
2370 uint_to_bits(z_edi, buf2);
2371 VG_(message)(Vg_DebugMsg, "esi %s edi %s\n", buf1, buf2);
2372
2373 uint_to_bits(z_ebp, buf1);
2374 uint_to_bits(z_esp, buf2);
2375 VG_(message)(Vg_DebugMsg, "ebp %s esp %s\n", buf1, buf2);
2376}
2377
2378
2379/* For debugging only. Scan the address space and touch all allegedly
2380 addressible words. Useful for establishing where Valgrind's idea of
2381 addressibility has diverged from what the kernel believes. */
2382
2383static
2384void zzzmemscan_notify_word ( Addr a, UInt w )
2385{
2386}
2387
2388void zzzmemscan ( void )
2389{
2390 Int n_notifies
2391 = VG_(scan_all_valid_memory)( zzzmemscan_notify_word );
2392 VG_(printf)("zzzmemscan: n_bytes = %d\n", 4 * n_notifies );
2393}
2394#endif
2395
2396
2397
2398
2399#if 0
2400static Int zzz = 0;
2401
2402void show_bb ( Addr eip_next )
2403{
2404 VG_(printf)("[%4d] ", zzz);
2405   vg_show_reg_tags();
2406 VG_(translate) ( eip_next, NULL, NULL, NULL );
2407}
2408#endif /* 0 */
2409
2410/*------------------------------------------------------------*/
2411/*--- Syscall wrappers ---*/
2412/*------------------------------------------------------------*/
2413
2414void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
2415{
2416 Int sane = SK_(cheap_sanity_check)();
2417 return (void*)sane;
2418}
2419
2420void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
2421 void* pre_result, Int res, Bool isBlocking )
2422{
2423 Int sane_before_call = (Int)pre_result;
2424 Bool sane_after_call = SK_(cheap_sanity_check)();
2425
2426 if ((Int)sane_before_call && (!sane_after_call)) {
2427 VG_(message)(Vg_DebugMsg, "post-syscall: ");
2428 VG_(message)(Vg_DebugMsg,
2429 "probable sanity check failure for syscall number %d\n",
2430 syscallno );
njne427a662002-10-02 11:08:25 +00002431 VG_(skin_panic)("aborting due to the above ... bye!");
njn25e49d8e72002-09-23 09:36:25 +00002432 }
2433}
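/* The void* returned by SK_(pre_syscall) is handed back verbatim as
   'pre_result', so a skin can thread arbitrary per-call state through a
   syscall.  A hypothetical sketch (AddrCheck itself only threads the
   cheap sanity flag, as above): */
#if 0
typedef struct { Int sane; UInt sysno; } PreSyscallState;

void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
{
   PreSyscallState* st = VG_(malloc)(sizeof(PreSyscallState));
   st->sane  = SK_(cheap_sanity_check)();
   st->sysno = syscallno;
   return (void*)st;
}

void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
                         void* pre_result, Int res, Bool isBlocking )
{
   PreSyscallState* st = (PreSyscallState*)pre_result;
   if (st->sane && !SK_(cheap_sanity_check)())
      VG_(message)(Vg_DebugMsg,
                   "sanity check failed across syscall %d", st->sysno);
   VG_(free)(st);
}
#endif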
2434
2435
2436/*------------------------------------------------------------*/
2437/*--- Command line args                                    ---*/
2438/*------------------------------------------------------------*/
2439
2440void SK_(written_shadow_regs_values)( UInt* gen_reg_value, UInt* eflags_value )
2441{
2442 *gen_reg_value = VGM_WORD_VALID;
2443 *eflags_value = VGM_EFLAGS_VALID;
2444}
2445
2446Bool SK_(process_cmd_line_option)(Char* arg)
2447{
2448# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
2449# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
2450
2451 if (STREQ(arg, "--partial-loads-ok=yes"))
2452 SK_(clo_partial_loads_ok) = True;
2453 else if (STREQ(arg, "--partial-loads-ok=no"))
2454 SK_(clo_partial_loads_ok) = False;
2455
2456 else if (STREQN(15, arg, "--freelist-vol=")) {
2457 SK_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
2458 if (SK_(clo_freelist_vol) < 0) SK_(clo_freelist_vol) = 0;
2459 }
2460
2461 else if (STREQ(arg, "--leak-check=yes"))
2462 SK_(clo_leak_check) = True;
2463 else if (STREQ(arg, "--leak-check=no"))
2464 SK_(clo_leak_check) = False;
2465
2466 else if (STREQ(arg, "--leak-resolution=low"))
2467 SK_(clo_leak_resolution) = Vg_LowRes;
2468 else if (STREQ(arg, "--leak-resolution=med"))
2469 SK_(clo_leak_resolution) = Vg_MedRes;
2470 else if (STREQ(arg, "--leak-resolution=high"))
2471 SK_(clo_leak_resolution) = Vg_HighRes;
2472
2473 else if (STREQ(arg, "--show-reachable=yes"))
2474 SK_(clo_show_reachable) = True;
2475 else if (STREQ(arg, "--show-reachable=no"))
2476 SK_(clo_show_reachable) = False;
2477
2478 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
2479 SK_(clo_workaround_gcc296_bugs) = True;
2480 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
2481 SK_(clo_workaround_gcc296_bugs) = False;
2482
2483 else if (STREQ(arg, "--cleanup=yes"))
2484 SK_(clo_cleanup) = True;
2485 else if (STREQ(arg, "--cleanup=no"))
2486 SK_(clo_cleanup) = False;
2487
2488 else
2489 return False;
2490
2491 return True;
2492
2493#undef STREQ
2494#undef STREQN
2495}
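/* To accept a new switch, add another STREQ/STREQN arm before the final
   'return False' above, e.g. (hypothetical; no such option exists):

      else if (STREQ(arg, "--my-option=yes")) SK_(clo_my_option) = True;
      else if (STREQ(arg, "--my-option=no"))  SK_(clo_my_option) = False;

   and document it in SK_(usage)() below. */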
2496
2497Char* SK_(usage)(void)
2498{
2499 return
2500" --partial-loads-ok=no|yes too hard to explain here; see manual [yes]\n"
2501" --freelist-vol=<number> volume of freed blocks queue [1000000]\n"
2502" --leak-check=no|yes search for memory leaks at exit? [no]\n"
2503" --leak-resolution=low|med|high\n"
2504" amount of bt merging in leak check [low]\n"
2505" --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
2506" --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
2507" --check-addrVs=no|yes experimental lighterweight checking? [yes]\n"
2508" yes == Valgrind's original behaviour\n"
2509"\n"
2510" --cleanup=no|yes improve after instrumentation? [yes]\n";
2511}
2512
2513
2514/*------------------------------------------------------------*/
2515/*--- Setup ---*/
2516/*------------------------------------------------------------*/
2517
2518void SK_(pre_clo_init)(VgNeeds* needs, VgTrackEvents* track)
2519{
2520 needs->name = "addrcheck";
2521 needs->description = "a fine-grained address checker";
njne427a662002-10-02 11:08:25 +00002522 needs->bug_reports_to = "jseward@acm.org";
njn25e49d8e72002-09-23 09:36:25 +00002523
2524 needs->core_errors = True;
2525 needs->skin_errors = True;
2526 needs->run_libc_freeres = True;
2527
2528 needs->sizeof_shadow_block = 1;
2529
2530 needs->basic_block_discards = False;
2531 needs->shadow_regs = False;
2532 needs->command_line_options = True;
2533 needs->client_requests = True;
2534 needs->extended_UCode = False;
2535 needs->syscall_wrapper = True;
2536 needs->alternative_free = True;
2537 needs->sanity_checks = True;
2538
2539 VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS4));
2540 VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS2));
2541 VG_(register_compact_helper)((Addr) & SK_(helperc_ACCESS1));
2542 VG_(register_compact_helper)((Addr) & SK_(fpu_ACCESS_check));
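   /* These are exactly the four helpers that SK_(instrument) emits
      CCALLs to. */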
2543
2544 /* Events to track */
2545 track->new_mem_startup = & addrcheck_new_mem_startup;
2546 track->new_mem_heap = & addrcheck_new_mem_heap;
2547 track->new_mem_stack = & SK_(make_accessible);
2548 track->new_mem_stack_aligned = & make_writable_aligned;
2549 track->new_mem_stack_signal = & SK_(make_accessible);
2550 track->new_mem_brk = & SK_(make_accessible);
2551 track->new_mem_mmap = & addrcheck_set_perms;
2552
2553 track->copy_mem_heap = & copy_address_range_state;
2554 track->copy_mem_remap = & copy_address_range_state;
2555 track->change_mem_mprotect = & addrcheck_set_perms;
2556
2557 track->ban_mem_heap = & SK_(make_noaccess);
2558 track->ban_mem_stack = & SK_(make_noaccess);
2559
2560 track->die_mem_heap = & SK_(make_noaccess);
2561 track->die_mem_stack = & SK_(make_noaccess);
2562 track->die_mem_stack_aligned = & make_noaccess_aligned;
2563 track->die_mem_stack_signal = & SK_(make_noaccess);
2564 track->die_mem_brk = & SK_(make_noaccess);
2565 track->die_mem_munmap = & SK_(make_noaccess);
2566
2567 track->bad_free = & SK_(record_free_error);
2568 track->mismatched_free = & SK_(record_freemismatch_error);
2569
2570 track->pre_mem_read = & check_is_readable;
2571 track->pre_mem_read_asciiz = & check_is_readable_asciiz;
2572 track->pre_mem_write = & check_is_writable;
2573 track->post_mem_write = & SK_(make_accessible);
2574
2575 init_shadow_memory();
2576
2577 init_prof_mem();
2578
2579 VGP_(register_profile_event) ( VgpSetMem, "set-mem-perms" );
2580 VGP_(register_profile_event) ( VgpCheckMem, "check-mem-perms" );
2581}
2582
2583/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002584/*--- end ac_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002585/*--------------------------------------------------------------------*/