/*--------------------------------------------------------------------*/
/*--- Code that is shared between MemCheck and AddrCheck.          ---*/
/*---                                                  mc_common.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors, and AddrCheck, a lightweight Valgrind skin
   for detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/


#include "mc_common.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* This many bytes below %ESP are considered addressable if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

Bool  MC_(clo_partial_loads_ok)       = True;
Int   MC_(clo_freelist_vol)           = 1000000;
Bool  MC_(clo_leak_check)             = False;
VgRes MC_(clo_leak_resolution)        = Vg_LowRes;
Bool  MC_(clo_show_reachable)         = False;
Bool  MC_(clo_workaround_gcc296_bugs) = False;
Bool  MC_(clo_cleanup)                = True;
Bool  MC_(clo_avoid_strlen_errors)    = True;

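/* Parse one command-line option shared by MemCheck and AddrCheck,
   updating the MC_(clo_*) flags above.  Returns True if the option
   was recognised here and False otherwise, so the caller can fall
   back to skin-specific or core options.  Illustrative examples:
   "--leak-check=yes" enables the leak checker, and
   "--freelist-vol=500000" caps the freed-blocks queue at 500000
   bytes. */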
Bool MC_(process_common_cmd_line_option)(Char* arg)
{
#  define STREQ(s1,s2)     (0==VG_(strcmp_ws)((s1),(s2)))
#  define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))

   if (STREQ(arg, "--partial-loads-ok=yes"))
      MC_(clo_partial_loads_ok) = True;
   else if (STREQ(arg, "--partial-loads-ok=no"))
      MC_(clo_partial_loads_ok) = False;

   else if (STREQN(15, arg, "--freelist-vol=")) {
      MC_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
      if (MC_(clo_freelist_vol) < 0) MC_(clo_freelist_vol) = 0;
   }

   else if (STREQ(arg, "--leak-check=yes"))
      MC_(clo_leak_check) = True;
   else if (STREQ(arg, "--leak-check=no"))
      MC_(clo_leak_check) = False;

   else if (STREQ(arg, "--leak-resolution=low"))
      MC_(clo_leak_resolution) = Vg_LowRes;
   else if (STREQ(arg, "--leak-resolution=med"))
      MC_(clo_leak_resolution) = Vg_MedRes;
   else if (STREQ(arg, "--leak-resolution=high"))
      MC_(clo_leak_resolution) = Vg_HighRes;

   else if (STREQ(arg, "--show-reachable=yes"))
      MC_(clo_show_reachable) = True;
   else if (STREQ(arg, "--show-reachable=no"))
      MC_(clo_show_reachable) = False;

   else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
      MC_(clo_workaround_gcc296_bugs) = True;
   else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
      MC_(clo_workaround_gcc296_bugs) = False;

   else if (STREQ(arg, "--cleanup=yes"))
      MC_(clo_cleanup) = True;
   else if (STREQ(arg, "--cleanup=no"))
      MC_(clo_cleanup) = False;

   else
      return False;

   return True;

#undef STREQ
#undef STREQN
}

/*------------------------------------------------------------*/
/*--- Comparing and printing errors                        ---*/
/*------------------------------------------------------------*/

static __inline__
void clear_AddrInfo ( AddrInfo* ai )
{
   ai->akind      = Unknown;
   ai->blksize    = 0;
   ai->rwoffset   = 0;
   ai->lastchange = NULL;
   ai->stack_tid  = VG_INVALID_THREADID;
   ai->maybe_gcc  = False;
}

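/* Reset a MemCheckError to a benign default state (a size-zero read
   of an unknown address) so the caller only has to fill in the fields
   relevant to the specific error being recorded. */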
void MC_(clear_MemCheckError) ( MemCheckError* err_extra )
{
   err_extra->axskind = ReadAxs;
   err_extra->size    = 0;
   clear_AddrInfo ( &err_extra->addrinfo );
   err_extra->isWrite = False;
}

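/* Compare two described addresses for equality: the kinds must agree
   (unless one is still Undescribed), and for freed or malloc'd blocks
   the block size and last-change context must match too.  Currently
   unused (hence the attribute), retained for the commented-out
   comparisons in SK_(eq_SkinError) below. */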
__attribute__ ((unused))
static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
{
   if (ai1->akind != Undescribed
       && ai2->akind != Undescribed
       && ai1->akind != ai2->akind)
      return False;
   if (ai1->akind == Freed || ai1->akind == Mallocd) {
      if (ai1->blksize != ai2->blksize)
         return False;
      if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
         return False;
   }
   return True;
}

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different. */

Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
{
   MemCheckError* e1_extra = VG_(get_error_extra)(e1);
   MemCheckError* e2_extra = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case CoreMemErr: {
         Char *e1s, *e2s;
         if (e1_extra->isWrite != e2_extra->isWrite) return False;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;
         if (0 == VG_(strcmp)(e1s, e2s)) return True;
         return False;
      }

      case UserErr:
      case ParamErr:
         if (e1_extra->isWrite != e2_extra->isWrite) return False;
         if (VG_(get_error_kind)(e1) == ParamErr
             && 0 != VG_(strcmp)(VG_(get_error_string)(e1),
                                 VG_(get_error_string)(e2))) return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors.  Not even AddrErr
            below does that.  So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      case ValueErr:
         if (e1_extra->size != e2_extra->size) return False;
         return True;

      default:
         VG_(printf)("Error:\n  unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
   }
}

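/* Describe address 'a' to the user: which thread's stack it is on,
   whether it sits just below %esp (the gcc 2.96 workaround case), or
   how far before, inside or after a malloc'd, free'd, client-defined
   or stack red-zone block it lies, followed by the context in which
   that block last changed state. */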
void MC_(pp_AddrInfo) ( Addr a, AddrInfo* ai )
{
   switch (ai->akind) {
      case Stack:
         VG_(message)(Vg_UserMsg,
                      " Address 0x%x is on thread %d's stack",
                      a, ai->stack_tid);
         break;
      case Unknown:
         if (ai->maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is just below %%esp. Possibly a bug in GCC/G++",
               a);
            VG_(message)(Vg_UserMsg,
               " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
         } else {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is not stack'd, malloc'd or free'd", a);
         }
         break;
      case Freed: case Mallocd: case UserG: case UserS: {
         UInt delta;
         UChar* relative;
         if (ai->rwoffset < 0) {
            delta    = (UInt)(- ai->rwoffset);
            relative = "before";
         } else if (ai->rwoffset >= ai->blksize) {
            delta    = ai->rwoffset - ai->blksize;
            relative = "after";
         } else {
            delta    = ai->rwoffset;
            relative = "inside";
         }
         if (ai->akind == UserS) {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is %d bytes %s a %d-byte stack red-zone created",
               a, delta, relative,
               ai->blksize );
         } else {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is %d bytes %s a block of size %d %s",
               a, delta, relative,
               ai->blksize,
               ai->akind==Mallocd ? "alloc'd"
                  : ai->akind==Freed ? "free'd"
                                     : "client-defined");
         }
         VG_(pp_ExeContext)(ai->lastchange);
         break;
      }
      default:
         VG_(skin_panic)("MC_(pp_AddrInfo)");
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
static Bool is_just_below_ESP( Addr esp, Addr aa )
{
   if ((UInt)esp > (UInt)aa
       && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}

/* This one called from generated code. */

void MC_(record_address_error) ( Addr a, Int size, Bool isWrite )
{
   MemCheckError err_extra;
   Bool          just_below_esp;

   just_below_esp = is_just_below_ESP( VG_(get_stack_pointer)(), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
   err_extra.size    = size;
   err_extra.addrinfo.akind     = Undescribed;
   err_extra.addrinfo.maybe_gcc = just_below_esp;
   VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
}

/* These ones are called from non-generated code */

/* This is for memory errors in pthread functions, as opposed to pthread API
   errors which are found by the core. */
void MC_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
{
   MemCheckError err_extra;

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
}

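/* Record an error in a memory area passed to a system call or other
   checked interface; 'msg' names the offending parameter.  Recorded
   with kind ParamErr so it can be matched by Param suppressions. */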
void MC_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
                               Char* msg )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);
   MC_(clear_MemCheckError)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
}

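/* Record a jump to an unaddressable location: the access kind is
   ExecAxs rather than a read or write. */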
void MC_(record_jump_error) ( ThreadState* tst, Addr a )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.axskind = ExecAxs;
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
}

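/* Record an invalid free/delete of address 'a', typically a pointer
   that was never allocated or has already been freed. */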
void MC_(record_free_error) ( ThreadState* tst, Addr a )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
}

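/* Record a mismatched deallocation: the block at 'a' was released
   with a different allocator family than the one that allocated it
   (for example malloc/new/new[] versus free/delete/delete[]). */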
void MC_(record_freemismatch_error) ( ThreadState* tst, Addr a )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
}

/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

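/* Read any extra line a suppression kind needs from the suppressions
   file.  Only ParamSupp entries carry one: the next line is stored as
   the suppression string and later compared against the error string
   in SK_(error_matches_suppression).  Returns False if the file ends
   prematurely.  Purely as an illustration (the surrounding file syntax
   is defined by the core's suppression parser, not here), the extra
   line of a Param suppression would name the parameter, e.g.

      write(buf)
*/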
Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp *su )
{
   Bool eof;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      eof = VG_(get_line) ( fd, buf, nBuf );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)(buf));
   }
   return True;
}

#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
                      && VG_(strcmp)((s1),(s2))==0)

Bool SK_(error_matches_suppression)(Error* err, Supp* su)
{
   UInt su_size;
   MemCheckError* err_extra = VG_(get_error_extra)(err);
   ErrorKind      ekind     = VG_(get_error_kind)(err);

   switch (VG_(get_supp_kind)(su)) {
      case ParamSupp:
         return (ekind == ParamErr
            && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));

      case CoreMemSupp:
         return (ekind == CoreMemErr
            && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));

      case Value0Supp: su_size = 0; goto value_case;
      case Value1Supp: su_size = 1; goto value_case;
      case Value2Supp: su_size = 2; goto value_case;
      case Value4Supp: su_size = 4; goto value_case;
      case Value8Supp: su_size = 8; goto value_case;
      value_case:
         return (ekind == ValueErr && err_extra->size == su_size);

      case Addr1Supp: su_size = 1; goto addr_case;
      case Addr2Supp: su_size = 2; goto addr_case;
      case Addr4Supp: su_size = 4; goto addr_case;
      case Addr8Supp: su_size = 8; goto addr_case;
      addr_case:
         return (ekind == AddrErr && err_extra->size == su_size);

      case FreeSupp:
         return (ekind == FreeErr || ekind == FreeMismatchErr);

      default:
         VG_(printf)("Error:\n"
                     "  unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(skin_panic)("unknown suppression type in "
                         "SK_(error_matches_suppression)");
   }
}

#undef STREQ

/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.
   Ones marked 'M' are MemCheck only.  Ones marked 'A' are AddrCheck
   only.  The rest are shared.

   10   alloc_secondary_map

   20   get_abit
M  21   get_vbyte
   22   set_abit
M  23   set_vbyte
   24   get_abits4_ALIGNED
M  25   get_vbytes4_ALIGNED

   30   set_address_range_perms
   31   set_address_range_perms(lower byte loop)
   32   set_address_range_perms(quadword loop)
   33   set_address_range_perms(upper byte loop)

   35   make_noaccess
   36   make_writable
   37   make_readable
A  38   make_accessible

   40   copy_address_range_state
   41   copy_address_range_state(byte loop)
   42   check_writable
   43   check_writable(byte loop)
   44   check_readable
   45   check_readable(byte loop)
   46   check_readable_asciiz
   47   check_readable_asciiz(byte loop)
A  48   check_accessible
A  49   check_accessible(byte loop)

   50   make_noaccess_aligned
   51   make_writable_aligned

M  60   helperc_LOADV4
M  61   helperc_STOREV4
M  62   helperc_LOADV2
M  63   helperc_STOREV2
M  64   helperc_LOADV1
M  65   helperc_STOREV1

A  66   helperc_ACCESS4
A  67   helperc_ACCESS2
A  68   helperc_ACCESS1

M  70   rim_rd_V4_SLOWLY
M  71   rim_wr_V4_SLOWLY
M  72   rim_rd_V2_SLOWLY
M  73   rim_wr_V2_SLOWLY
M  74   rim_rd_V1_SLOWLY
M  75   rim_wr_V1_SLOWLY

A  76   ACCESS4_SLOWLY
A  77   ACCESS2_SLOWLY
A  78   ACCESS1_SLOWLY

   80   fpu_read
   81   fpu_read aligned 4
   82   fpu_read aligned 8
   83   fpu_read 2
   84   fpu_read 10/28/108

M  85   fpu_write
M  86   fpu_write aligned 4
M  87   fpu_write aligned 8
M  88   fpu_write 2
M  89   fpu_write 10/28/108

   90   fpu_access
   91   fpu_access aligned 4
   92   fpu_access aligned 8
   93   fpu_access 2
   94   fpu_access 10/28/108

   100  fpu_access_check_SLOWLY
   101  fpu_access_check_SLOWLY(byte loop)
*/

#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

extern UInt MC_(event_ctr)[N_PROF_EVENTS];

void MC_(init_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++)
      MC_(event_ctr)[i] = 0;
}

void MC_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (MC_(event_ctr)[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, MC_(event_ctr)[i] );
   }
   VG_(printf)("\n");
}

#else

void MC_(init_prof_mem) ( void ) { }
void MC_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif

/*------------------------------------------------------------*/
/*--- Shadow chunks info                                   ---*/
/*------------------------------------------------------------*/

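/* Each shadow chunk carries one extra word (slot 0), used here to
   remember the ExeContext in which the block was allocated or, later,
   freed.  MC_(set_where)/MC_(get_where) access that slot directly;
   SK_(complete_shadow_chunk) fills it in with the allocating thread's
   context when the core creates the chunk. */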
void MC_(set_where)( ShadowChunk* sc, ExeContext* ec )
{
   VG_(set_sc_extra)( sc, 0, (UInt)ec );
}

ExeContext *MC_(get_where)( ShadowChunk* sc )
{
   return (ExeContext*)VG_(get_sc_extra)(sc, 0);
}

void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   VG_(set_sc_extra) ( sc, 0, (UInt)VG_(get_ExeContext)(tst) );
}


/*------------------------------------------------------------*/
/*--- Postponing free()ing                                 ---*/
/*------------------------------------------------------------*/

/* Holds blocks after freeing. */
static ShadowChunk* freed_list_start  = NULL;
static ShadowChunk* freed_list_end    = NULL;
static Int          freed_list_volume = 0;

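/* Debugging helpers for the freed-blocks queue: count the parked
   blocks, and check that the recorded volume matches the sum of their
   sizes.  Neither is used in normal builds (hence the attributes). */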
__attribute__ ((unused))
Int MC_(count_freelist) ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
      n++;
   return n;
}

__attribute__ ((unused))
void MC_(freelist_sanity) ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   /* VG_(printf)("freelist sanity\n"); */
   for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
      n += VG_(get_sc_size)(sc);
   sk_assert(n == freed_list_volume);
}

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      sk_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = sc;
      freed_list_volume = VG_(get_sc_size)(sc);
   } else {
      sk_assert(VG_(get_sc_next)(freed_list_end) == NULL);
      VG_(set_sc_next)(freed_list_end, sc);
      freed_list_end = sc;
      freed_list_volume += VG_(get_sc_size)(sc);
   }
   VG_(set_sc_next)(sc, NULL);

   /* Release enough of the oldest blocks to bring the free queue
      volume below MC_(clo_freelist_vol). */

   while (freed_list_volume > MC_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(freed_list_start != NULL);
      sk_assert(freed_list_end != NULL);

      sc1 = freed_list_start;
      freed_list_volume -= VG_(get_sc_size)(sc1);
      /* VG_(printf)("volume now %d\n", freed_list_volume); */
      sk_assert(freed_list_volume >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = VG_(get_sc_next)(sc1);
      }
      VG_(set_sc_next)(sc1, NULL); /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
ShadowChunk* MC_(any_matching_freed_ShadowChunks) ( Bool (*p)(ShadowChunk*) )
{
   ShadowChunk* sc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
      if (p(sc))
         return sc;

   return NULL;
}

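/* Called in place of actually freeing the block: note the context in
   which it was freed and park it on the queue above, so that later
   accesses to recently-freed memory can still be reported against a
   useful "freed at" context. */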
void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
{
   /* Record where freed */
   MC_(set_where)( sc, VG_(get_ExeContext) ( tst ) );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
}

/*------------------------------------------------------------*/
/*--- Syscall wrappers                                     ---*/
/*------------------------------------------------------------*/

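/* Bracket each system call with a cheap sanity check of the skin's
   state.  The pre-call result is handed back as 'pre_result', so the
   post-call handler panics only if the state was sane going into the
   syscall but is no longer sane afterwards. */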
void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
{
   Int sane = SK_(cheap_sanity_check)();
   return (void*)sane;
}

void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
                         void* pre_result, Int res, Bool isBlocking )
{
   Int  sane_before_call = (Int)pre_result;
   Bool sane_after_call  = SK_(cheap_sanity_check)();

   if ((Int)sane_before_call && (!sane_after_call)) {
      VG_(message)(Vg_DebugMsg, "post-syscall: ");
      VG_(message)(Vg_DebugMsg,
                   "probable sanity check failure for syscall number %d\n",
                   syscallno );
      VG_(skin_panic)("aborting due to the above ... bye!");
   }
}


/*--------------------------------------------------------------------*/
/*--- end                                              mc_common.c ---*/
/*--------------------------------------------------------------------*/