blob: e08097631bd30881296b5e5b01d5e197cf2f6774 [file] [log] [blame]
njn5c004e42002-11-18 11:04:50 +00001
2/*--------------------------------------------------------------------*/
3/*--- Code that is shared between MemCheck and AddrCheck. ---*/
4/*--- mc_common.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of MemCheck, a heavyweight Valgrind skin for
9 detecting memory errors, and AddrCheck, a lightweight Valgrind skin
10 for detecting memory errors.
11
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
33
34#include "mc_common.h"
35
36/*------------------------------------------------------------*/
37/*--- Defns ---*/
38/*------------------------------------------------------------*/
39
/* These many bytes below %ESP are considered addressible if we're
   doing the --workaround-gcc296-bugs hack.  Accesses in this window
   are assumed to be compiler artefacts rather than real errors. */
#define VG_GCC296_BUG_STACK_SLOP 1024
43
44/*------------------------------------------------------------*/
45/*--- Command line options ---*/
46/*------------------------------------------------------------*/
47
Bool  MC_(clo_partial_loads_ok)       = True;      /* tolerate word loads that are only partly addressible */
Int   MC_(clo_freelist_vol)           = 1000000;   /* max bytes kept on the freed-blocks queue (see add_to_freed_queue) */
Bool  MC_(clo_leak_check)             = False;     /* presumably: run leak search at exit -- TODO confirm (used outside this file) */
VgRes MC_(clo_leak_resolution)        = Vg_LowRes; /* backtrace-matching resolution for leak reports */
Bool  MC_(clo_show_reachable)         = False;     /* presumably: also report still-reachable blocks -- TODO confirm */
Bool  MC_(clo_workaround_gcc296_bugs) = False;     /* ignore accesses just below %esp (gcc 2.96/3.0 bug kludge) */
Bool  MC_(clo_cleanup)                = True;      /* presumably: enable instrumentation cleanup pass -- TODO confirm */
Bool  MC_(clo_check_addrVs)           = True;      /* NOTE(review): looks MemCheck-specific (V bits of addresses) -- confirm */
Bool  MC_(clo_avoid_strlen_errors)    = True;      /* presumably: suppress errors in strlen-style loops -- TODO confirm */
57
58Bool MC_(process_common_cmd_line_option)(Char* arg)
59{
60# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
61# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
62
63 if (STREQ(arg, "--partial-loads-ok=yes"))
64 MC_(clo_partial_loads_ok) = True;
65 else if (STREQ(arg, "--partial-loads-ok=no"))
66 MC_(clo_partial_loads_ok) = False;
67
68 else if (STREQN(15, arg, "--freelist-vol=")) {
69 MC_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
70 if (MC_(clo_freelist_vol) < 0) MC_(clo_freelist_vol) = 0;
71 }
72
73 else if (STREQ(arg, "--leak-check=yes"))
74 MC_(clo_leak_check) = True;
75 else if (STREQ(arg, "--leak-check=no"))
76 MC_(clo_leak_check) = False;
77
78 else if (STREQ(arg, "--leak-resolution=low"))
79 MC_(clo_leak_resolution) = Vg_LowRes;
80 else if (STREQ(arg, "--leak-resolution=med"))
81 MC_(clo_leak_resolution) = Vg_MedRes;
82 else if (STREQ(arg, "--leak-resolution=high"))
83 MC_(clo_leak_resolution) = Vg_HighRes;
84
85 else if (STREQ(arg, "--show-reachable=yes"))
86 MC_(clo_show_reachable) = True;
87 else if (STREQ(arg, "--show-reachable=no"))
88 MC_(clo_show_reachable) = False;
89
90 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
91 MC_(clo_workaround_gcc296_bugs) = True;
92 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
93 MC_(clo_workaround_gcc296_bugs) = False;
94
95 else if (STREQ(arg, "--cleanup=yes"))
96 MC_(clo_cleanup) = True;
97 else if (STREQ(arg, "--cleanup=no"))
98 MC_(clo_cleanup) = False;
99
100 else
101 return False;
102
103 return True;
104
105#undef STREQ
106#undef STREQN
107}
108
109/*------------------------------------------------------------*/
110/*--- Comparing and printing errors ---*/
111/*------------------------------------------------------------*/
112
113static __inline__
114void clear_AddrInfo ( AddrInfo* ai )
115{
116 ai->akind = Unknown;
117 ai->blksize = 0;
118 ai->rwoffset = 0;
119 ai->lastchange = NULL;
120 ai->stack_tid = VG_INVALID_THREADID;
121 ai->maybe_gcc = False;
122}
123
124void MC_(clear_MemCheckError) ( MemCheckError* err_extra )
125{
126 err_extra->axskind = ReadAxs;
127 err_extra->size = 0;
128 clear_AddrInfo ( &err_extra->addrinfo );
129 err_extra->isWrite = False;
130}
131
132__attribute__ ((unused))
133static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
134{
135 if (ai1->akind != Undescribed
136 && ai2->akind != Undescribed
137 && ai1->akind != ai2->akind)
138 return False;
139 if (ai1->akind == Freed || ai1->akind == Mallocd) {
140 if (ai1->blksize != ai2->blksize)
141 return False;
142 if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
143 return False;
144 }
145 return True;
146}
147
148/* Compare error contexts, to detect duplicates. Note that if they
149 are otherwise the same, the faulting addrs and associated rwoffsets
150 are allowed to be different. */
151
152Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
153{
154 MemCheckError* e1_extra = VG_(get_error_extra)(e1);
155 MemCheckError* e2_extra = VG_(get_error_extra)(e2);
njn7cc53a82002-11-19 16:19:32 +0000156
157 /* Guaranteed by calling function */
158 sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
njn5c004e42002-11-18 11:04:50 +0000159
160 switch (VG_(get_error_kind)(e1)) {
161 case CoreMemErr: {
162 Char *e1s, *e2s;
163 if (e1_extra->isWrite != e2_extra->isWrite) return False;
njn5c004e42002-11-18 11:04:50 +0000164 e1s = VG_(get_error_string)(e1);
165 e2s = VG_(get_error_string)(e2);
166 if (e1s == e2s) return True;
167 if (0 == VG_(strcmp)(e1s, e2s)) return True;
168 return False;
169 }
170
171 case UserErr:
172 case ParamErr:
173 if (e1_extra->isWrite != e2_extra->isWrite) return False;
174 if (VG_(get_error_kind)(e1) == ParamErr
175 && 0 != VG_(strcmp)(VG_(get_error_string)(e1),
176 VG_(get_error_string)(e2))) return False;
177 return True;
178
179 case FreeErr:
180 case FreeMismatchErr:
181 /* JRS 2002-Aug-26: comparing addrs seems overkill and can
182 cause excessive duplication of errors. Not even AddrErr
183 below does that. So don't compare either the .addr field
184 or the .addrinfo fields. */
185 /* if (e1->addr != e2->addr) return False; */
186 /* if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
187 return False;
188 */
189 return True;
190
191 case AddrErr:
192 /* if (e1_extra->axskind != e2_extra->axskind) return False; */
193 if (e1_extra->size != e2_extra->size) return False;
194 /*
195 if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
196 return False;
197 */
198 return True;
199
200 case ValueErr:
201 if (e1_extra->size != e2_extra->size) return False;
202 return True;
203
204 default:
205 VG_(printf)("Error:\n unknown error code %d\n",
206 VG_(get_error_kind)(e1));
207 VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
208 }
209}
210
211void MC_(pp_AddrInfo) ( Addr a, AddrInfo* ai )
212{
213 switch (ai->akind) {
214 case Stack:
215 VG_(message)(Vg_UserMsg,
216 " Address 0x%x is on thread %d's stack",
217 a, ai->stack_tid);
218 break;
219 case Unknown:
220 if (ai->maybe_gcc) {
221 VG_(message)(Vg_UserMsg,
222 " Address 0x%x is just below %%esp. Possibly a bug in GCC/G++",
223 a);
224 VG_(message)(Vg_UserMsg,
225 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
226 } else {
227 VG_(message)(Vg_UserMsg,
228 " Address 0x%x is not stack'd, malloc'd or free'd", a);
229 }
230 break;
231 case Freed: case Mallocd: case UserG: case UserS: {
232 UInt delta;
233 UChar* relative;
234 if (ai->rwoffset < 0) {
235 delta = (UInt)(- ai->rwoffset);
236 relative = "before";
237 } else if (ai->rwoffset >= ai->blksize) {
238 delta = ai->rwoffset - ai->blksize;
239 relative = "after";
240 } else {
241 delta = ai->rwoffset;
242 relative = "inside";
243 }
244 if (ai->akind == UserS) {
245 VG_(message)(Vg_UserMsg,
246 " Address 0x%x is %d bytes %s a %d-byte stack red-zone created",
247 a, delta, relative,
248 ai->blksize );
249 } else {
250 VG_(message)(Vg_UserMsg,
251 " Address 0x%x is %d bytes %s a block of size %d %s",
252 a, delta, relative,
253 ai->blksize,
254 ai->akind==Mallocd ? "alloc'd"
255 : ai->akind==Freed ? "free'd"
256 : "client-defined");
257 }
258 VG_(pp_ExeContext)(ai->lastchange);
259 break;
260 }
261 default:
262 VG_(skin_panic)("MC_(pp_AddrInfo)");
263 }
264}
265
266/*------------------------------------------------------------*/
267/*--- Recording errors ---*/
268/*------------------------------------------------------------*/
269
270/* Is this address within some small distance below %ESP? Used only
271 for the --workaround-gcc296-bugs kludge. */
272static Bool is_just_below_ESP( Addr esp, Addr aa )
273{
274 if ((UInt)esp > (UInt)aa
275 && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
276 return True;
277 else
278 return False;
279}
280
281/* This one called from generated code. */
282
283void MC_(record_address_error) ( Addr a, Int size, Bool isWrite )
284{
285 MemCheckError err_extra;
286 Bool just_below_esp;
287
288 just_below_esp = is_just_below_ESP( VG_(get_stack_pointer)(), a );
289
290 /* If this is caused by an access immediately below %ESP, and the
291 user asks nicely, we just ignore it. */
292 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
293 return;
294
295 MC_(clear_MemCheckError)( &err_extra );
296 err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
297 err_extra.size = size;
298 err_extra.addrinfo.akind = Undescribed;
299 err_extra.addrinfo.maybe_gcc = just_below_esp;
300 VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
301}
302
303/* These ones are called from non-generated code */
304
305/* This is for memory errors in pthread functions, as opposed to pthread API
306 errors which are found by the core. */
307void MC_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
308{
309 MemCheckError err_extra;
310
311 MC_(clear_MemCheckError)( &err_extra );
312 err_extra.isWrite = isWrite;
313 VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
314}
315
316void MC_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
317 Char* msg )
318{
319 MemCheckError err_extra;
320
321 sk_assert(NULL != tst);
322 MC_(clear_MemCheckError)( &err_extra );
323 err_extra.addrinfo.akind = Undescribed;
324 err_extra.isWrite = isWrite;
325 VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
326}
327
328void MC_(record_jump_error) ( ThreadState* tst, Addr a )
329{
330 MemCheckError err_extra;
331
332 sk_assert(NULL != tst);
333
334 MC_(clear_MemCheckError)( &err_extra );
335 err_extra.axskind = ExecAxs;
336 err_extra.addrinfo.akind = Undescribed;
337 VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
338}
339
340void MC_(record_free_error) ( ThreadState* tst, Addr a )
341{
342 MemCheckError err_extra;
343
344 sk_assert(NULL != tst);
345
346 MC_(clear_MemCheckError)( &err_extra );
347 err_extra.addrinfo.akind = Undescribed;
348 VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
349}
350
351void MC_(record_freemismatch_error) ( ThreadState* tst, Addr a )
352{
353 MemCheckError err_extra;
354
355 sk_assert(NULL != tst);
356
357 MC_(clear_MemCheckError)( &err_extra );
358 err_extra.addrinfo.akind = Undescribed;
359 VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
360}
361
362/*------------------------------------------------------------*/
363/*--- Suppressions ---*/
364/*------------------------------------------------------------*/
365
366Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp *su )
367{
368 Bool eof;
369
370 if (VG_(get_supp_kind)(su) == ParamSupp) {
371 eof = VG_(get_line) ( fd, buf, nBuf );
372 if (eof) return False;
373 VG_(set_supp_string)(su, VG_(strdup)(buf));
374 }
375 return True;
376}
377
378#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
379 && VG_(strcmp)((s1),(s2))==0)
380
381extern Bool SK_(error_matches_suppression)(Error* err, Supp* su)
382{
383 UInt su_size;
384 MemCheckError* err_extra = VG_(get_error_extra)(err);
385 ErrorKind ekind = VG_(get_error_kind )(err);
386
387 switch (VG_(get_supp_kind)(su)) {
388 case ParamSupp:
389 return (ekind == ParamErr
390 && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));
391
392 case CoreMemSupp:
393 return (ekind == CoreMemErr
394 && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));
395
396 case Value0Supp: su_size = 0; goto value_case;
397 case Value1Supp: su_size = 1; goto value_case;
398 case Value2Supp: su_size = 2; goto value_case;
399 case Value4Supp: su_size = 4; goto value_case;
400 case Value8Supp: su_size = 8; goto value_case;
401 value_case:
402 return (ekind == ValueErr && err_extra->size == su_size);
403
404 case Addr1Supp: su_size = 1; goto addr_case;
405 case Addr2Supp: su_size = 2; goto addr_case;
406 case Addr4Supp: su_size = 4; goto addr_case;
407 case Addr8Supp: su_size = 8; goto addr_case;
408 addr_case:
409 return (ekind == AddrErr && err_extra->size == su_size);
410
411 case FreeSupp:
412 return (ekind == FreeErr || ekind == FreeMismatchErr);
413
414 default:
415 VG_(printf)("Error:\n"
416 " unknown suppression type %d\n",
417 VG_(get_supp_kind)(su));
418 VG_(skin_panic)("unknown suppression type in "
419 "SK_(error_matches_suppression)");
420 }
421}
422
423# undef STREQ
424
425/*------------------------------------------------------------*/
426/*--- Crude profiling machinery. ---*/
427/*------------------------------------------------------------*/
428
429/* Event index. If just the name of the fn is given, this means the
430 number of calls to the fn. Otherwise it is the specified event.
431 Ones marked 'M' are MemCheck only. Ones marked 'A' are AddrCheck only.
432 The rest are shared.
433
434 10 alloc_secondary_map
435
436 20 get_abit
437M 21 get_vbyte
438 22 set_abit
439M 23 set_vbyte
440 24 get_abits4_ALIGNED
441M 25 get_vbytes4_ALIGNED
442
443 30 set_address_range_perms
444 31 set_address_range_perms(lower byte loop)
445 32 set_address_range_perms(quadword loop)
446 33 set_address_range_perms(upper byte loop)
447
448 35 make_noaccess
449 36 make_writable
450 37 make_readable
451A 38 make_accessible
452
453 40 copy_address_range_state
454 41 copy_address_range_state(byte loop)
455 42 check_writable
456 43 check_writable(byte loop)
457 44 check_readable
458 45 check_readable(byte loop)
459 46 check_readable_asciiz
460 47 check_readable_asciiz(byte loop)
461A 48 check_accessible
462A 49 check_accessible(byte loop)
463
464 50 make_noaccess_aligned
465 51 make_writable_aligned
466
467M 60 helperc_LOADV4
468M 61 helperc_STOREV4
469M 62 helperc_LOADV2
470M 63 helperc_STOREV2
471M 64 helperc_LOADV1
472M 65 helperc_STOREV1
473
474A 66 helperc_ACCESS4
475A 67 helperc_ACCESS2
476A 68 helperc_ACCESS1
477
478M 70 rim_rd_V4_SLOWLY
479M 71 rim_wr_V4_SLOWLY
480M 72 rim_rd_V2_SLOWLY
481M 73 rim_wr_V2_SLOWLY
482M 74 rim_rd_V1_SLOWLY
483M 75 rim_wr_V1_SLOWLY
484
485A 76 ACCESS4_SLOWLY
486A 77 ACCESS2_SLOWLY
487A 78 ACCESS1_SLOWLY
488
489 80 fpu_read
490 81 fpu_read aligned 4
491 82 fpu_read aligned 8
492 83 fpu_read 2
493 84 fpu_read 10/28/108
494
495M 85 fpu_write
496M 86 fpu_write aligned 4
497M 87 fpu_write aligned 8
498M 88 fpu_write 2
499M 89 fpu_write 10/28/108
500
501 90 fpu_access
502 91 fpu_access aligned 4
503 92 fpu_access aligned 8
504 93 fpu_access 2
505 94 fpu_access 10/28/108
506
507 100 fpu_access_check_SLOWLY
508 101 fpu_access_check_SLOWLY(byte loop)
509*/
510
511#ifdef VG_PROFILE_MEMORY
512
513#define N_PROF_EVENTS 150
514
515extern UInt MC_(event_ctr)[N_PROF_EVENTS];
516
517void MC_(init_prof_mem) ( void )
518{
519 Int i;
520 for (i = 0; i < N_PROF_EVENTS; i++)
521 MC_(event_ctr)[i] = 0;
522}
523
524void MC_(done_prof_mem) ( void )
525{
526 Int i;
527 for (i = 0; i < N_PROF_EVENTS; i++) {
528 if ((i % 10) == 0)
529 VG_(printf)("\n");
530 if (MC_(event_ctr)[i] > 0)
531 VG_(printf)( "prof mem event %2d: %d\n", i, MC_(event_ctr)[i] );
532 }
533 VG_(printf)("\n");
534}
535
536#else
537
538void MC_(init_prof_mem) ( void ) { }
539void MC_(done_prof_mem) ( void ) { }
540
541#define PROF_EVENT(ev) /* */
542
543#endif
544
545/*------------------------------------------------------------*/
546/*--- Shadow chunks info ---*/
547/*------------------------------------------------------------*/
548
549void MC_(set_where)( ShadowChunk* sc, ExeContext* ec )
550{
551 VG_(set_sc_extra)( sc, 0, (UInt)ec );
552}
553
554ExeContext *MC_(get_where)( ShadowChunk* sc )
555{
556 return (ExeContext*)VG_(get_sc_extra)(sc, 0);
557}
558
559void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
560{
561 VG_(set_sc_extra) ( sc, 0, (UInt)VG_(get_ExeContext)(tst) );
562}
563
564
565/*------------------------------------------------------------*/
566/*--- Postponing free()ing ---*/
567/*------------------------------------------------------------*/
568
/* Holds blocks after freeing.  Singly-linked FIFO queue: blocks are
   appended at the end and released from the start once the total
   volume exceeds MC_(clo_freelist_vol). */
static ShadowChunk* freed_list_start = NULL;   /* oldest queued block */
static ShadowChunk* freed_list_end = NULL;     /* most recently freed block */
static Int freed_list_volume = 0;              /* sum of queued block sizes, in bytes */
573
574__attribute__ ((unused))
575Int MC_(count_freelist) ( void )
576{
577 ShadowChunk* sc;
578 Int n = 0;
579 for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
580 n++;
581 return n;
582}
583
584__attribute__ ((unused))
585void MC_(freelist_sanity) ( void )
586{
587 ShadowChunk* sc;
588 Int n = 0;
589 /* VG_(printf)("freelist sanity\n"); */
590 for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
591 n += VG_(get_sc_size)(sc);
592 sk_assert(n == freed_list_volume);
593}
594
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time.  Keeping
   freed blocks around for a while lets use-after-free accesses be
   reported against the original block (see
   MC_(any_matching_freed_ShadowChunks)). */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      /* Empty queue: sc becomes both first and last element. */
      sk_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = sc;
      freed_list_volume = VG_(get_sc_size)(sc);
   } else {
      sk_assert(VG_(get_sc_next)(freed_list_end) == NULL);
      VG_(set_sc_next)(freed_list_end, sc);
      freed_list_end = sc;
      freed_list_volume += VG_(get_sc_size)(sc);
   }
   /* sc is the new tail, so it must not point anywhere. */
   VG_(set_sc_next)(sc, NULL);

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (freed_list_volume > MC_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(freed_list_start != NULL);
      sk_assert(freed_list_end != NULL);

      /* Detach the oldest block from the head of the queue. */
      sc1 = freed_list_start;
      freed_list_volume -= VG_(get_sc_size)(sc1);
      /* VG_(printf)("volume now %d\n", freed_list_volume); */
      sk_assert(freed_list_volume >= 0);

      if (freed_list_start == freed_list_end) {
         /* Removing the only element empties the queue. */
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = VG_(get_sc_next)(sc1);
      }
      VG_(set_sc_next)(sc1, NULL); /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}
636
637/* Return the first shadow chunk satisfying the predicate p. */
638ShadowChunk* MC_(any_matching_freed_ShadowChunks) ( Bool (*p)(ShadowChunk*) )
639{
640 ShadowChunk* sc;
641
642 /* No point looking through freed blocks if we're not keeping
643 them around for a while... */
644 for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
645 if (p(sc))
646 return sc;
647
648 return NULL;
649}
650
651void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
652{
653 /* Record where freed */
654 MC_(set_where)( sc, VG_(get_ExeContext) ( tst ) );
655
656 /* Put it out of harm's way for a while. */
657 add_to_freed_queue ( sc );
658}
659
660/*------------------------------------------------------------*/
661/*--- Syscall wrappers ---*/
662/*------------------------------------------------------------*/
663
664void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
665{
666 Int sane = SK_(cheap_sanity_check)();
667 return (void*)sane;
668}
669
670void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
671 void* pre_result, Int res, Bool isBlocking )
672{
673 Int sane_before_call = (Int)pre_result;
674 Bool sane_after_call = SK_(cheap_sanity_check)();
675
676 if ((Int)sane_before_call && (!sane_after_call)) {
677 VG_(message)(Vg_DebugMsg, "post-syscall: ");
678 VG_(message)(Vg_DebugMsg,
679 "probable sanity check failure for syscall number %d\n",
680 syscallno );
681 VG_(skin_panic)("aborting due to the above ... bye!");
682 }
683}
684
685/*--------------------------------------------------------------------*/
686/*--- end mc_common.c ---*/
687/*--------------------------------------------------------------------*/