blob: 1cfd195cf2146433f90a80d1bc6c3206e5aac069 [file] [log] [blame]
njn5c004e42002-11-18 11:04:50 +00001
2/*--------------------------------------------------------------------*/
3/*--- Code that is shared between MemCheck and AddrCheck. ---*/
4/*--- mc_common.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of MemCheck, a heavyweight Valgrind skin for
9 detecting memory errors, and AddrCheck, a lightweight Valgrind skin
10 for detecting memory errors.
11
12 Copyright (C) 2000-2002 Julian Seward
13 jseward@acm.org
14
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 02111-1307, USA.
29
30 The GNU General Public License is contained in the file COPYING.
31*/
32
33
34#include "mc_common.h"
35
36/*------------------------------------------------------------*/
37/*--- Defns ---*/
38/*------------------------------------------------------------*/
39
/* Accesses up to this many bytes below %ESP are considered addressible
   when the --workaround-gcc296-bugs hack is enabled; see
   is_just_below_ESP() below for the check that uses it. */
#define VG_GCC296_BUG_STACK_SLOP 1024
43
44/*------------------------------------------------------------*/
45/*--- Command line options ---*/
46/*------------------------------------------------------------*/
47
Bool  MC_(clo_partial_loads_ok)       = True;     /* tolerate partially-addressible loads -- TODO confirm exact semantics in tool code */
Int   MC_(clo_freelist_vol)           = 1000000;  /* max bytes kept on the freed-blocks queue (see add_to_freed_queue) */
Bool  MC_(clo_leak_check)             = False;    /* presumably: run leak check at exit -- handled by tool code, not here */
VgRes MC_(clo_leak_resolution)        = Vg_LowRes;
Bool  MC_(clo_show_reachable)         = False;
Bool  MC_(clo_workaround_gcc296_bugs) = False;    /* ignore accesses just below %esp (see MC_(record_address_error)) */
Bool  MC_(clo_cleanup)                = True;
/* NOTE(review): the two options below have no case in
   MC_(process_common_cmd_line_option); presumably parsed by
   tool-specific code elsewhere -- confirm. */
Bool  MC_(clo_check_addrVs)           = True;
Bool  MC_(clo_avoid_strlen_errors)    = True;
57
58Bool MC_(process_common_cmd_line_option)(Char* arg)
59{
60# define STREQ(s1,s2) (0==VG_(strcmp_ws)((s1),(s2)))
61# define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))
62
63 if (STREQ(arg, "--partial-loads-ok=yes"))
64 MC_(clo_partial_loads_ok) = True;
65 else if (STREQ(arg, "--partial-loads-ok=no"))
66 MC_(clo_partial_loads_ok) = False;
67
68 else if (STREQN(15, arg, "--freelist-vol=")) {
69 MC_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
70 if (MC_(clo_freelist_vol) < 0) MC_(clo_freelist_vol) = 0;
71 }
72
73 else if (STREQ(arg, "--leak-check=yes"))
74 MC_(clo_leak_check) = True;
75 else if (STREQ(arg, "--leak-check=no"))
76 MC_(clo_leak_check) = False;
77
78 else if (STREQ(arg, "--leak-resolution=low"))
79 MC_(clo_leak_resolution) = Vg_LowRes;
80 else if (STREQ(arg, "--leak-resolution=med"))
81 MC_(clo_leak_resolution) = Vg_MedRes;
82 else if (STREQ(arg, "--leak-resolution=high"))
83 MC_(clo_leak_resolution) = Vg_HighRes;
84
85 else if (STREQ(arg, "--show-reachable=yes"))
86 MC_(clo_show_reachable) = True;
87 else if (STREQ(arg, "--show-reachable=no"))
88 MC_(clo_show_reachable) = False;
89
90 else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
91 MC_(clo_workaround_gcc296_bugs) = True;
92 else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
93 MC_(clo_workaround_gcc296_bugs) = False;
94
95 else if (STREQ(arg, "--cleanup=yes"))
96 MC_(clo_cleanup) = True;
97 else if (STREQ(arg, "--cleanup=no"))
98 MC_(clo_cleanup) = False;
99
100 else
101 return False;
102
103 return True;
104
105#undef STREQ
106#undef STREQN
107}
108
109/*------------------------------------------------------------*/
110/*--- Comparing and printing errors ---*/
111/*------------------------------------------------------------*/
112
113static __inline__
114void clear_AddrInfo ( AddrInfo* ai )
115{
116 ai->akind = Unknown;
117 ai->blksize = 0;
118 ai->rwoffset = 0;
119 ai->lastchange = NULL;
120 ai->stack_tid = VG_INVALID_THREADID;
121 ai->maybe_gcc = False;
122}
123
124void MC_(clear_MemCheckError) ( MemCheckError* err_extra )
125{
126 err_extra->axskind = ReadAxs;
127 err_extra->size = 0;
128 clear_AddrInfo ( &err_extra->addrinfo );
129 err_extra->isWrite = False;
130}
131
132__attribute__ ((unused))
133static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
134{
135 if (ai1->akind != Undescribed
136 && ai2->akind != Undescribed
137 && ai1->akind != ai2->akind)
138 return False;
139 if (ai1->akind == Freed || ai1->akind == Mallocd) {
140 if (ai1->blksize != ai2->blksize)
141 return False;
142 if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
143 return False;
144 }
145 return True;
146}
147
148/* Compare error contexts, to detect duplicates. Note that if they
149 are otherwise the same, the faulting addrs and associated rwoffsets
150 are allowed to be different. */
151
152Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
153{
154 MemCheckError* e1_extra = VG_(get_error_extra)(e1);
155 MemCheckError* e2_extra = VG_(get_error_extra)(e2);
156
157 switch (VG_(get_error_kind)(e1)) {
158 case CoreMemErr: {
159 Char *e1s, *e2s;
160 if (e1_extra->isWrite != e2_extra->isWrite) return False;
161 if (VG_(get_error_kind)(e2) != CoreMemErr) return False;
162 e1s = VG_(get_error_string)(e1);
163 e2s = VG_(get_error_string)(e2);
164 if (e1s == e2s) return True;
165 if (0 == VG_(strcmp)(e1s, e2s)) return True;
166 return False;
167 }
168
169 case UserErr:
170 case ParamErr:
171 if (e1_extra->isWrite != e2_extra->isWrite) return False;
172 if (VG_(get_error_kind)(e1) == ParamErr
173 && 0 != VG_(strcmp)(VG_(get_error_string)(e1),
174 VG_(get_error_string)(e2))) return False;
175 return True;
176
177 case FreeErr:
178 case FreeMismatchErr:
179 /* JRS 2002-Aug-26: comparing addrs seems overkill and can
180 cause excessive duplication of errors. Not even AddrErr
181 below does that. So don't compare either the .addr field
182 or the .addrinfo fields. */
183 /* if (e1->addr != e2->addr) return False; */
184 /* if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
185 return False;
186 */
187 return True;
188
189 case AddrErr:
190 /* if (e1_extra->axskind != e2_extra->axskind) return False; */
191 if (e1_extra->size != e2_extra->size) return False;
192 /*
193 if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
194 return False;
195 */
196 return True;
197
198 case ValueErr:
199 if (e1_extra->size != e2_extra->size) return False;
200 return True;
201
202 default:
203 VG_(printf)("Error:\n unknown error code %d\n",
204 VG_(get_error_kind)(e1));
205 VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
206 }
207}
208
209void MC_(pp_AddrInfo) ( Addr a, AddrInfo* ai )
210{
211 switch (ai->akind) {
212 case Stack:
213 VG_(message)(Vg_UserMsg,
214 " Address 0x%x is on thread %d's stack",
215 a, ai->stack_tid);
216 break;
217 case Unknown:
218 if (ai->maybe_gcc) {
219 VG_(message)(Vg_UserMsg,
220 " Address 0x%x is just below %%esp. Possibly a bug in GCC/G++",
221 a);
222 VG_(message)(Vg_UserMsg,
223 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
224 } else {
225 VG_(message)(Vg_UserMsg,
226 " Address 0x%x is not stack'd, malloc'd or free'd", a);
227 }
228 break;
229 case Freed: case Mallocd: case UserG: case UserS: {
230 UInt delta;
231 UChar* relative;
232 if (ai->rwoffset < 0) {
233 delta = (UInt)(- ai->rwoffset);
234 relative = "before";
235 } else if (ai->rwoffset >= ai->blksize) {
236 delta = ai->rwoffset - ai->blksize;
237 relative = "after";
238 } else {
239 delta = ai->rwoffset;
240 relative = "inside";
241 }
242 if (ai->akind == UserS) {
243 VG_(message)(Vg_UserMsg,
244 " Address 0x%x is %d bytes %s a %d-byte stack red-zone created",
245 a, delta, relative,
246 ai->blksize );
247 } else {
248 VG_(message)(Vg_UserMsg,
249 " Address 0x%x is %d bytes %s a block of size %d %s",
250 a, delta, relative,
251 ai->blksize,
252 ai->akind==Mallocd ? "alloc'd"
253 : ai->akind==Freed ? "free'd"
254 : "client-defined");
255 }
256 VG_(pp_ExeContext)(ai->lastchange);
257 break;
258 }
259 default:
260 VG_(skin_panic)("MC_(pp_AddrInfo)");
261 }
262}
263
264/*------------------------------------------------------------*/
265/*--- Recording errors ---*/
266/*------------------------------------------------------------*/
267
268/* Is this address within some small distance below %ESP? Used only
269 for the --workaround-gcc296-bugs kludge. */
270static Bool is_just_below_ESP( Addr esp, Addr aa )
271{
272 if ((UInt)esp > (UInt)aa
273 && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
274 return True;
275 else
276 return False;
277}
278
279/* This one called from generated code. */
280
281void MC_(record_address_error) ( Addr a, Int size, Bool isWrite )
282{
283 MemCheckError err_extra;
284 Bool just_below_esp;
285
286 just_below_esp = is_just_below_ESP( VG_(get_stack_pointer)(), a );
287
288 /* If this is caused by an access immediately below %ESP, and the
289 user asks nicely, we just ignore it. */
290 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
291 return;
292
293 MC_(clear_MemCheckError)( &err_extra );
294 err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
295 err_extra.size = size;
296 err_extra.addrinfo.akind = Undescribed;
297 err_extra.addrinfo.maybe_gcc = just_below_esp;
298 VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
299}
300
301/* These ones are called from non-generated code */
302
303/* This is for memory errors in pthread functions, as opposed to pthread API
304 errors which are found by the core. */
305void MC_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
306{
307 MemCheckError err_extra;
308
309 MC_(clear_MemCheckError)( &err_extra );
310 err_extra.isWrite = isWrite;
311 VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
312}
313
314void MC_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
315 Char* msg )
316{
317 MemCheckError err_extra;
318
319 sk_assert(NULL != tst);
320 MC_(clear_MemCheckError)( &err_extra );
321 err_extra.addrinfo.akind = Undescribed;
322 err_extra.isWrite = isWrite;
323 VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
324}
325
326void MC_(record_jump_error) ( ThreadState* tst, Addr a )
327{
328 MemCheckError err_extra;
329
330 sk_assert(NULL != tst);
331
332 MC_(clear_MemCheckError)( &err_extra );
333 err_extra.axskind = ExecAxs;
334 err_extra.addrinfo.akind = Undescribed;
335 VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
336}
337
338void MC_(record_free_error) ( ThreadState* tst, Addr a )
339{
340 MemCheckError err_extra;
341
342 sk_assert(NULL != tst);
343
344 MC_(clear_MemCheckError)( &err_extra );
345 err_extra.addrinfo.akind = Undescribed;
346 VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
347}
348
349void MC_(record_freemismatch_error) ( ThreadState* tst, Addr a )
350{
351 MemCheckError err_extra;
352
353 sk_assert(NULL != tst);
354
355 MC_(clear_MemCheckError)( &err_extra );
356 err_extra.addrinfo.akind = Undescribed;
357 VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
358}
359
360/*------------------------------------------------------------*/
361/*--- Suppressions ---*/
362/*------------------------------------------------------------*/
363
364Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp *su )
365{
366 Bool eof;
367
368 if (VG_(get_supp_kind)(su) == ParamSupp) {
369 eof = VG_(get_line) ( fd, buf, nBuf );
370 if (eof) return False;
371 VG_(set_supp_string)(su, VG_(strdup)(buf));
372 }
373 return True;
374}
375
/* NULL-safe string equality.  Fix: parenthesize the macro arguments
   in the NULL comparisons too, so expression arguments expand safely. */
#define STREQ(s1,s2) ((s1) != NULL && (s2) != NULL \
                      && VG_(strcmp)((s1),(s2))==0)
378
379extern Bool SK_(error_matches_suppression)(Error* err, Supp* su)
380{
381 UInt su_size;
382 MemCheckError* err_extra = VG_(get_error_extra)(err);
383 ErrorKind ekind = VG_(get_error_kind )(err);
384
385 switch (VG_(get_supp_kind)(su)) {
386 case ParamSupp:
387 return (ekind == ParamErr
388 && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));
389
390 case CoreMemSupp:
391 return (ekind == CoreMemErr
392 && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));
393
394 case Value0Supp: su_size = 0; goto value_case;
395 case Value1Supp: su_size = 1; goto value_case;
396 case Value2Supp: su_size = 2; goto value_case;
397 case Value4Supp: su_size = 4; goto value_case;
398 case Value8Supp: su_size = 8; goto value_case;
399 value_case:
400 return (ekind == ValueErr && err_extra->size == su_size);
401
402 case Addr1Supp: su_size = 1; goto addr_case;
403 case Addr2Supp: su_size = 2; goto addr_case;
404 case Addr4Supp: su_size = 4; goto addr_case;
405 case Addr8Supp: su_size = 8; goto addr_case;
406 addr_case:
407 return (ekind == AddrErr && err_extra->size == su_size);
408
409 case FreeSupp:
410 return (ekind == FreeErr || ekind == FreeMismatchErr);
411
412 default:
413 VG_(printf)("Error:\n"
414 " unknown suppression type %d\n",
415 VG_(get_supp_kind)(su));
416 VG_(skin_panic)("unknown suppression type in "
417 "SK_(error_matches_suppression)");
418 }
419}
420
421# undef STREQ
422
423/*------------------------------------------------------------*/
424/*--- Crude profiling machinery. ---*/
425/*------------------------------------------------------------*/
426
427/* Event index. If just the name of the fn is given, this means the
428 number of calls to the fn. Otherwise it is the specified event.
429 Ones marked 'M' are MemCheck only. Ones marked 'A' are AddrCheck only.
430 The rest are shared.
431
432 10 alloc_secondary_map
433
434 20 get_abit
435M 21 get_vbyte
436 22 set_abit
437M 23 set_vbyte
438 24 get_abits4_ALIGNED
439M 25 get_vbytes4_ALIGNED
440
441 30 set_address_range_perms
442 31 set_address_range_perms(lower byte loop)
443 32 set_address_range_perms(quadword loop)
444 33 set_address_range_perms(upper byte loop)
445
446 35 make_noaccess
447 36 make_writable
448 37 make_readable
449A 38 make_accessible
450
451 40 copy_address_range_state
452 41 copy_address_range_state(byte loop)
453 42 check_writable
454 43 check_writable(byte loop)
455 44 check_readable
456 45 check_readable(byte loop)
457 46 check_readable_asciiz
458 47 check_readable_asciiz(byte loop)
459A 48 check_accessible
460A 49 check_accessible(byte loop)
461
462 50 make_noaccess_aligned
463 51 make_writable_aligned
464
465M 60 helperc_LOADV4
466M 61 helperc_STOREV4
467M 62 helperc_LOADV2
468M 63 helperc_STOREV2
469M 64 helperc_LOADV1
470M 65 helperc_STOREV1
471
472A 66 helperc_ACCESS4
473A 67 helperc_ACCESS2
474A 68 helperc_ACCESS1
475
476M 70 rim_rd_V4_SLOWLY
477M 71 rim_wr_V4_SLOWLY
478M 72 rim_rd_V2_SLOWLY
479M 73 rim_wr_V2_SLOWLY
480M 74 rim_rd_V1_SLOWLY
481M 75 rim_wr_V1_SLOWLY
482
483A 76 ACCESS4_SLOWLY
484A 77 ACCESS2_SLOWLY
485A 78 ACCESS1_SLOWLY
486
487 80 fpu_read
488 81 fpu_read aligned 4
489 82 fpu_read aligned 8
490 83 fpu_read 2
491 84 fpu_read 10/28/108
492
493M 85 fpu_write
494M 86 fpu_write aligned 4
495M 87 fpu_write aligned 8
496M 88 fpu_write 2
497M 89 fpu_write 10/28/108
498
499 90 fpu_access
500 91 fpu_access aligned 4
501 92 fpu_access aligned 8
502 93 fpu_access 2
503 94 fpu_access 10/28/108
504
505 100 fpu_access_check_SLOWLY
506 101 fpu_access_check_SLOWLY(byte loop)
507*/
508
#ifdef VG_PROFILE_MEMORY

/* Number of distinct profiling event counters (see the event index
   in the comment above). */
#define N_PROF_EVENTS 150

/* Event counters themselves; defined elsewhere -- NOTE(review):
   `extern' declaration in a .c file, confirm the definition exists. */
extern UInt MC_(event_ctr)[N_PROF_EVENTS];

/* Zero all event counters at startup. */
void MC_(init_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++)
      MC_(event_ctr)[i] = 0;
}

/* Dump all non-zero event counters at shutdown, with a blank line
   every ten events for readability. */
void MC_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (MC_(event_ctr)[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, MC_(event_ctr)[i] );
   }
   VG_(printf)("\n");
}

#else

/* Profiling disabled: hooks compile away to nothing. */
void MC_(init_prof_mem) ( void ) { }
void MC_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif
542
543/*------------------------------------------------------------*/
544/*--- Shadow chunks info ---*/
545/*------------------------------------------------------------*/
546
/* Stash ec (the context of the chunk's last alloc/free) in
   extra-word 0 of the shadow chunk. */
void MC_(set_where)( ShadowChunk* sc, ExeContext* ec )
{
   VG_(set_sc_extra)( sc, 0, (UInt)ec );
}
551
/* Retrieve the ExeContext stashed by MC_(set_where) /
   SK_(complete_shadow_chunk). */
ExeContext *MC_(get_where)( ShadowChunk* sc )
{
   return (ExeContext*)VG_(get_sc_extra)(sc, 0);
}
556
/* Record the current ExeContext in extra-word 0 of a fresh shadow
   chunk (NOTE(review): presumably called by the core at allocation
   time -- confirm against the core's shadow-chunk hooks). */
void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   VG_(set_sc_extra) ( sc, 0, (UInt)VG_(get_ExeContext)(tst) );
}
561
562
563/*------------------------------------------------------------*/
564/*--- Postponing free()ing ---*/
565/*------------------------------------------------------------*/
566
/* Holds blocks after freeing: a FIFO queue, oldest at the start,
   newest at the end (see add_to_freed_queue below). */
static ShadowChunk* freed_list_start = NULL;
static ShadowChunk* freed_list_end = NULL;
static Int freed_list_volume = 0;  /* total queued bytes; kept below
                                      MC_(clo_freelist_vol) */
571
572__attribute__ ((unused))
573Int MC_(count_freelist) ( void )
574{
575 ShadowChunk* sc;
576 Int n = 0;
577 for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
578 n++;
579 return n;
580}
581
582__attribute__ ((unused))
583void MC_(freelist_sanity) ( void )
584{
585 ShadowChunk* sc;
586 Int n = 0;
587 /* VG_(printf)("freelist sanity\n"); */
588 for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
589 n += VG_(get_sc_size)(sc);
590 sk_assert(n == freed_list_volume);
591}
592
/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      /* Empty queue: sc becomes both head and tail. */
      sk_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = sc;
      freed_list_volume = VG_(get_sc_size)(sc);
   } else {
      sk_assert(VG_(get_sc_next)(freed_list_end) == NULL);
      VG_(set_sc_next)(freed_list_end, sc);
      freed_list_end = sc;
      freed_list_volume += VG_(get_sc_size)(sc);
   }
   /* sc is the newest entry, so terminate the list here. */
   VG_(set_sc_next)(sc, NULL);

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (freed_list_volume > MC_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(freed_list_start != NULL);
      sk_assert(freed_list_end != NULL);

      /* Unlink the oldest chunk from the head of the queue. */
      sc1 = freed_list_start;
      freed_list_volume -= VG_(get_sc_size)(sc1);
      /* VG_(printf)("volume now %d\n", freed_list_volume); */
      sk_assert(freed_list_volume >= 0);

      if (freed_list_start == freed_list_end) {
         /* That was the last entry; queue is now empty. */
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = VG_(get_sc_next)(sc1);
      }
      VG_(set_sc_next)(sc1, NULL); /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}
634
635/* Return the first shadow chunk satisfying the predicate p. */
636ShadowChunk* MC_(any_matching_freed_ShadowChunks) ( Bool (*p)(ShadowChunk*) )
637{
638 ShadowChunk* sc;
639
640 /* No point looking through freed blocks if we're not keeping
641 them around for a while... */
642 for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
643 if (p(sc))
644 return sc;
645
646 return NULL;
647}
648
/* Delayed free: tag the chunk with the context of this free, then
   park it on the freed queue rather than releasing it immediately
   (the queue releases its oldest entries as it fills). */
void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
{
   /* Record where freed */
   MC_(set_where)( sc, VG_(get_ExeContext) ( tst ) );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
}
657
658/*------------------------------------------------------------*/
659/*--- Syscall wrappers ---*/
660/*------------------------------------------------------------*/
661
662void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
663{
664 Int sane = SK_(cheap_sanity_check)();
665 return (void*)sane;
666}
667
668void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
669 void* pre_result, Int res, Bool isBlocking )
670{
671 Int sane_before_call = (Int)pre_result;
672 Bool sane_after_call = SK_(cheap_sanity_check)();
673
674 if ((Int)sane_before_call && (!sane_after_call)) {
675 VG_(message)(Vg_DebugMsg, "post-syscall: ");
676 VG_(message)(Vg_DebugMsg,
677 "probable sanity check failure for syscall number %d\n",
678 syscallno );
679 VG_(skin_panic)("aborting due to the above ... bye!");
680 }
681}
682
683/*--------------------------------------------------------------------*/
684/*--- end mc_common.c ---*/
685/*--------------------------------------------------------------------*/