/*--------------------------------------------------------------------*/
/*--- Code that is shared between MemCheck and AddrCheck.          ---*/
/*---                                                  mc_common.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors, and AddrCheck, a lightweight Valgrind skin
   for detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/


#include "mc_common.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* This many bytes below %ESP are considered addressable if we're
   doing the --workaround-gcc296-bugs hack. */
#define VG_GCC296_BUG_STACK_SLOP 1024

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

Bool  MC_(clo_partial_loads_ok)       = True;
Int   MC_(clo_freelist_vol)           = 1000000;
Bool  MC_(clo_leak_check)             = False;
VgRes MC_(clo_leak_resolution)        = Vg_LowRes;
Bool  MC_(clo_show_reachable)         = False;
Bool  MC_(clo_workaround_gcc296_bugs) = False;
Bool  MC_(clo_cleanup)                = True;
Bool  MC_(clo_avoid_strlen_errors)    = True;

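/* Handle a command-line option that is common to MemCheck and
   AddrCheck.  Returns True if 'arg' was recognised (and acted on),
   False otherwise so the caller can report an unrecognised option. */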
Bool MC_(process_common_cmd_line_option)(Char* arg)
{
#  define STREQ(s1,s2)     (0==VG_(strcmp_ws)((s1),(s2)))
#  define STREQN(nn,s1,s2) (0==VG_(strncmp_ws)((s1),(s2),(nn)))

   if (STREQ(arg, "--partial-loads-ok=yes"))
      MC_(clo_partial_loads_ok) = True;
   else if (STREQ(arg, "--partial-loads-ok=no"))
      MC_(clo_partial_loads_ok) = False;

   else if (STREQN(15, arg, "--freelist-vol=")) {
      MC_(clo_freelist_vol) = (Int)VG_(atoll)(&arg[15]);
      if (MC_(clo_freelist_vol) < 0) MC_(clo_freelist_vol) = 0;
   }

   else if (STREQ(arg, "--leak-check=yes"))
      MC_(clo_leak_check) = True;
   else if (STREQ(arg, "--leak-check=no"))
      MC_(clo_leak_check) = False;

   else if (STREQ(arg, "--leak-resolution=low"))
      MC_(clo_leak_resolution) = Vg_LowRes;
   else if (STREQ(arg, "--leak-resolution=med"))
      MC_(clo_leak_resolution) = Vg_MedRes;
   else if (STREQ(arg, "--leak-resolution=high"))
      MC_(clo_leak_resolution) = Vg_HighRes;

   else if (STREQ(arg, "--show-reachable=yes"))
      MC_(clo_show_reachable) = True;
   else if (STREQ(arg, "--show-reachable=no"))
      MC_(clo_show_reachable) = False;

   else if (STREQ(arg, "--workaround-gcc296-bugs=yes"))
      MC_(clo_workaround_gcc296_bugs) = True;
   else if (STREQ(arg, "--workaround-gcc296-bugs=no"))
      MC_(clo_workaround_gcc296_bugs) = False;

   else if (STREQ(arg, "--cleanup=yes"))
      MC_(clo_cleanup) = True;
   else if (STREQ(arg, "--cleanup=no"))
      MC_(clo_cleanup) = False;

   else
      return False;

   return True;

#  undef STREQ
#  undef STREQN
}

/*------------------------------------------------------------*/
/*--- Comparing and printing errors                        ---*/
/*------------------------------------------------------------*/

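/* Reset an AddrInfo to a neutral, "not yet described" state. */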
static __inline__
void clear_AddrInfo ( AddrInfo* ai )
{
   ai->akind      = Unknown;
   ai->blksize    = 0;
   ai->rwoffset   = 0;
   ai->lastchange = NULL;
   ai->stack_tid  = VG_INVALID_THREADID;
   ai->maybe_gcc  = False;
}

void MC_(clear_MemCheckError) ( MemCheckError* err_extra )
{
   err_extra->axskind = ReadAxs;
   err_extra->size    = 0;
   clear_AddrInfo ( &err_extra->addrinfo );
   err_extra->isWrite = False;
}

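/* Compare two fully-described AddrInfos.  Not currently called (the
   uses in SK_(eq_SkinError) below are commented out), hence the
   'unused' attribute. */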
__attribute__ ((unused))
static Bool eq_AddrInfo ( VgRes res, AddrInfo* ai1, AddrInfo* ai2 )
{
   if (ai1->akind != Undescribed
       && ai2->akind != Undescribed
       && ai1->akind != ai2->akind)
      return False;
   if (ai1->akind == Freed || ai1->akind == Mallocd) {
      if (ai1->blksize != ai2->blksize)
         return False;
      if (!VG_(eq_ExeContext)(res, ai1->lastchange, ai2->lastchange))
         return False;
   }
   return True;
}

/* Compare error contexts, to detect duplicates.  Note that if they
   are otherwise the same, the faulting addrs and associated rwoffsets
   are allowed to be different. */

Bool SK_(eq_SkinError) ( VgRes res, Error* e1, Error* e2 )
{
   MemCheckError* e1_extra = VG_(get_error_extra)(e1);
   MemCheckError* e2_extra = VG_(get_error_extra)(e2);

   /* Guaranteed by calling function */
   sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   switch (VG_(get_error_kind)(e1)) {
      case CoreMemErr: {
         Char *e1s, *e2s;
         if (e1_extra->isWrite != e2_extra->isWrite) return False;
         e1s = VG_(get_error_string)(e1);
         e2s = VG_(get_error_string)(e2);
         if (e1s == e2s) return True;
         if (0 == VG_(strcmp)(e1s, e2s)) return True;
         return False;
      }

      case UserErr:
      case ParamErr:
         if (e1_extra->isWrite != e2_extra->isWrite) return False;
         if (VG_(get_error_kind)(e1) == ParamErr
             && 0 != VG_(strcmp)(VG_(get_error_string)(e1),
                                 VG_(get_error_string)(e2))) return False;
         return True;

      case FreeErr:
      case FreeMismatchErr:
         /* JRS 2002-Aug-26: comparing addrs seems overkill and can
            cause excessive duplication of errors.  Not even AddrErr
            below does that.  So don't compare either the .addr field
            or the .addrinfo fields. */
         /* if (e1->addr != e2->addr) return False; */
         /* if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
               return False;
         */
         return True;

      case AddrErr:
         /* if (e1_extra->axskind != e2_extra->axskind) return False; */
         if (e1_extra->size != e2_extra->size) return False;
         /*
         if (!eq_AddrInfo(res, &e1_extra->addrinfo, &e2_extra->addrinfo))
            return False;
         */
         return True;

      case ValueErr:
         if (e1_extra->size != e2_extra->size) return False;
         return True;

      default:
         VG_(printf)("Error:\n unknown error code %d\n",
                     VG_(get_error_kind)(e1));
         VG_(skin_panic)("unknown error code in SK_(eq_SkinError)");
   }
}

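/* Describe address 'a' to the user, using whatever information has
   been filled into 'ai' (stack, heap block, stack red-zone, or
   unknown). */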
void MC_(pp_AddrInfo) ( Addr a, AddrInfo* ai )
{
   switch (ai->akind) {
      case Stack:
         VG_(message)(Vg_UserMsg,
                      " Address 0x%x is on thread %d's stack",
                      a, ai->stack_tid);
         break;
      case Unknown:
         if (ai->maybe_gcc) {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is just below %%esp. Possibly a bug in GCC/G++",
               a);
            VG_(message)(Vg_UserMsg,
               " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
         } else {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is not stack'd, malloc'd or free'd", a);
         }
         break;
      case Freed: case Mallocd: case UserG: case UserS: {
         UInt delta;
         UChar* relative;
         if (ai->rwoffset < 0) {
            delta    = (UInt)(- ai->rwoffset);
            relative = "before";
         } else if (ai->rwoffset >= ai->blksize) {
            delta    = ai->rwoffset - ai->blksize;
            relative = "after";
         } else {
            delta    = ai->rwoffset;
            relative = "inside";
         }
         if (ai->akind == UserS) {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is %d bytes %s a %d-byte stack red-zone created",
               a, delta, relative,
               ai->blksize );
         } else {
            VG_(message)(Vg_UserMsg,
               " Address 0x%x is %d bytes %s a block of size %d %s",
               a, delta, relative,
               ai->blksize,
               ai->akind==Mallocd ? "alloc'd"
                  : ai->akind==Freed ? "free'd"
                  : "client-defined");
         }
         VG_(pp_ExeContext)(ai->lastchange);
         break;
      }
      default:
         VG_(skin_panic)("MC_(pp_AddrInfo)");
   }
}

/*------------------------------------------------------------*/
/*--- Recording errors                                     ---*/
/*------------------------------------------------------------*/

/* Is this address within some small distance below %ESP?  Used only
   for the --workaround-gcc296-bugs kludge. */
static Bool is_just_below_ESP( Addr esp, Addr aa )
{
   if ((UInt)esp > (UInt)aa
       && ((UInt)esp - (UInt)aa) <= VG_GCC296_BUG_STACK_SLOP)
      return True;
   else
      return False;
}

/* This one called from generated code. */

void MC_(record_address_error) ( Addr a, Int size, Bool isWrite )
{
   MemCheckError err_extra;
   Bool          just_below_esp;

   just_below_esp = is_just_below_ESP( VG_(get_stack_pointer)(), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.axskind = isWrite ? WriteAxs : ReadAxs;
   err_extra.size    = size;
   err_extra.addrinfo.akind     = Undescribed;
   err_extra.addrinfo.maybe_gcc = just_below_esp;
   VG_(maybe_record_error)( NULL, AddrErr, a, /*s*/NULL, &err_extra );
}

/* These ones are called from non-generated code */

/* This is for memory errors in pthread functions, as opposed to pthread API
   errors which are found by the core. */
void MC_(record_core_mem_error) ( ThreadState* tst, Bool isWrite, Char* msg )
{
   MemCheckError err_extra;

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, CoreMemErr, /*addr*/0, msg, &err_extra );
}

void MC_(record_param_error) ( ThreadState* tst, Addr a, Bool isWrite,
                               Char* msg )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);
   MC_(clear_MemCheckError)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   err_extra.isWrite = isWrite;
   VG_(maybe_record_error)( tst, ParamErr, a, msg, &err_extra );
}

void MC_(record_jump_error) ( ThreadState* tst, Addr a )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.axskind = ExecAxs;
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, AddrErr, a, /*s*/NULL, &err_extra );
}

void MC_(record_free_error) ( ThreadState* tst, Addr a )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeErr, a, /*s*/NULL, &err_extra );
}

void MC_(record_freemismatch_error) ( ThreadState* tst, Addr a )
{
   MemCheckError err_extra;

   sk_assert(NULL != tst);

   MC_(clear_MemCheckError)( &err_extra );
   err_extra.addrinfo.akind = Undescribed;
   VG_(maybe_record_error)( tst, FreeMismatchErr, a, /*s*/NULL, &err_extra );
}

/*------------------------------------------------------------*/
/*--- Suppressions                                         ---*/
/*------------------------------------------------------------*/

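/* Read any extra, suppression-kind-specific lines of a suppression.
   For ParamSupp suppressions the next line names the offending
   syscall parameter; other kinds need nothing extra.  Returns False
   if the file ends prematurely. */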
Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp *su )
{
   Bool eof;

   if (VG_(get_supp_kind)(su) == ParamSupp) {
      eof = VG_(get_line) ( fd, buf, nBuf );
      if (eof) return False;
      VG_(set_supp_string)(su, VG_(strdup)(buf));
   }
   return True;
}

#define STREQ(s1,s2) (s1 != NULL && s2 != NULL \
                      && VG_(strcmp)((s1),(s2))==0)

Bool SK_(error_matches_suppression)(Error* err, Supp* su)
{
   UInt su_size;
   MemCheckError* err_extra = VG_(get_error_extra)(err);
   ErrorKind      ekind     = VG_(get_error_kind )(err);

   switch (VG_(get_supp_kind)(su)) {
      case ParamSupp:
         return (ekind == ParamErr
                 && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));

      case CoreMemSupp:
         return (ekind == CoreMemErr
                 && STREQ(VG_(get_error_string)(err), VG_(get_supp_string)(su)));

      case Value0Supp: su_size = 0; goto value_case;
      case Value1Supp: su_size = 1; goto value_case;
      case Value2Supp: su_size = 2; goto value_case;
      case Value4Supp: su_size = 4; goto value_case;
      case Value8Supp: su_size = 8; goto value_case;
      value_case:
         return (ekind == ValueErr && err_extra->size == su_size);

      case Addr1Supp: su_size = 1; goto addr_case;
      case Addr2Supp: su_size = 2; goto addr_case;
      case Addr4Supp: su_size = 4; goto addr_case;
      case Addr8Supp: su_size = 8; goto addr_case;
      addr_case:
         return (ekind == AddrErr && err_extra->size == su_size);

      case FreeSupp:
         return (ekind == FreeErr || ekind == FreeMismatchErr);

      case LeakSupp:
         return False; /* Doesn't match any normal error */

      default:
         VG_(printf)("Error:\n"
                     " unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(skin_panic)("unknown suppression type in "
                         "SK_(error_matches_suppression)");
   }
}

# undef STREQ

/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.
   Ones marked 'M' are MemCheck only.  Ones marked 'A' are AddrCheck only.
   The rest are shared.

   10   alloc_secondary_map

   20   get_abit
M  21   get_vbyte
   22   set_abit
M  23   set_vbyte
   24   get_abits4_ALIGNED
M  25   get_vbytes4_ALIGNED

   30   set_address_range_perms
   31   set_address_range_perms(lower byte loop)
   32   set_address_range_perms(quadword loop)
   33   set_address_range_perms(upper byte loop)

   35   make_noaccess
   36   make_writable
   37   make_readable
A  38   make_accessible

   40   copy_address_range_state
   41   copy_address_range_state(byte loop)
   42   check_writable
   43   check_writable(byte loop)
   44   check_readable
   45   check_readable(byte loop)
   46   check_readable_asciiz
   47   check_readable_asciiz(byte loop)
A  48   check_accessible
A  49   check_accessible(byte loop)

   50   make_noaccess_aligned
   51   make_writable_aligned

M  60   helperc_LOADV4
M  61   helperc_STOREV4
M  62   helperc_LOADV2
M  63   helperc_STOREV2
M  64   helperc_LOADV1
M  65   helperc_STOREV1

A  66   helperc_ACCESS4
A  67   helperc_ACCESS2
A  68   helperc_ACCESS1

M  70   rim_rd_V4_SLOWLY
M  71   rim_wr_V4_SLOWLY
M  72   rim_rd_V2_SLOWLY
M  73   rim_wr_V2_SLOWLY
M  74   rim_rd_V1_SLOWLY
M  75   rim_wr_V1_SLOWLY

A  76   ACCESS4_SLOWLY
A  77   ACCESS2_SLOWLY
A  78   ACCESS1_SLOWLY

   80   fpu_read
   81   fpu_read aligned 4
   82   fpu_read aligned 8
   83   fpu_read 2
   84   fpu_read 10/28/108

M  85   fpu_write
M  86   fpu_write aligned 4
M  87   fpu_write aligned 8
M  88   fpu_write 2
M  89   fpu_write 10/28/108

   90   fpu_access
   91   fpu_access aligned 4
   92   fpu_access aligned 8
   93   fpu_access 2
   94   fpu_access 10/28/108

   100  fpu_access_check_SLOWLY
   101  fpu_access_check_SLOWLY(byte loop)
*/

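/* The real counters and the init/done routines are only compiled in
   when VG_PROFILE_MEMORY is defined; otherwise they reduce to empty
   stubs and PROF_EVENT expands to nothing. */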
#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

extern UInt MC_(event_ctr)[N_PROF_EVENTS];

void MC_(init_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++)
      MC_(event_ctr)[i] = 0;
}

void MC_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (MC_(event_ctr)[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, MC_(event_ctr)[i] );
   }
   VG_(printf)("\n");
}

#else

void MC_(init_prof_mem) ( void ) { }
void MC_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif

/*------------------------------------------------------------*/
/*--- Shadow chunks info                                   ---*/
/*------------------------------------------------------------*/

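/* The first extra word of a shadow chunk holds the ExeContext
   describing where the block was last allocated or freed. */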
void MC_(set_where)( ShadowChunk* sc, ExeContext* ec )
{
   VG_(set_sc_extra)( sc, 0, (UInt)ec );
}

ExeContext *MC_(get_where)( ShadowChunk* sc )
{
   return (ExeContext*)VG_(get_sc_extra)(sc, 0);
}

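/* Core callback: fill in the skin-specific part of a freshly created
   shadow chunk by recording the current ExeContext, i.e. where the
   block was allocated. */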
void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   VG_(set_sc_extra) ( sc, 0, (UInt)VG_(get_ExeContext)(tst) );
}


/*------------------------------------------------------------*/
/*--- Postponing free()ing                                 ---*/
/*------------------------------------------------------------*/

/* Holds blocks after freeing. */
static ShadowChunk* freed_list_start  = NULL;
static ShadowChunk* freed_list_end    = NULL;
static Int          freed_list_volume = 0;

__attribute__ ((unused))
Int MC_(count_freelist) ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
      n++;
   return n;
}

__attribute__ ((unused))
void MC_(freelist_sanity) ( void )
{
   ShadowChunk* sc;
   Int n = 0;
   /* VG_(printf)("freelist sanity\n"); */
   for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
      n += VG_(get_sc_size)(sc);
   sk_assert(n == freed_list_volume);
}

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( ShadowChunk* sc )
{
   ShadowChunk* sc1;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      sk_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = sc;
      freed_list_volume = VG_(get_sc_size)(sc);
   } else {
      sk_assert(VG_(get_sc_next)(freed_list_end) == NULL);
      VG_(set_sc_next)(freed_list_end, sc);
      freed_list_end = sc;
      freed_list_volume += VG_(get_sc_size)(sc);
   }
   VG_(set_sc_next)(sc, NULL);

   /* Release enough of the oldest blocks to bring the free queue
      volume below MC_(clo_freelist_vol). */

   while (freed_list_volume > MC_(clo_freelist_vol)) {
      /* freelist_sanity(); */
      sk_assert(freed_list_start != NULL);
      sk_assert(freed_list_end != NULL);

      sc1 = freed_list_start;
      freed_list_volume -= VG_(get_sc_size)(sc1);
      /* VG_(printf)("volume now %d\n", freed_list_volume); */
      sk_assert(freed_list_volume >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = VG_(get_sc_next)(sc1);
      }
      VG_(set_sc_next)(sc1, NULL); /* just paranoia */
      VG_(free_ShadowChunk) ( sc1 );
   }
}

/* Return the first shadow chunk satisfying the predicate p. */
ShadowChunk* MC_(any_matching_freed_ShadowChunks) ( Bool (*p)(ShadowChunk*) )
{
   ShadowChunk* sc;

   /* No point looking through freed blocks if we're not keeping
      them around for a while... */
   for (sc = freed_list_start; sc != NULL; sc = VG_(get_sc_next)(sc))
      if (p(sc))
         return sc;

   return NULL;
}

void SK_(alt_free) ( ShadowChunk* sc, ThreadState* tst )
{
   /* Record where freed */
   MC_(set_where)( sc, VG_(get_ExeContext) ( tst ) );

   /* Put it out of harm's way for a while. */
   add_to_freed_queue ( sc );
}

/*------------------------------------------------------------*/
/*--- Syscall wrappers                                     ---*/
/*------------------------------------------------------------*/

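/* Run a cheap sanity check before each syscall and again afterwards;
   the pre-call result is threaded through as the (void*) return value
   so the post-call hook can detect a check that newly fails. */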
void* SK_(pre_syscall) ( ThreadId tid, UInt syscallno, Bool isBlocking )
{
   Int sane = SK_(cheap_sanity_check)();
   return (void*)sane;
}

void SK_(post_syscall) ( ThreadId tid, UInt syscallno,
                         void* pre_result, Int res, Bool isBlocking )
{
   Int  sane_before_call = (Int)pre_result;
   Bool sane_after_call  = SK_(cheap_sanity_check)();

   if ((Int)sane_before_call && (!sane_after_call)) {
      VG_(message)(Vg_DebugMsg, "post-syscall: ");
      VG_(message)(Vg_DebugMsg,
                   "probable sanity check failure for syscall number %d\n",
                   syscallno );
      VG_(skin_panic)("aborting due to the above ... bye!");
   }
}


/*--------------------------------------------------------------------*/
/*--- end                                                mc_common.c ---*/
/*--------------------------------------------------------------------*/