blob: 659bc56a2873bcf895910213dafd77e5ad844dc2 [file] [log] [blame]
sewardjf98e1c02008-10-25 16:22:41 +00001
2/*--------------------------------------------------------------------*/
3/*--- Error management for Helgrind. ---*/
4/*--- hg_errors.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
11 Copyright (C) 2007-2008 OpenWorks Ltd
12 info@open-works.co.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "pub_tool_basics.h"
33#include "pub_tool_libcbase.h"
34#include "pub_tool_libcassert.h"
35#include "pub_tool_libcprint.h"
36#include "pub_tool_execontext.h"
37#include "pub_tool_errormgr.h"
38#include "pub_tool_wordfm.h"
39#include "pub_tool_xarray.h"
40#include "pub_tool_debuginfo.h"
41#include "pub_tool_threadstate.h"
42
43#include "hg_basics.h"
44#include "hg_wordset.h"
45#include "hg_lock_n_thread.h"
sewardjc5ea9962008-12-07 01:41:46 +000046#include "libhb.h"
sewardjf98e1c02008-10-25 16:22:41 +000047#include "hg_errors.h" /* self */
48
49
50/*----------------------------------------------------------------*/
51/*--- ---*/
52/*----------------------------------------------------------------*/
53
54/* This has to do with printing error messages. See comments on
55 announce_threadset() and summarise_threadset(). Perhaps it
56 should be a command line option. */
57#define N_THREADS_TO_ANNOUNCE 5
58
59
60/*----------------------------------------------------------------*/
61/*--- Error management ---*/
62/*----------------------------------------------------------------*/
63
64/* maps (by value) strings to a copy of them in ARENA_TOOL */
65
66static WordFM* string_table = NULL;
67
68ULong HG_(stats__string_table_queries) = 0;
69
70ULong HG_(stats__string_table_get_map_size) ( void ) {
71 return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
72}
73
74static Word string_table_cmp ( UWord s1, UWord s2 ) {
75 return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
76}
77
78static HChar* string_table_strdup ( HChar* str ) {
79 HChar* copy = NULL;
80 HG_(stats__string_table_queries)++;
81 if (!str)
82 str = "(null)";
83 if (!string_table) {
84 string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
85 HG_(free), string_table_cmp );
86 tl_assert(string_table);
87 }
88 if (VG_(lookupFM)( string_table,
89 NULL, (Word*)&copy, (Word)str )) {
90 tl_assert(copy);
91 if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
92 return copy;
93 } else {
94 copy = HG_(strdup)("hg.sts.2", str);
95 tl_assert(copy);
96 VG_(addToFM)( string_table, (Word)copy, (Word)copy );
97 return copy;
98 }
99}
100
101/* maps from Lock .unique fields to LockP*s */
102
103static WordFM* map_LockN_to_P = NULL;
104
105ULong HG_(stats__LockN_to_P_queries) = 0;
106
107ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
108 return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
109}
110
111static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
112{
113 Lock* lk1 = (Lock*)lk1W;
114 Lock* lk2 = (Lock*)lk2W;
115 tl_assert( HG_(is_sane_LockNorP)(lk1) );
116 tl_assert( HG_(is_sane_LockNorP)(lk2) );
117 if (lk1->unique < lk2->unique) return -1;
118 if (lk1->unique > lk2->unique) return 1;
119 return 0;
120}
121
/* Return a persistent ("LockP") copy of the live ("LockN") lock
   'lkn', creating and caching one on first request.  Copies are keyed
   by lkn->unique (see lock_unique_cmp), so error reports can keep
   referring to a lock after the LockN itself has gone.  The map is
   created lazily on first use. */
static Lock* mk_LockP_from_LockN ( Lock* lkn )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;
   tl_assert( HG_(is_sane_LockN)(lkn) );
   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
      tl_assert(map_LockN_to_P);
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (Word*)&lkp, (Word)lkn)) {
      /* Not seen before: make a persistent shallow copy, then scrub
         the fields that must not be shared with the live lock. */
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (Word)lkp, (Word)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
149
150/* Errors:
151
152 race: program counter
153 read or write
154 data size
155 previous state
156 current state
157
158 FIXME: how does state printing interact with lockset gc?
159 Are the locksets in prev/curr state always valid?
160 Ditto question for the threadsets
161 ThreadSets - probably are always valid if Threads
162 are never thrown away.
163 LockSets - could at least print the lockset elements that
164 correspond to actual locks at the time of printing. Hmm.
165*/
166
167/* Error kinds */
/* Error kinds.  One tag per distinct class of error Helgrind can
   report; values start at 1101 so they cannot collide with core
   error kinds. */
typedef
   enum {
      XE_Race=1101,      // race
      XE_FreeMemLock,    // freeing memory containing a locked lock
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;
180
181/* Extra contexts for kinds */
/* Extra contexts for kinds: the per-kind payload attached to each
   error.  'tag' selects the active member of the union. */
typedef
   struct {
      XErrorTag tag;
      union {
         struct {
            Addr  data_addr;   /* address raced upon */
            Int   szB;         /* access size in bytes */
            Bool  isWrite;     /* True => write access, False => read */
            ExeContext* mb_lastlock; /* maybe: supplied by the caller of
                                        HG_(record_error_Race); may be NULL */
            ExeContext* mb_confacc;  /* maybe: where the conflicting access
                                        happened (filled in by
                                        HG_(update_extra)) */
            Thread* thr;             /* thread making this access */
            Thread* mb_confaccthr;   /* maybe: thread of conflicting access */
            Int   mb_confaccSzB;     /* maybe: size of conflicting access */
            Bool  mb_confaccIsW;     /* maybe: conflicting access is write? */
            Char  descr1[96];        /* source-level description of the */
            Char  descr2[96];        /* address, via VG_(get_data_description) */
         } Race;
         struct {
            Thread* thr;  /* doing the freeing */
            Lock*   lock; /* lock which is locked */
         } FreeMemLock;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;   /* doing the unlocking */
            Thread* owner; /* thread that actually holds the lock */
            Lock*   lock;  /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread* thr;
            Addr    before_ga; /* always locked first in prog. history */
            Addr    after_ga;
            ExeContext* before_ec;
            ExeContext* after_ec;
         } LockOrder;
         struct {
            Thread* thr;
            HChar*  errstr; /* persistent, in tool-arena */
         } Misc;
      } XE;
   }
   XError;
236
/* Zero out *xe and give it a deliberately out-of-range tag, so that
   use of an XError whose tag was never properly set is detectable. */
static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}
241
242
243/* Extensions of suppressions */
/* Extensions of suppressions: one suppression kind per error kind.
   Values start at 1201 to stay disjoint from both core suppression
   kinds and the XErrorTag range above. */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;
256
257
/* Updates the copy with address info if necessary.  Called by the
   core error manager once it has decided an error is worth keeping.
   For XE_Race errors this is where the two expensive lookups happen:
   a source-level description of the raced-upon address, and the
   conflicting access (if libhb still has it).  Returns the size of
   the extra-info block so the core can take its own copy. */
UInt HG_(update_extra) ( Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {
      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;  /* debug-only counter of lookups done */
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);
      tl_assert(sizeof(xe->XE.Race.descr1) == sizeof(xe->XE.Race.descr2));
      if (VG_(get_data_description)(
                &xe->XE.Race.descr1[0],
                &xe->XE.Race.descr2[0],
                sizeof(xe->XE.Race.descr1)-1,
                xe->XE.Race.data_addr )) {
         /* On success both buffers must still be NUL-terminated. */
         tl_assert( xe->XE.Race.descr1
                       [ sizeof(xe->XE.Race.descr1)-1 ] == 0);
         tl_assert( xe->XE.Race.descr2
                       [ sizeof(xe->XE.Race.descr2)-1 ] == 0);
      }
      /* Ask libhb for the conflicting access, and if found, translate
         its Thr* back into our Thread* and record the details. */
      { Thr*        thrp    = NULL;
        ExeContext* wherep  = NULL;
        Addr  acc_addr = xe->XE.Race.data_addr;
        Int   acc_szB  = xe->XE.Race.szB;
        Thr*  acc_thr  = xe->XE.Race.thr->hbthr;
        Bool  acc_isW  = xe->XE.Race.isWrite;
        SizeT conf_szB = 0;
        Bool  conf_isW = False;
        tl_assert(!xe->XE.Race.mb_confacc);
        tl_assert(!xe->XE.Race.mb_confaccthr);
        if (libhb_event_map_lookup(
               &wherep, &thrp, &conf_szB, &conf_isW,
               acc_thr, acc_addr, acc_szB, acc_isW )) {
           Thread* threadp;
           tl_assert(wherep);
           tl_assert(thrp);
           threadp = libhb_get_Thr_opaque( thrp );
           tl_assert(threadp);
           xe->XE.Race.mb_confacc    = wherep;
           xe->XE.Race.mb_confaccthr = threadp;
           xe->XE.Race.mb_confaccSzB = (Int)conf_szB;
           xe->XE.Race.mb_confaccIsW = conf_isW;
        }
      }
   }

   return sizeof(XError);
}
316
/* Record a data-race error: thread 'thr' made an access of 'szB'
   bytes at 'data_addr'; 'isWrite' gives the direction.
   'mb_lastlock' may be NULL.  The conflicting-access fields are
   deliberately left unset here and filled in later by
   HG_(update_extra), if the error survives duplicate filtering. */
void HG_(record_error_Race) ( Thread* thr, 
                              Addr data_addr, Int szB, Bool isWrite,
                              ExeContext* mb_lastlock )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
     VgSectKind sect = VG_(seginfo_sect_kind)( NULL, 0, data_addr );
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     /* SectGOTPLT is required on ???-linux
        (NOTE(review): original comment said "SectPLT" here, which is
        a copy of the comment below; the check is for GOTPLT.) */
     if (sect == Vg_SectGOTPLT) return;
     /* SectPLT is required on ppc32/64-linux */
     if (sect == Vg_SectPLT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr   = data_addr;
   xe.XE.Race.szB         = szB;
   xe.XE.Race.isWrite     = isWrite;
   xe.XE.Race.mb_lastlock = mb_lastlock;
   xe.XE.Race.thr         = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   xe.XE.Race.descr1[0] = xe.XE.Race.descr2[0] = 0;
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in in 
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.mb_confaccSzB = 0;
   xe.XE.Race.mb_confaccIsW = False;
   xe.XE.Race.mb_confacc    = NULL;
   xe.XE.Race.mb_confaccthr = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}
366
367void HG_(record_error_FreeMemLock) ( Thread* thr, Lock* lk )
368{
369 XError xe;
370 tl_assert( HG_(is_sane_Thread)(thr) );
371 tl_assert( HG_(is_sane_LockN)(lk) );
372 init_XError(&xe);
373 xe.tag = XE_FreeMemLock;
374 xe.XE.FreeMemLock.thr = thr;
375 xe.XE.FreeMemLock.lock = mk_LockP_from_LockN(lk);
376 // FIXME: tid vs thr
377 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
378 tl_assert( thr->coretid != VG_INVALID_THREADID );
379 VG_(maybe_record_error)( thr->coretid,
380 XE_FreeMemLock, 0, NULL, &xe );
381}
382
383void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
384{
385 XError xe;
386 tl_assert( HG_(is_sane_Thread)(thr) );
387 tl_assert( HG_(is_sane_LockN)(lk) );
388 init_XError(&xe);
389 xe.tag = XE_UnlockUnlocked;
390 xe.XE.UnlockUnlocked.thr = thr;
391 xe.XE.UnlockUnlocked.lock = mk_LockP_from_LockN(lk);
392 // FIXME: tid vs thr
393 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
394 tl_assert( thr->coretid != VG_INVALID_THREADID );
395 VG_(maybe_record_error)( thr->coretid,
396 XE_UnlockUnlocked, 0, NULL, &xe );
397}
398
399void HG_(record_error_UnlockForeign) ( Thread* thr,
400 Thread* owner, Lock* lk )
401{
402 XError xe;
403 tl_assert( HG_(is_sane_Thread)(thr) );
404 tl_assert( HG_(is_sane_Thread)(owner) );
405 tl_assert( HG_(is_sane_LockN)(lk) );
406 init_XError(&xe);
407 xe.tag = XE_UnlockForeign;
408 xe.XE.UnlockForeign.thr = thr;
409 xe.XE.UnlockForeign.owner = owner;
410 xe.XE.UnlockForeign.lock = mk_LockP_from_LockN(lk);
411 // FIXME: tid vs thr
412 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
413 tl_assert( thr->coretid != VG_INVALID_THREADID );
414 VG_(maybe_record_error)( thr->coretid,
415 XE_UnlockForeign, 0, NULL, &xe );
416}
417
418void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
419{
420 XError xe;
421 tl_assert( HG_(is_sane_Thread)(thr) );
422 init_XError(&xe);
423 xe.tag = XE_UnlockBogus;
424 xe.XE.UnlockBogus.thr = thr;
425 xe.XE.UnlockBogus.lock_ga = lock_ga;
426 // FIXME: tid vs thr
427 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
428 tl_assert( thr->coretid != VG_INVALID_THREADID );
429 VG_(maybe_record_error)( thr->coretid,
430 XE_UnlockBogus, 0, NULL, &xe );
431}
432
433void HG_(record_error_LockOrder)(
434 Thread* thr, Addr before_ga, Addr after_ga,
435 ExeContext* before_ec, ExeContext* after_ec
436 )
437{
438 XError xe;
439 tl_assert( HG_(is_sane_Thread)(thr) );
440 if (!HG_(clo_track_lockorders))
441 return;
442 init_XError(&xe);
443 xe.tag = XE_LockOrder;
444 xe.XE.LockOrder.thr = thr;
445 xe.XE.LockOrder.before_ga = before_ga;
446 xe.XE.LockOrder.before_ec = before_ec;
447 xe.XE.LockOrder.after_ga = after_ga;
448 xe.XE.LockOrder.after_ec = after_ec;
449 // FIXME: tid vs thr
450 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
451 tl_assert( thr->coretid != VG_INVALID_THREADID );
452 VG_(maybe_record_error)( thr->coretid,
453 XE_LockOrder, 0, NULL, &xe );
454}
455
456void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname,
457 Word err, HChar* errstr )
458{
459 XError xe;
460 tl_assert( HG_(is_sane_Thread)(thr) );
461 tl_assert(fnname);
462 tl_assert(errstr);
463 init_XError(&xe);
464 xe.tag = XE_PthAPIerror;
465 xe.XE.PthAPIerror.thr = thr;
466 xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
467 xe.XE.PthAPIerror.err = err;
468 xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
469 // FIXME: tid vs thr
470 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
471 tl_assert( thr->coretid != VG_INVALID_THREADID );
472 VG_(maybe_record_error)( thr->coretid,
473 XE_PthAPIerror, 0, NULL, &xe );
474}
475
476void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
477{
478 XError xe;
479 tl_assert( HG_(is_sane_Thread)(thr) );
480 tl_assert(errstr);
481 init_XError(&xe);
482 xe.tag = XE_Misc;
483 xe.XE.Misc.thr = thr;
484 xe.XE.Misc.errstr = string_table_strdup(errstr);
485 // FIXME: tid vs thr
486 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
487 tl_assert( thr->coretid != VG_INVALID_THREADID );
488 VG_(maybe_record_error)( thr->coretid,
489 XE_Misc, 0, NULL, &xe );
490}
491
492Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
493{
494 XError *xe1, *xe2;
495
496 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
497
498 xe1 = (XError*)VG_(get_error_extra)(e1);
499 xe2 = (XError*)VG_(get_error_extra)(e2);
500 tl_assert(xe1);
501 tl_assert(xe2);
502
503 switch (VG_(get_error_kind)(e1)) {
504 case XE_Race:
505 return xe1->XE.Race.szB == xe2->XE.Race.szB
506 && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
507 && (HG_(clo_cmp_race_err_addrs)
508 ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
509 : True);
510 case XE_FreeMemLock:
511 return xe1->XE.FreeMemLock.thr == xe2->XE.FreeMemLock.thr
512 && xe1->XE.FreeMemLock.lock == xe2->XE.FreeMemLock.lock;
513 case XE_UnlockUnlocked:
514 return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
515 && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
516 case XE_UnlockForeign:
517 return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
518 && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
519 && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
520 case XE_UnlockBogus:
521 return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
522 && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
523 case XE_PthAPIerror:
524 return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
525 && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
526 xe2->XE.PthAPIerror.fnname)
527 && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
528 case XE_LockOrder:
529 return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
530 case XE_Misc:
531 return xe1->XE.Misc.thr == xe2->XE.Misc.thr
532 && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
533 default:
534 tl_assert(0);
535 }
536
537 /*NOTREACHED*/
538 tl_assert(0);
539}
540
541
542/* Announce (that is, print the point-of-creation) of 'thr'. Only do
543 this once, as we only want to see these announcements once per
544 thread. */
545static void announce_one_thread ( Thread* thr )
546{
547 tl_assert(HG_(is_sane_Thread)(thr));
548 tl_assert(thr->errmsg_index >= 1);
549 if (!thr->announced) {
550 if (thr->errmsg_index == 1) {
551 tl_assert(thr->created_at == NULL);
552 VG_(message)(Vg_UserMsg, "Thread #%d is the program's root thread",
553 thr->errmsg_index);
554 } else {
555 tl_assert(thr->created_at != NULL);
556 VG_(message)(Vg_UserMsg, "Thread #%d was created",
557 thr->errmsg_index);
558 VG_(pp_ExeContext)( thr->created_at );
559 }
560 VG_(message)(Vg_UserMsg, "");
561 thr->announced = True;
562 }
563}
564
565
/* Pretty-print the given error to the user, dispatching on its kind.
   Each case first announces (once) the thread(s) involved, then
   prints a kind-specific message and the stack where the error was
   detected, plus any auxiliary contexts stored in the XError. */
void HG_(pp_Error) ( Error* err )
{
   XError *xe = (XError*)VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );
      announce_one_thread( xe->XE.Misc.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d: %s",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      break;
   }

   case XE_LockOrder: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );
      announce_one_thread( xe->XE.LockOrder.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d: lock order \"%p before %p\" violated",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.before_ga,
                  (void*)xe->XE.LockOrder.after_ga);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      /* If we still have both acquisition points, show where the
         required order was established. */
      if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
         VG_(message)(Vg_UserMsg,
            "  Required order was established by acquisition of lock at %p",
            (void*)xe->XE.LockOrder.before_ga);
         VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
         VG_(message)(Vg_UserMsg,
            "  followed by a later acquisition of lock at %p", 
            (void*)xe->XE.LockOrder.after_ga);
         VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
      }
      break;
   }

   case XE_PthAPIerror: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );
      announce_one_thread( xe->XE.PthAPIerror.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d's call to %s failed",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                  xe->XE.PthAPIerror.fnname);
      VG_(message)(Vg_UserMsg,
                  "   with error code %ld (%s)",
                  xe->XE.PthAPIerror.err,
                  xe->XE.PthAPIerror.errstr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      break;
   }

   case XE_UnlockBogus: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );
      announce_one_thread( xe->XE.UnlockBogus.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d unlocked an invalid lock at %p ",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                  (void*)xe->XE.UnlockBogus.lock_ga);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      break;
   }

   case XE_UnlockForeign: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );
      /* Two threads are involved here; announce both. */
      announce_one_thread( xe->XE.UnlockForeign.thr );
      announce_one_thread( xe->XE.UnlockForeign.owner );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d unlocked lock at %p "
                  "currently held by thread #%d",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                  (void*)xe->XE.UnlockForeign.lock->guestaddr,
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      if (xe->XE.UnlockForeign.lock->appeared_at) {
         VG_(message)(Vg_UserMsg,
                     "  Lock at %p was first observed",
                     (void*)xe->XE.UnlockForeign.lock->guestaddr);
         VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
      }
      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );
      announce_one_thread( xe->XE.UnlockUnlocked.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d unlocked a not-locked lock at %p ",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      if (xe->XE.UnlockUnlocked.lock->appeared_at) {
         VG_(message)(Vg_UserMsg,
                     "  Lock at %p was first observed",
                     (void*)xe->XE.UnlockUnlocked.lock->guestaddr);
         VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
      }
      break;
   }

   case XE_FreeMemLock: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_LockP)( xe->XE.FreeMemLock.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.FreeMemLock.thr ) );
      announce_one_thread( xe->XE.FreeMemLock.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d deallocated location %p "
                  "containing a locked lock",
                  (Int)xe->XE.FreeMemLock.thr->errmsg_index,
                  (void*)xe->XE.FreeMemLock.lock->guestaddr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      if (xe->XE.FreeMemLock.lock->appeared_at) {
         VG_(message)(Vg_UserMsg,
                     "  Lock at %p was first observed",
                     (void*)xe->XE.FreeMemLock.lock->guestaddr);
         VG_(pp_ExeContext)( xe->XE.FreeMemLock.lock->appeared_at );
      }
      break;
   }

   case XE_Race: {
      Addr      err_ga;
      HChar*    what;
      Int       szB;
      what      = xe->XE.Race.isWrite ? "write" : "read";
      szB       = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      announce_one_thread( xe->XE.Race.thr );
      if (xe->XE.Race.mb_confaccthr)
         announce_one_thread( xe->XE.Race.mb_confaccthr );
      VG_(message)(Vg_UserMsg,
         "Possible data race during %s of size %d at %#lx by thread #%d",
         what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index
      );
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      /* Show the conflicting access, if HG_(update_extra) found one. */
      if (xe->XE.Race.mb_confacc) {
         if (xe->XE.Race.mb_confaccthr) {
            VG_(message)(Vg_UserMsg,
               " This conflicts with a previous %s of size %d by thread #%d",
               xe->XE.Race.mb_confaccIsW ? "write" : "read",
               xe->XE.Race.mb_confaccSzB,
               xe->XE.Race.mb_confaccthr->errmsg_index
            );
         } else {
            // FIXME: can this ever happen?
            VG_(message)(Vg_UserMsg,
               " This conflicts with a previous %s of size %d",
               xe->XE.Race.mb_confaccIsW ? "write" : "read",
               xe->XE.Race.mb_confaccSzB
            );
         }
         VG_(pp_ExeContext)( xe->XE.Race.mb_confacc );
      }


      /* If we have a better description of the address, show it. */
      if (xe->XE.Race.descr1[0] != 0)
         VG_(message)(Vg_UserMsg, " %s", &xe->XE.Race.descr1[0]);
      if (xe->XE.Race.descr2[0] != 0)
         VG_(message)(Vg_UserMsg, " %s", &xe->XE.Race.descr2[0]);

      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}
746
747Char* HG_(get_error_name) ( Error* err )
748{
749 switch (VG_(get_error_kind)(err)) {
750 case XE_Race: return "Race";
751 case XE_FreeMemLock: return "FreeMemLock";
752 case XE_UnlockUnlocked: return "UnlockUnlocked";
753 case XE_UnlockForeign: return "UnlockForeign";
754 case XE_UnlockBogus: return "UnlockBogus";
755 case XE_PthAPIerror: return "PthAPIerror";
756 case XE_LockOrder: return "LockOrder";
757 case XE_Misc: return "Misc";
758 default: tl_assert(0); /* fill in missing case */
759 }
760}
761
762Bool HG_(recognised_suppression) ( Char* name, Supp *su )
763{
764# define TRY(_name,_xskind) \
765 if (0 == VG_(strcmp)(name, (_name))) { \
766 VG_(set_supp_kind)(su, (_xskind)); \
767 return True; \
768 }
769 TRY("Race", XS_Race);
770 TRY("FreeMemLock", XS_FreeMemLock);
771 TRY("UnlockUnlocked", XS_UnlockUnlocked);
772 TRY("UnlockForeign", XS_UnlockForeign);
773 TRY("UnlockBogus", XS_UnlockBogus);
774 TRY("PthAPIerror", XS_PthAPIerror);
775 TRY("LockOrder", XS_LockOrder);
776 TRY("Misc", XS_Misc);
777 return False;
778# undef TRY
779}
780
/* Hook for parsing tool-specific lines of a suppression entry.
   Helgrind suppressions carry no extra information, so there is
   nothing to read; returning True tells the core that parsing
   succeeded. */
Bool HG_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
                                        Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}
788
789Bool HG_(error_matches_suppression) ( Error* err, Supp* su )
790{
791 switch (VG_(get_supp_kind)(su)) {
792 case XS_Race: return VG_(get_error_kind)(err) == XE_Race;
793 case XS_FreeMemLock: return VG_(get_error_kind)(err) == XE_FreeMemLock;
794 case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
795 case XS_UnlockForeign: return VG_(get_error_kind)(err) == XE_UnlockForeign;
796 case XS_UnlockBogus: return VG_(get_error_kind)(err) == XE_UnlockBogus;
797 case XS_PthAPIerror: return VG_(get_error_kind)(err) == XE_PthAPIerror;
798 case XS_LockOrder: return VG_(get_error_kind)(err) == XE_LockOrder;
799 case XS_Misc: return VG_(get_error_kind)(err) == XE_Misc;
800 //case XS_: return VG_(get_error_kind)(err) == XE_;
801 default: tl_assert(0); /* fill in missing cases */
802 }
803}
804
/* Hook for printing tool-specific lines of a generated suppression.
   Helgrind suppressions have no extra information, so nothing is
   printed. */
void HG_(print_extra_suppression_info) ( Error* err )
{
   /* Do nothing */
}
809
810
811/*--------------------------------------------------------------------*/
812/*--- end hg_errors.c ---*/
813/*--------------------------------------------------------------------*/