blob: 2c70ff62b8cbda143e17f9582001545b6b142215 [file] [log] [blame]
sewardjf98e1c02008-10-25 16:22:41 +00001
2/*--------------------------------------------------------------------*/
3/*--- Error management for Helgrind. ---*/
4/*--- hg_errors.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
11 Copyright (C) 2007-2008 OpenWorks Ltd
12 info@open-works.co.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "pub_tool_basics.h"
33#include "pub_tool_libcbase.h"
34#include "pub_tool_libcassert.h"
35#include "pub_tool_libcprint.h"
36#include "pub_tool_execontext.h"
37#include "pub_tool_errormgr.h"
38#include "pub_tool_wordfm.h"
39#include "pub_tool_xarray.h"
40#include "pub_tool_debuginfo.h"
41#include "pub_tool_threadstate.h"
42
43#include "hg_basics.h"
44#include "hg_wordset.h"
45#include "hg_lock_n_thread.h"
sewardjc5ea9962008-12-07 01:41:46 +000046#include "libhb.h"
sewardjf98e1c02008-10-25 16:22:41 +000047#include "hg_errors.h" /* self */
48
49
50/*----------------------------------------------------------------*/
51/*--- ---*/
52/*----------------------------------------------------------------*/
53
54/* This has to do with printing error messages. See comments on
55 announce_threadset() and summarise_threadset(). Perhaps it
56 should be a command line option. */
57#define N_THREADS_TO_ANNOUNCE 5
58
59
60/*----------------------------------------------------------------*/
61/*--- Error management ---*/
62/*----------------------------------------------------------------*/
63
/* maps (by value) strings to a copy of them in ARENA_TOOL */

/* Lazily-created map; keys and values are the same interned copy of
   each string (see string_table_strdup below). */
static WordFM* string_table = NULL;

/* Statistics: number of interning queries made so far. */
ULong HG_(stats__string_table_queries) = 0;
69
70ULong HG_(stats__string_table_get_map_size) ( void ) {
71 return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
72}
73
74static Word string_table_cmp ( UWord s1, UWord s2 ) {
75 return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
76}
77
/* Return a tool-arena copy of 'str', interned: repeated calls with
   equal strings return the same pointer.  A NULL argument is mapped
   to the literal "(null)".  The returned string is owned by the table
   and must not be freed by the caller. */
static HChar* string_table_strdup ( HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   /* Create the table on first use. */
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
      tl_assert(string_table);
   }
   /* Already interned?  Hand back the existing copy. */
   if (VG_(lookupFM)( string_table,
                      NULL, (Word*)&copy, (Word)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      /* Not seen before: duplicate into the tool arena and record it,
         using the copy itself as both key and value. */
      copy = HG_(strdup)("hg.sts.2", str);
      tl_assert(copy);
      VG_(addToFM)( string_table, (Word)copy, (Word)copy );
      return copy;
   }
}
100
/* maps from Lock .unique fields to LockP*s */

/* Lazily-created map from "normal" locks (LockN) to persistent copies
   (LockP), keyed by the lock's .unique field (see lock_unique_cmp). */
static WordFM* map_LockN_to_P = NULL;

/* Statistics: number of LockN-to-LockP lookups made so far. */
ULong HG_(stats__LockN_to_P_queries) = 0;
106
107ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
108 return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
109}
110
111static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
112{
113 Lock* lk1 = (Lock*)lk1W;
114 Lock* lk2 = (Lock*)lk2W;
115 tl_assert( HG_(is_sane_LockNorP)(lk1) );
116 tl_assert( HG_(is_sane_LockNorP)(lk2) );
117 if (lk1->unique < lk2->unique) return -1;
118 if (lk1->unique > lk2->unique) return 1;
119 return 0;
120}
121
/* Return the persistent (LockP) copy of 'lkn', creating and caching
   one on first sight.  The copy is a snapshot of the lock's identity
   only: holder/acquisition/synch-object state is deliberately
   dropped, so the copy stays valid after the real lock dies. */
static Lock* mk_LockP_from_LockN ( Lock* lkn )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;
   tl_assert( HG_(is_sane_LockN)(lkn) );
   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
      tl_assert(map_LockN_to_P);
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (Word*)&lkp, (Word)lkn)) {
      /* Not cached yet: clone the lock and neutralise its dynamic
         state. */
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (Word)lkp, (Word)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
149
150/* Errors:
151
152 race: program counter
153 read or write
154 data size
155 previous state
156 current state
157
158 FIXME: how does state printing interact with lockset gc?
159 Are the locksets in prev/curr state always valid?
160 Ditto question for the threadsets
161 ThreadSets - probably are always valid if Threads
162 are never thrown away.
163 LockSets - could at least print the lockset elements that
164 correspond to actual locks at the time of printing. Hmm.
165*/
166
/* Error kinds.  The 1101 base makes the tags easy to recognise in a
   debugger and keeps them disjoint from other tag ranges. */
typedef
   enum {
      XE_Race=1101,      // race
      XE_FreeMemLock,    // freeing memory containing a locked lock
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;
180
/* Extra contexts for kinds.  One tagged union per error; this is the
   'extra' payload handed to Valgrind's error manager.  'mb_' fields
   are "maybe" data that may remain NULL/0 if unavailable. */
typedef
   struct  {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;    /* raced-upon address */
            Int         szB;          /* access size in bytes */
            Bool        isWrite;      /* True if racing access is a write */
            ExeContext* mb_lastlock;
            ExeContext* mb_confacc;   /* conflicting access, if found
                                         (filled in HG_(update_extra)) */
            Thread*     thr;          /* thread doing the access */
            Thread*     mb_confaccthr;
            Int         mb_confaccSzB;
            Bool        mb_confaccIsW;
            /* Source-level description of data_addr, filled in by
               VG_(get_data_description) in HG_(update_extra). */
            Char        descr1[96];
            Char        descr2[96];
         } Race;
         struct {
            Thread* thr;  /* doing the freeing */
            Lock*   lock; /* lock which is locked */
         } FreeMemLock;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;    /* doing the unlocking */
            Thread* owner;  /* thread that actually holds the lock */
            Lock*   lock;   /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            Addr        before_ga; /* always locked first in prog. history */
            Addr        after_ga;
            ExeContext* before_ec;
            ExeContext* after_ec;
         } LockOrder;
         struct {
            Thread* thr;
            HChar*  errstr; /* persistent, in tool-arena */
         } Misc;
      } XE;
   }
   XError;
236
237static void init_XError ( XError* xe ) {
238 VG_(memset)(xe, 0, sizeof(*xe) );
239 xe->tag = XE_Race-1; /* bogus */
240}
241
242
/* Extensions of suppressions.  One suppression kind per error kind;
   the 1201 base keeps the range disjoint from XErrorTag. */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;
256
257
/* Updates the copy with address info if necessary.  Called by the
   core error manager once it has decided the error will actually be
   kept; the expensive lookups below are therefore deferred to here
   rather than done at error-creation time.  Returns the size of the
   extra block so the core can copy it. */
UInt HG_(update_extra) ( Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {
      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;  /* debug counter for conflicting-event queries */
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);
      /* descr1/descr2 are a matched pair; get_data_description fills
         both, and we reserve the last byte for the terminator. */
      tl_assert(sizeof(xe->XE.Race.descr1) == sizeof(xe->XE.Race.descr2));
      if (VG_(get_data_description)(
                &xe->XE.Race.descr1[0],
                &xe->XE.Race.descr2[0],
                sizeof(xe->XE.Race.descr1)-1,
                xe->XE.Race.data_addr )) {
         tl_assert( xe->XE.Race.descr1
                       [ sizeof(xe->XE.Race.descr1)-1 ] == 0);
         tl_assert( xe->XE.Race.descr2
                       [ sizeof(xe->XE.Race.descr2)-1 ] == 0);
      }
      /* Ask libhb for the conflicting access, if it still has a
         record of one; on success fill in the four mb_confacc*
         fields, which record_error_Race left empty. */
      { Thr*        thrp   = NULL;
        ExeContext* wherep = NULL;
        Addr  acc_addr = xe->XE.Race.data_addr;
        Int   acc_szB  = xe->XE.Race.szB;
        Thr*  acc_thr  = xe->XE.Race.thr->hbthr;
        Bool  acc_isW  = xe->XE.Race.isWrite;
        SizeT conf_szB = 0;
        Bool  conf_isW = False;
        tl_assert(!xe->XE.Race.mb_confacc);
        tl_assert(!xe->XE.Race.mb_confaccthr);
        if (libhb_event_map_lookup(
               &wherep, &thrp, &conf_szB, &conf_isW,
               acc_thr, acc_addr, acc_szB, acc_isW )) {
           Thread* threadp;
           tl_assert(wherep);
           tl_assert(thrp);
           /* Map the libhb thread back to Helgrind's Thread. */
           threadp = libhb_get_Thr_opaque( thrp );
           tl_assert(threadp);
           xe->XE.Race.mb_confacc    = wherep;
           xe->XE.Race.mb_confaccthr = threadp;
           xe->XE.Race.mb_confaccSzB = (Int)conf_szB;
           xe->XE.Race.mb_confaccIsW = conf_isW;
        }
      }
   }

   return sizeof(XError);
}
316
/* Record a data race by 'thr' on 'data_addr' (access of 'szB' bytes,
   write iff 'isWrite').  Races in GOTPLT sections are discarded on
   Linux.  Conflicting-access details are deliberately left unfilled
   here; see HG_(update_extra). */
void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Bool isWrite, Int szB,
                              ExeContext* mb_lastlock )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
     VgSectKind sect = VG_(seginfo_sect_kind)( NULL, 0, data_addr );
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     if (sect == Vg_SectGOTPLT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr   = data_addr;
   xe.XE.Race.szB         = szB;
   xe.XE.Race.isWrite     = isWrite;
   xe.XE.Race.mb_lastlock = mb_lastlock;
   xe.XE.Race.thr         = thr;
   tl_assert(isWrite == False || isWrite == True);
   // tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Descriptions are filled in later, by HG_(update_extra). */
   xe.XE.Race.descr1[0] = xe.XE.Race.descr2[0] = 0;
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.mb_confaccSzB = 0;
   xe.XE.Race.mb_confaccIsW = False;
   xe.XE.Race.mb_confacc    = NULL;
   xe.XE.Race.mb_confaccthr = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}
363
364void HG_(record_error_FreeMemLock) ( Thread* thr, Lock* lk )
365{
366 XError xe;
367 tl_assert( HG_(is_sane_Thread)(thr) );
368 tl_assert( HG_(is_sane_LockN)(lk) );
369 init_XError(&xe);
370 xe.tag = XE_FreeMemLock;
371 xe.XE.FreeMemLock.thr = thr;
372 xe.XE.FreeMemLock.lock = mk_LockP_from_LockN(lk);
373 // FIXME: tid vs thr
374 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
375 tl_assert( thr->coretid != VG_INVALID_THREADID );
376 VG_(maybe_record_error)( thr->coretid,
377 XE_FreeMemLock, 0, NULL, &xe );
378}
379
380void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
381{
382 XError xe;
383 tl_assert( HG_(is_sane_Thread)(thr) );
384 tl_assert( HG_(is_sane_LockN)(lk) );
385 init_XError(&xe);
386 xe.tag = XE_UnlockUnlocked;
387 xe.XE.UnlockUnlocked.thr = thr;
388 xe.XE.UnlockUnlocked.lock = mk_LockP_from_LockN(lk);
389 // FIXME: tid vs thr
390 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
391 tl_assert( thr->coretid != VG_INVALID_THREADID );
392 VG_(maybe_record_error)( thr->coretid,
393 XE_UnlockUnlocked, 0, NULL, &xe );
394}
395
396void HG_(record_error_UnlockForeign) ( Thread* thr,
397 Thread* owner, Lock* lk )
398{
399 XError xe;
400 tl_assert( HG_(is_sane_Thread)(thr) );
401 tl_assert( HG_(is_sane_Thread)(owner) );
402 tl_assert( HG_(is_sane_LockN)(lk) );
403 init_XError(&xe);
404 xe.tag = XE_UnlockForeign;
405 xe.XE.UnlockForeign.thr = thr;
406 xe.XE.UnlockForeign.owner = owner;
407 xe.XE.UnlockForeign.lock = mk_LockP_from_LockN(lk);
408 // FIXME: tid vs thr
409 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
410 tl_assert( thr->coretid != VG_INVALID_THREADID );
411 VG_(maybe_record_error)( thr->coretid,
412 XE_UnlockForeign, 0, NULL, &xe );
413}
414
415void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
416{
417 XError xe;
418 tl_assert( HG_(is_sane_Thread)(thr) );
419 init_XError(&xe);
420 xe.tag = XE_UnlockBogus;
421 xe.XE.UnlockBogus.thr = thr;
422 xe.XE.UnlockBogus.lock_ga = lock_ga;
423 // FIXME: tid vs thr
424 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
425 tl_assert( thr->coretid != VG_INVALID_THREADID );
426 VG_(maybe_record_error)( thr->coretid,
427 XE_UnlockBogus, 0, NULL, &xe );
428}
429
430void HG_(record_error_LockOrder)(
431 Thread* thr, Addr before_ga, Addr after_ga,
432 ExeContext* before_ec, ExeContext* after_ec
433 )
434{
435 XError xe;
436 tl_assert( HG_(is_sane_Thread)(thr) );
437 if (!HG_(clo_track_lockorders))
438 return;
439 init_XError(&xe);
440 xe.tag = XE_LockOrder;
441 xe.XE.LockOrder.thr = thr;
442 xe.XE.LockOrder.before_ga = before_ga;
443 xe.XE.LockOrder.before_ec = before_ec;
444 xe.XE.LockOrder.after_ga = after_ga;
445 xe.XE.LockOrder.after_ec = after_ec;
446 // FIXME: tid vs thr
447 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
448 tl_assert( thr->coretid != VG_INVALID_THREADID );
449 VG_(maybe_record_error)( thr->coretid,
450 XE_LockOrder, 0, NULL, &xe );
451}
452
453void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname,
454 Word err, HChar* errstr )
455{
456 XError xe;
457 tl_assert( HG_(is_sane_Thread)(thr) );
458 tl_assert(fnname);
459 tl_assert(errstr);
460 init_XError(&xe);
461 xe.tag = XE_PthAPIerror;
462 xe.XE.PthAPIerror.thr = thr;
463 xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
464 xe.XE.PthAPIerror.err = err;
465 xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
466 // FIXME: tid vs thr
467 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
468 tl_assert( thr->coretid != VG_INVALID_THREADID );
469 VG_(maybe_record_error)( thr->coretid,
470 XE_PthAPIerror, 0, NULL, &xe );
471}
472
473void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
474{
475 XError xe;
476 tl_assert( HG_(is_sane_Thread)(thr) );
477 tl_assert(errstr);
478 init_XError(&xe);
479 xe.tag = XE_Misc;
480 xe.XE.Misc.thr = thr;
481 xe.XE.Misc.errstr = string_table_strdup(errstr);
482 // FIXME: tid vs thr
483 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
484 tl_assert( thr->coretid != VG_INVALID_THREADID );
485 VG_(maybe_record_error)( thr->coretid,
486 XE_Misc, 0, NULL, &xe );
487}
488
/* Duplicate-suppression comparator used by the core error manager:
   returns True if e1 and e2 should be treated as the same error.
   Note the per-kind policies below; in particular, Race errors do NOT
   compare the accessing thread (and compare the address only under
   --cmp-race-err-addrs), and LockOrder compares only the thread. */
Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_FreeMemLock:
         return xe1->XE.FreeMemLock.thr == xe2->XE.FreeMemLock.thr
                && xe1->XE.FreeMemLock.lock == xe2->XE.FreeMemLock.lock;
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         /* same thread, same function name, same error code; the
            error strings are not compared. */
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}
537
538
539/* Announce (that is, print the point-of-creation) of 'thr'. Only do
540 this once, as we only want to see these announcements once per
541 thread. */
542static void announce_one_thread ( Thread* thr )
543{
544 tl_assert(HG_(is_sane_Thread)(thr));
545 tl_assert(thr->errmsg_index >= 1);
546 if (!thr->announced) {
547 if (thr->errmsg_index == 1) {
548 tl_assert(thr->created_at == NULL);
549 VG_(message)(Vg_UserMsg, "Thread #%d is the program's root thread",
550 thr->errmsg_index);
551 } else {
552 tl_assert(thr->created_at != NULL);
553 VG_(message)(Vg_UserMsg, "Thread #%d was created",
554 thr->errmsg_index);
555 VG_(pp_ExeContext)( thr->created_at );
556 }
557 VG_(message)(Vg_UserMsg, "");
558 thr->announced = True;
559 }
560}
561
562
/* Print one error to the user, dispatching on its kind.  Each case
   announces (once) the threads involved, prints a one-line summary,
   the stack of the erring access, and any secondary contexts held in
   the XError (lock first-observation points, conflicting accesses,
   lock-order establishment points, address descriptions). */
void HG_(pp_Error) ( Error* err )
{
   XError *xe = (XError*)VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );
      announce_one_thread( xe->XE.Misc.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d: %s",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      break;
   }

   case XE_LockOrder: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );
      announce_one_thread( xe->XE.LockOrder.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d: lock order \"%p before %p\" violated",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.before_ga,
                  (void*)xe->XE.LockOrder.after_ga);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      /* Show where the required order was established, if known. */
      if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
         VG_(message)(Vg_UserMsg,
            "  Required order was established by acquisition of lock at %p",
            (void*)xe->XE.LockOrder.before_ga);
         VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
         VG_(message)(Vg_UserMsg,
            "  followed by a later acquisition of lock at %p",
            (void*)xe->XE.LockOrder.after_ga);
         VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
      }
      break;
   }

   case XE_PthAPIerror: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );
      announce_one_thread( xe->XE.PthAPIerror.thr );
      VG_(message)(Vg_UserMsg,
                  "Thread #%d's call to %s failed",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                  xe->XE.PthAPIerror.fnname);
      VG_(message)(Vg_UserMsg,
                  "   with error code %ld (%s)",
                  xe->XE.PthAPIerror.err,
                  xe->XE.PthAPIerror.errstr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      break;
   }

   case XE_UnlockBogus: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );
      announce_one_thread( xe->XE.UnlockBogus.thr );
      VG_(message)(Vg_UserMsg,
                   "Thread #%d unlocked an invalid lock at %p ",
                   (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                   (void*)xe->XE.UnlockBogus.lock_ga);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      break;
   }

   case XE_UnlockForeign: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );
      /* Two threads are involved; announce both. */
      announce_one_thread( xe->XE.UnlockForeign.thr );
      announce_one_thread( xe->XE.UnlockForeign.owner );
      VG_(message)(Vg_UserMsg,
                   "Thread #%d unlocked lock at %p "
                   "currently held by thread #%d",
                   (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                   (void*)xe->XE.UnlockForeign.lock->guestaddr,
                   (Int)xe->XE.UnlockForeign.owner->errmsg_index );
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      if (xe->XE.UnlockForeign.lock->appeared_at) {
         VG_(message)(Vg_UserMsg,
                      "  Lock at %p was first observed",
                      (void*)xe->XE.UnlockForeign.lock->guestaddr);
         VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
      }
      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );
      announce_one_thread( xe->XE.UnlockUnlocked.thr );
      VG_(message)(Vg_UserMsg,
                   "Thread #%d unlocked a not-locked lock at %p ",
                   (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                   (void*)xe->XE.UnlockUnlocked.lock->guestaddr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      if (xe->XE.UnlockUnlocked.lock->appeared_at) {
         VG_(message)(Vg_UserMsg,
                      "  Lock at %p was first observed",
                      (void*)xe->XE.UnlockUnlocked.lock->guestaddr);
         VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
      }
      break;
   }

   case XE_FreeMemLock: {
      tl_assert(xe);
      tl_assert( HG_(is_sane_LockP)( xe->XE.FreeMemLock.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.FreeMemLock.thr ) );
      announce_one_thread( xe->XE.FreeMemLock.thr );
      VG_(message)(Vg_UserMsg,
                   "Thread #%d deallocated location %p "
                   "containing a locked lock",
                   (Int)xe->XE.FreeMemLock.thr->errmsg_index,
                   (void*)xe->XE.FreeMemLock.lock->guestaddr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      if (xe->XE.FreeMemLock.lock->appeared_at) {
         VG_(message)(Vg_UserMsg,
                      "  Lock at %p was first observed",
                      (void*)xe->XE.FreeMemLock.lock->guestaddr);
         VG_(pp_ExeContext)( xe->XE.FreeMemLock.lock->appeared_at );
      }
      break;
   }

   case XE_Race: {
      Addr      err_ga;
      HChar*    what;
      Int       szB;
      what   = xe->XE.Race.isWrite ? "write" : "read";
      szB    = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      announce_one_thread( xe->XE.Race.thr );
      if (xe->XE.Race.mb_confaccthr)
         announce_one_thread( xe->XE.Race.mb_confaccthr );
      VG_(message)(Vg_UserMsg,
         "Possible data race during %s of size %d at %#lx by thread #%d",
         what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index
      );
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      /* Conflicting-access info may have been filled in by
         HG_(update_extra); show it if present. */
      if (xe->XE.Race.mb_confacc) {
         if (xe->XE.Race.mb_confaccthr) {
            VG_(message)(Vg_UserMsg,
               " This conflicts with a previous %s of size %d by thread #%d",
               xe->XE.Race.mb_confaccIsW ? "write" : "read",
               xe->XE.Race.mb_confaccSzB,
               xe->XE.Race.mb_confaccthr->errmsg_index
            );
         } else {
            // FIXME: can this ever happen?
            VG_(message)(Vg_UserMsg,
               " This conflicts with a previous %s of size %d",
               xe->XE.Race.mb_confaccIsW ? "write" : "read",
               xe->XE.Race.mb_confaccSzB
            );
         }
         VG_(pp_ExeContext)( xe->XE.Race.mb_confacc );
      }


      /* If we have a better description of the address, show it. */
      if (xe->XE.Race.descr1[0] != 0)
         VG_(message)(Vg_UserMsg, " %s", &xe->XE.Race.descr1[0]);
      if (xe->XE.Race.descr2[0] != 0)
         VG_(message)(Vg_UserMsg, " %s", &xe->XE.Race.descr2[0]);

      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}
743
744Char* HG_(get_error_name) ( Error* err )
745{
746 switch (VG_(get_error_kind)(err)) {
747 case XE_Race: return "Race";
748 case XE_FreeMemLock: return "FreeMemLock";
749 case XE_UnlockUnlocked: return "UnlockUnlocked";
750 case XE_UnlockForeign: return "UnlockForeign";
751 case XE_UnlockBogus: return "UnlockBogus";
752 case XE_PthAPIerror: return "PthAPIerror";
753 case XE_LockOrder: return "LockOrder";
754 case XE_Misc: return "Misc";
755 default: tl_assert(0); /* fill in missing case */
756 }
757}
758
759Bool HG_(recognised_suppression) ( Char* name, Supp *su )
760{
761# define TRY(_name,_xskind) \
762 if (0 == VG_(strcmp)(name, (_name))) { \
763 VG_(set_supp_kind)(su, (_xskind)); \
764 return True; \
765 }
766 TRY("Race", XS_Race);
767 TRY("FreeMemLock", XS_FreeMemLock);
768 TRY("UnlockUnlocked", XS_UnlockUnlocked);
769 TRY("UnlockForeign", XS_UnlockForeign);
770 TRY("UnlockBogus", XS_UnlockBogus);
771 TRY("PthAPIerror", XS_PthAPIerror);
772 TRY("LockOrder", XS_LockOrder);
773 TRY("Misc", XS_Misc);
774 return False;
775# undef TRY
776}
777
778Bool HG_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf,
779 Supp* su )
780{
781 /* do nothing -- no extra suppression info present. Return True to
782 indicate nothing bad happened. */
783 return True;
784}
785
786Bool HG_(error_matches_suppression) ( Error* err, Supp* su )
787{
788 switch (VG_(get_supp_kind)(su)) {
789 case XS_Race: return VG_(get_error_kind)(err) == XE_Race;
790 case XS_FreeMemLock: return VG_(get_error_kind)(err) == XE_FreeMemLock;
791 case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
792 case XS_UnlockForeign: return VG_(get_error_kind)(err) == XE_UnlockForeign;
793 case XS_UnlockBogus: return VG_(get_error_kind)(err) == XE_UnlockBogus;
794 case XS_PthAPIerror: return VG_(get_error_kind)(err) == XE_PthAPIerror;
795 case XS_LockOrder: return VG_(get_error_kind)(err) == XE_LockOrder;
796 case XS_Misc: return VG_(get_error_kind)(err) == XE_Misc;
797 //case XS_: return VG_(get_error_kind)(err) == XE_;
798 default: tl_assert(0); /* fill in missing cases */
799 }
800}
801
802void HG_(print_extra_suppression_info) ( Error* err )
803{
804 /* Do nothing */
805}
806
807
808/*--------------------------------------------------------------------*/
809/*--- end hg_errors.c ---*/
810/*--------------------------------------------------------------------*/