blob: 6d1f9b7a3a4af5e50582fca543253a8b875f385b [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj4d474d02008-02-11 11:34:59 +000011 Copyright (C) 2007-2008 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30
31 Neither the names of the U.S. Department of Energy nor the
32 University of California nor the names of its contributors may be
33 used to endorse or promote products derived from this software
34 without prior written permission.
35*/
36
37#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000038#include "pub_tool_libcassert.h"
39#include "pub_tool_libcbase.h"
40#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000041#include "pub_tool_threadstate.h"
42#include "pub_tool_tooliface.h"
43#include "pub_tool_hashtable.h"
44#include "pub_tool_replacemalloc.h"
45#include "pub_tool_machine.h"
46#include "pub_tool_options.h"
47#include "pub_tool_xarray.h"
48#include "pub_tool_stacktrace.h"
sewardjb8b79ad2008-03-03 01:35:41 +000049#include "pub_tool_debuginfo.h" /* VG_(get_data_description) */
sewardj896f6f92008-08-19 08:38:52 +000050#include "pub_tool_wordfm.h"
sewardjb4112022007-11-09 22:49:28 +000051
sewardjf98e1c02008-10-25 16:22:41 +000052#include "hg_basics.h"
53#include "hg_wordset.h"
54#include "hg_lock_n_thread.h"
55#include "hg_errors.h"
56
57#include "libhb.h"
58
sewardjb4112022007-11-09 22:49:28 +000059#include "helgrind.h"
60
sewardjf98e1c02008-10-25 16:22:41 +000061
62// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
63
64// FIXME: when client destroys a lock or a CV, remove these
65// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000066
67/*----------------------------------------------------------------*/
68/*--- ---*/
69/*----------------------------------------------------------------*/
70
sewardj11e352f2007-11-30 11:11:02 +000071/* Note this needs to be compiled with -fno-strict-aliasing, since it
72 contains a whole bunch of calls to lookupFM etc which cast between
73 Word and pointer types. gcc rightly complains this breaks ANSI C
74 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
75 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000076*/
sewardjb4112022007-11-09 22:49:28 +000077
sewardjefd3b4d2007-12-02 02:05:23 +000078// FIXME catch sync signals (SEGV, basically) and unlock BHL,
79// if held. Otherwise a LOCK-prefixed insn which segfaults
80// gets Helgrind into a total muddle as the BHL will not be
81// released after the insn.
82
sewardjb4112022007-11-09 22:49:28 +000083// FIXME what is supposed to happen to locks in memory which
84// is relocated as a result of client realloc?
85
sewardjb4112022007-11-09 22:49:28 +000086// FIXME put referencing ThreadId into Thread and get
87// rid of the slow reverse mapping function.
88
89// FIXME accesses to NoAccess areas: change state to Excl?
90
91// FIXME report errors for accesses of NoAccess memory?
92
93// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
94// the thread still holds the lock.
95
96/* ------------ Debug/trace options ------------ */
97
98// this is:
99// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
100// happens_before_wrk: 1000
101// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
102#define SHOW_EXPENSIVE_STUFF 0
103
104// 0 for silent, 1 for some stuff, 2 for lots of stuff
105#define SHOW_EVENTS 0
106
sewardjb4112022007-11-09 22:49:28 +0000107
108static void all__sanity_check ( Char* who ); /* fwds */
109
110#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
111
112// 0 for none, 1 for dump at end of run
113#define SHOW_DATA_STRUCTURES 0
114
115
sewardjb4112022007-11-09 22:49:28 +0000116/* ------------ Misc comments ------------ */
117
118// FIXME: don't hardwire initial entries for root thread.
119// Instead, let the pre_thread_ll_create handler do this.
120
sewardjb4112022007-11-09 22:49:28 +0000121
122/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000123/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000124/*----------------------------------------------------------------*/
125
sewardjb4112022007-11-09 22:49:28 +0000126/* Admin linked list of Threads */
127static Thread* admin_threads = NULL;
128
129/* Admin linked list of Locks */
130static Lock* admin_locks = NULL;
131
sewardjb4112022007-11-09 22:49:28 +0000132/* Mapping table for core ThreadIds to Thread* */
133static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
134
sewardjb4112022007-11-09 22:49:28 +0000135/* Mapping table for lock guest addresses to Lock* */
136static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
137
138/* The word-set universes for thread sets and lock sets. */
139static WordSetU* univ_tsets = NULL; /* sets of Thread* */
140static WordSetU* univ_lsets = NULL; /* sets of Lock* */
141static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
142
143/* never changed; we only care about its address. Is treated as if it
144 was a standard userspace lock. Also we have a Lock* describing it
145 so it can participate in lock sets in the usual way. */
146static Int __bus_lock = 0;
147static Lock* __bus_lock_Lock = NULL;
148
149
150/*----------------------------------------------------------------*/
151/*--- Simple helpers for the data structures ---*/
152/*----------------------------------------------------------------*/
153
154static UWord stats__lockN_acquires = 0;
155static UWord stats__lockN_releases = 0;
156
sewardjf98e1c02008-10-25 16:22:41 +0000157static
158ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000159
160/* --------- Constructors --------- */
161
sewardjf98e1c02008-10-25 16:22:41 +0000162static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000163 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000164 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000165 thread->locksetA = HG_(emptyWS)( univ_lsets );
166 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000167 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000168 thread->hbthr = hbthr;
169 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000170 thread->created_at = NULL;
171 thread->announced = False;
172 thread->errmsg_index = indx++;
173 thread->admin = admin_threads;
174 admin_threads = thread;
175 return thread;
176}
sewardjf98e1c02008-10-25 16:22:41 +0000177
sewardjb4112022007-11-09 22:49:28 +0000178// Make a new lock which is unlocked (hence ownerless)
179static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
180 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000181 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardjb4112022007-11-09 22:49:28 +0000182 lock->admin = admin_locks;
183 lock->unique = unique++;
184 lock->magic = LockN_MAGIC;
185 lock->appeared_at = NULL;
186 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000187 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000188 lock->guestaddr = guestaddr;
189 lock->kind = kind;
190 lock->heldW = False;
191 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000192 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000193 admin_locks = lock;
194 return lock;
195}
sewardjb4112022007-11-09 22:49:28 +0000196
197/* Release storage for a Lock. Also release storage in .heldBy, if
198 any. */
199static void del_LockN ( Lock* lk )
200{
sewardjf98e1c02008-10-25 16:22:41 +0000201 tl_assert(HG_(is_sane_LockN)(lk));
202 tl_assert(lk->hbso);
203 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000204 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000205 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000206 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000207 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000208}
209
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      /* .acquired_at == NULL means the lock is currently unheld, so
         this is a first acquisition: record where it happened. */
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         /* non-recursive: must be unheld; create a holder bag
            containing just 'thr' and mark the lock write-held */
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         /* maybe-recursive: a first acquisition is handled exactly
            like the non-recursive case, hence the goto */
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         /* the bag multiplicity of 'thr' records the recursion depth */
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         /* rdwr locks taken for writing must be unheld; thereafter
            they behave like the non-recursive case */
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
263
264static void lockN_acquire_reader ( Lock* lk, Thread* thr )
265{
sewardjf98e1c02008-10-25 16:22:41 +0000266 tl_assert(HG_(is_sane_LockN)(lk));
267 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000268 /* can only add reader to a reader-writer lock. */
269 tl_assert(lk->kind == LK_rdwr);
270 /* lk must be free or already r-held. */
271 tl_assert(lk->heldBy == NULL
272 || (lk->heldBy != NULL && !lk->heldW));
273
274 stats__lockN_acquires++;
275
276 /* EXPOSITION only */
277 /* We need to keep recording snapshots of where the lock was
278 acquired, so as to produce better lock-order error messages. */
279 if (lk->acquired_at == NULL) {
280 ThreadId tid;
281 tl_assert(lk->heldBy == NULL);
282 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
283 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000284 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000285 } else {
286 tl_assert(lk->heldBy != NULL);
287 }
288 /* end EXPOSITION only */
289
290 if (lk->heldBy) {
sewardj896f6f92008-08-19 08:38:52 +0000291 VG_(addToBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000292 } else {
293 lk->heldW = False;
sewardjf98e1c02008-10-25 16:22:41 +0000294 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
sewardj896f6f92008-08-19 08:38:52 +0000295 VG_(addToBag)( lk->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +0000296 }
297 tl_assert(!lk->heldW);
sewardjf98e1c02008-10-25 16:22:41 +0000298 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000299}
300
301/* Update 'lk' to reflect a release of it by 'thr'. This is done
302 strictly: only combinations resulting from correct program and
303 libpthread behaviour are allowed. */
304
305static void lockN_release ( Lock* lk, Thread* thr )
306{
307 Bool b;
sewardjf98e1c02008-10-25 16:22:41 +0000308 tl_assert(HG_(is_sane_LockN)(lk));
309 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000310 /* lock must be held by someone */
311 tl_assert(lk->heldBy);
312 stats__lockN_releases++;
313 /* Remove it from the holder set */
sewardj896f6f92008-08-19 08:38:52 +0000314 b = VG_(delFromBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000315 /* thr must actually have been a holder of lk */
316 tl_assert(b);
317 /* normalise */
318 tl_assert(lk->acquired_at);
sewardj896f6f92008-08-19 08:38:52 +0000319 if (VG_(isEmptyBag)(lk->heldBy)) {
320 VG_(deleteBag)(lk->heldBy);
sewardjb4112022007-11-09 22:49:28 +0000321 lk->heldBy = NULL;
322 lk->heldW = False;
323 lk->acquired_at = NULL;
324 }
sewardjf98e1c02008-10-25 16:22:41 +0000325 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000326}
327
328static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
329{
330 Thread* thr;
331 if (!lk->heldBy) {
332 tl_assert(!lk->heldW);
333 return;
334 }
335 /* for each thread that holds this lock do ... */
sewardj896f6f92008-08-19 08:38:52 +0000336 VG_(initIterBag)( lk->heldBy );
337 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
sewardjf98e1c02008-10-25 16:22:41 +0000338 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000339 tl_assert(HG_(elemWS)( univ_lsets,
340 thr->locksetA, (Word)lk ));
341 thr->locksetA
342 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );
343
344 if (lk->heldW) {
345 tl_assert(HG_(elemWS)( univ_lsets,
346 thr->locksetW, (Word)lk ));
347 thr->locksetW
348 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
349 }
350 }
sewardj896f6f92008-08-19 08:38:52 +0000351 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000352}
353
sewardjb4112022007-11-09 22:49:28 +0000354
355/*----------------------------------------------------------------*/
356/*--- Print out the primary data structures ---*/
357/*----------------------------------------------------------------*/
358
sewardjd52392d2008-11-08 20:36:26 +0000359//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000360
361#define PP_THREADS (1<<1)
362#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000363#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000364
365
366static const Int sHOW_ADMIN = 0;
367
368static void space ( Int n )
369{
370 Int i;
371 Char spaces[128+1];
372 tl_assert(n >= 0 && n < 128);
373 if (n == 0)
374 return;
375 for (i = 0; i < n; i++)
376 spaces[i] = ' ';
377 spaces[i] = 0;
378 tl_assert(i < 128+1);
379 VG_(printf)("%s", spaces);
380}
381
382static void pp_Thread ( Int d, Thread* t )
383{
384 space(d+0); VG_(printf)("Thread %p {\n", t);
385 if (sHOW_ADMIN) {
386 space(d+3); VG_(printf)("admin %p\n", t->admin);
387 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
388 }
389 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
390 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
sewardjb4112022007-11-09 22:49:28 +0000391 space(d+0); VG_(printf)("}\n");
392}
393
394static void pp_admin_threads ( Int d )
395{
396 Int i, n;
397 Thread* t;
398 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
399 /* nothing */
400 }
401 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
402 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
403 if (0) {
404 space(n);
405 VG_(printf)("admin_threads record %d of %d:\n", i, n);
406 }
407 pp_Thread(d+3, t);
408 }
barta0b6b2c2008-07-07 06:49:24 +0000409 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000410}
411
412static void pp_map_threads ( Int d )
413{
414 Int i, n;
415 n = 0;
416 space(d); VG_(printf)("map_threads ");
417 n = 0;
418 for (i = 0; i < VG_N_THREADS; i++) {
419 if (map_threads[i] != NULL)
420 n++;
421 }
422 VG_(printf)("(%d entries) {\n", n);
423 for (i = 0; i < VG_N_THREADS; i++) {
424 if (map_threads[i] == NULL)
425 continue;
426 space(d+3);
427 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
428 }
429 space(d); VG_(printf)("}\n");
430}
431
432static const HChar* show_LockKind ( LockKind lkk ) {
433 switch (lkk) {
434 case LK_mbRec: return "mbRec";
435 case LK_nonRec: return "nonRec";
436 case LK_rdwr: return "rdwr";
437 default: tl_assert(0);
438 }
439}
440
441static void pp_Lock ( Int d, Lock* lk )
442{
barta0b6b2c2008-07-07 06:49:24 +0000443 space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
sewardjb4112022007-11-09 22:49:28 +0000444 if (sHOW_ADMIN) {
445 space(d+3); VG_(printf)("admin %p\n", lk->admin);
446 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
447 }
448 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
449 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
450 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
451 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
452 if (lk->heldBy) {
453 Thread* thr;
454 Word count;
455 VG_(printf)(" { ");
sewardj896f6f92008-08-19 08:38:52 +0000456 VG_(initIterBag)( lk->heldBy );
457 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
sewardjb4112022007-11-09 22:49:28 +0000458 VG_(printf)("%lu:%p ", count, thr);
sewardj896f6f92008-08-19 08:38:52 +0000459 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000460 VG_(printf)("}");
461 }
462 VG_(printf)("\n");
463 space(d+0); VG_(printf)("}\n");
464}
465
466static void pp_admin_locks ( Int d )
467{
468 Int i, n;
469 Lock* lk;
470 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin) {
471 /* nothing */
472 }
473 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
474 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin) {
475 if (0) {
476 space(n);
477 VG_(printf)("admin_locks record %d of %d:\n", i, n);
478 }
479 pp_Lock(d+3, lk);
480 }
barta0b6b2c2008-07-07 06:49:24 +0000481 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000482}
483
484static void pp_map_locks ( Int d )
485{
486 void* gla;
487 Lock* lk;
488 space(d); VG_(printf)("map_locks (%d entries) {\n",
sewardj896f6f92008-08-19 08:38:52 +0000489 (Int)VG_(sizeFM)( map_locks ));
490 VG_(initIterFM)( map_locks );
491 while (VG_(nextIterFM)( map_locks, (Word*)&gla,
sewardjb5f29642007-11-16 12:02:43 +0000492 (Word*)&lk )) {
sewardjb4112022007-11-09 22:49:28 +0000493 space(d+3);
494 VG_(printf)("guest %p -> Lock %p\n", gla, lk);
495 }
sewardj896f6f92008-08-19 08:38:52 +0000496 VG_(doneIterFM)( map_locks );
sewardjb4112022007-11-09 22:49:28 +0000497 space(d); VG_(printf)("}\n");
498}
499
sewardjb4112022007-11-09 22:49:28 +0000500static void pp_everything ( Int flags, Char* caller )
501{
502 Int d = 0;
503 VG_(printf)("\n");
504 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
505 if (flags & PP_THREADS) {
506 VG_(printf)("\n");
507 pp_admin_threads(d+3);
508 VG_(printf)("\n");
509 pp_map_threads(d+3);
510 }
511 if (flags & PP_LOCKS) {
512 VG_(printf)("\n");
513 pp_admin_locks(d+3);
514 VG_(printf)("\n");
515 pp_map_locks(d+3);
516 }
sewardjb4112022007-11-09 22:49:28 +0000517
518 VG_(printf)("\n");
519 VG_(printf)("}\n");
520 VG_(printf)("\n");
521}
522
523#undef SHOW_ADMIN
524
525
526/*----------------------------------------------------------------*/
527/*--- Initialise the primary data structures ---*/
528/*----------------------------------------------------------------*/
529
/* One-time setup of all primary data structures: the thread map, the
   lock map, the three word-set universes, the synthetic bus lock, and
   a Thread record for the root thread bound to libhb's root Thr.
   Must be called exactly once, before any events are processed. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   /* Addr values are stored in Word-keyed containers below, so the
      two types must be the same size. */
   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   /* Create the Lock record for the synthetic bus-hardware lock and
      register it in map_locks under &__bus_lock. */
   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   /* cross-link the hg Thread and the libhb Thr */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}
593
594
595/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000596/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000597/*----------------------------------------------------------------*/
598
599/* Doesn't assert if the relevant map_threads entry is NULL. */
600static Thread* map_threads_maybe_lookup ( ThreadId coretid )
601{
602 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000603 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000604 thr = map_threads[coretid];
605 return thr;
606}
607
608/* Asserts if the relevant map_threads entry is NULL. */
609static inline Thread* map_threads_lookup ( ThreadId coretid )
610{
611 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000612 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000613 thr = map_threads[coretid];
614 tl_assert(thr);
615 return thr;
616}
617
sewardjf98e1c02008-10-25 16:22:41 +0000618/* Do a reverse lookup. Does not assert if 'thr' is not found in
619 map_threads. */
sewardjb4112022007-11-09 22:49:28 +0000620static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
621{
sewardjf98e1c02008-10-25 16:22:41 +0000622 ThreadId tid;
623 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000624 /* Check nobody used the invalid-threadid slot */
625 tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
626 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000627 tid = thr->coretid;
628 tl_assert(HG_(is_sane_ThreadId)(tid));
629 return tid;
sewardjb4112022007-11-09 22:49:28 +0000630}
631
632/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
633 is not found in map_threads. */
634static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
635{
636 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
637 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000638 tl_assert(map_threads[tid]);
639 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000640 return tid;
641}
642
643static void map_threads_delete ( ThreadId coretid )
644{
645 Thread* thr;
646 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000647 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000648 thr = map_threads[coretid];
649 tl_assert(thr);
650 map_threads[coretid] = NULL;
651}
652
653
654/*----------------------------------------------------------------*/
655/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
656/*----------------------------------------------------------------*/
657
658/* Make sure there is a lock table entry for the given (lock) guest
659 address. If not, create one of the stated 'kind' in unheld state.
660 In any case, return the address of the existing or new Lock. */
661static
662Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
663{
664 Bool found;
665 Lock* oldlock = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000666 tl_assert(HG_(is_sane_ThreadId)(tid));
sewardj896f6f92008-08-19 08:38:52 +0000667 found = VG_(lookupFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000668 NULL, (Word*)&oldlock, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000669 if (!found) {
670 Lock* lock = mk_LockN(lkk, ga);
671 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
sewardjf98e1c02008-10-25 16:22:41 +0000672 tl_assert(HG_(is_sane_LockN)(lock));
sewardj896f6f92008-08-19 08:38:52 +0000673 VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
sewardjb4112022007-11-09 22:49:28 +0000674 tl_assert(oldlock == NULL);
sewardjb4112022007-11-09 22:49:28 +0000675 return lock;
676 } else {
677 tl_assert(oldlock != NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000678 tl_assert(HG_(is_sane_LockN)(oldlock));
sewardjb4112022007-11-09 22:49:28 +0000679 tl_assert(oldlock->guestaddr == ga);
sewardjb4112022007-11-09 22:49:28 +0000680 return oldlock;
681 }
682}
683
684static Lock* map_locks_maybe_lookup ( Addr ga )
685{
686 Bool found;
687 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000688 found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000689 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000690 return lk;
691}
692
693static void map_locks_delete ( Addr ga )
694{
695 Addr ga2 = 0;
696 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000697 VG_(delFromFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000698 (Word*)&ga2, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000699 /* delFromFM produces the val which is being deleted, if it is
700 found. So assert it is non-null; that in effect asserts that we
701 are deleting a (ga, Lock) pair which actually exists. */
702 tl_assert(lk != NULL);
703 tl_assert(ga2 == ga);
704}
705
706
sewardjb4112022007-11-09 22:49:28 +0000707
708/*----------------------------------------------------------------*/
709/*--- Sanity checking the data structures ---*/
710/*----------------------------------------------------------------*/
711
712static UWord stats__sanity_checks = 0;
713
sewardjb4112022007-11-09 22:49:28 +0000714static void laog__sanity_check ( Char* who ); /* fwds */
715
716/* REQUIRED INVARIANTS:
717
718 Thread vs Segment/Lock/SecMaps
719
720 for each t in Threads {
721
722 // Thread.lockset: each element is really a valid Lock
723
724 // Thread.lockset: each Lock in set is actually held by that thread
725 for lk in Thread.lockset
726 lk == LockedBy(t)
727
728 // Thread.csegid is a valid SegmentID
729 // and the associated Segment has .thr == t
730
731 }
732
733 all thread Locksets are pairwise empty under intersection
734 (that is, no lock is claimed to be held by more than one thread)
735 -- this is guaranteed if all locks in locksets point back to their
736 owner threads
737
738 Lock vs Thread/Segment/SecMaps
739
740 for each entry (gla, la) in map_locks
741 gla == la->guest_addr
742
743 for each lk in Locks {
744
745 lk->tag is valid
746 lk->guest_addr does not have shadow state NoAccess
747 if lk == LockedBy(t), then t->lockset contains lk
748 if lk == UnlockedBy(segid) then segid is valid SegmentID
749 and can be mapped to a valid Segment(seg)
750 and seg->thr->lockset does not contain lk
751 if lk == UnlockedNew then (no lockset contains lk)
752
753 secmaps for lk has .mbHasLocks == True
754
755 }
756
757 Segment vs Thread/Lock/SecMaps
758
759 the Segment graph is a dag (no cycles)
760 all of the Segment graph must be reachable from the segids
761 mentioned in the Threads
762
763 for seg in Segments {
764
765 seg->thr is a sane Thread
766
767 }
768
769 SecMaps vs Segment/Thread/Lock
770
771 for sm in SecMaps {
772
773 sm properly aligned
774 if any shadow word is ShR or ShM then .mbHasShared == True
775
776 for each Excl(segid) state
777 map_segments_lookup maps to a sane Segment(seg)
778 for each ShM/ShR(tsetid,lsetid) state
779 each lk in lset is a valid Lock
780 each thr in tset is a valid thread, which is non-dead
781
782 }
783*/
784
785
786/* Return True iff 'thr' holds 'lk' in some mode. */
787static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
788{
789 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000790 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000791 else
792 return False;
793}
794
/* Sanity check Threads, as far as possible: every Thread on the admin
   list must be structurally sane, its write lockset must be a subset
   of its complete lockset, and every Lock in the complete lockset
   must be sane and actually held by that thread.  On any violation,
   print 'who' plus a short failure code and assert. */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
/* record the failure code and jump to the reporting tail */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   Word ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
828
829
/* Sanity check Locks, as far as possible.  Cross-checks the
   admin_locks list against map_locks, then checks each lock's holder
   bag against the holders' locksets.  Any violated invariant aborts
   via the BAD macro (prints 'who' and a short code, then asserts). */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
   /* Record a short failure code and jump to the report-and-abort
      epilogue. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            // every holder must list this lock in its all-held set
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
894
895
/* Run all sanity checks except the (comparatively expensive) lock
   checks: threads plus the lock-acquisition-order graph (laog).
   'who' is an identifying string echoed in any failure report. */
static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
/* Run the complete set of sanity checks, including the lock checks. */
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
906
907
908/*----------------------------------------------------------------*/
909/*--- the core memory state machine (msm__* functions) ---*/
910/*----------------------------------------------------------------*/
911
sewardjd52392d2008-11-08 20:36:26 +0000912//static WordSetID add_BHL ( WordSetID lockset ) {
913// return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
914//}
915//static WordSetID del_BHL ( WordSetID lockset ) {
916// return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
917//}
sewardjb4112022007-11-09 22:49:28 +0000918
919
sewardjd52392d2008-11-08 20:36:26 +0000920///* Last-lock-lossage records. This mechanism exists to help explain
921// to programmers why we are complaining about a race. The idea is to
922// monitor all lockset transitions. When a previously nonempty
923// lockset becomes empty, the lock(s) that just disappeared (the
924// "lossage") are the locks that have consistently protected the
925// location (ga_of_access) in question for the longest time. Most of
926// the time the lossage-set is a single lock. Because the
927// lossage-lock is the one that has survived longest, there is there
928// is a good chance that it is indeed the lock that the programmer
929// intended to use to protect the location.
930//
931// Note that we cannot in general just look at the lossage set when we
932// see a transition to ShM(...,empty-set), because a transition to an
933// empty lockset can happen arbitrarily far before the point where we
934// want to report an error. This is in the case where there are many
935// transitions ShR -> ShR, all with an empty lockset, and only later
936// is there a transition to ShM. So what we want to do is note the
937// lossage lock at the point where a ShR -> ShR transition empties out
938// the lockset, so we can present it later if there should be a
939// transition to ShM.
940//
941// So this function finds such transitions. For each, it associates
942// in ga_to_lastlock, the guest address and the lossage lock. In fact
943// we do not record the Lock* directly as that may disappear later,
944// but instead the ExeContext inside the Lock which says where it was
945// initialised or first locked. ExeContexts are permanent so keeping
946// them indefinitely is safe.
947//
948// A boring detail: the hardware bus lock is not interesting in this
949// respect, so we first remove that from the pre/post locksets.
950//*/
951//
952//static UWord stats__ga_LL_adds = 0;
953//
954//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
955//
956//static
957//void record_last_lock_lossage ( Addr ga_of_access,
958// WordSetID lset_old, WordSetID lset_new )
959//{
960// Lock* lk;
961// Int card_old, card_new;
962//
963// tl_assert(lset_old != lset_new);
964//
965// if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
966// (Int)lset_old,
967// HG_(cardinalityWS)(univ_lsets,lset_old),
968// (Int)lset_new,
969// HG_(cardinalityWS)(univ_lsets,lset_new),
970// ga_of_access );
971//
972// /* This is slow, but at least it's simple. The bus hardware lock
973// just confuses the logic, so remove it from the locksets we're
974// considering before doing anything else. */
975// lset_new = del_BHL( lset_new );
976//
977// if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
978// /* The post-transition lock set is not empty. So we are not
979// interested. We're only interested in spotting transitions
980// that make locksets become empty. */
981// return;
982// }
983//
984// /* lset_new is now empty */
985// card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
986// tl_assert(card_new == 0);
987//
988// lset_old = del_BHL( lset_old );
989// card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
990//
991// if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
992// (Int)lset_old, card_old, (Int)lset_new, card_new );
993//
994// if (card_old == 0) {
995// /* The old lockset was also empty. Not interesting. */
996// return;
997// }
998//
999// tl_assert(card_old > 0);
1000// tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
1001//
1002// /* Now we know we've got a transition from a nonempty lockset to an
1003// empty one. So lset_old must be the set of locks lost. Record
1004// some details. If there is more than one element in the lossage
1005// set, just choose one arbitrarily -- not the best, but at least
1006// it's simple. */
1007//
1008// lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
1009// if (0) VG_(printf)("lossage %ld %p\n",
1010// HG_(cardinalityWS)( univ_lsets, lset_old), lk );
1011// if (lk->appeared_at) {
1012// if (ga_to_lastlock == NULL)
1013// ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
1014// VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
1015// stats__ga_LL_adds++;
1016// }
1017//}
1018//
1019///* This queries the table (ga_to_lastlock) made by
1020// record_last_lock_lossage, when constructing error messages. It
1021// attempts to find the ExeContext of the allocation or initialisation
1022// point for the lossage lock associated with 'ga'. */
1023//
1024//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
1025//{
1026// ExeContext* ec_hint = NULL;
1027// if (ga_to_lastlock != NULL
1028// && VG_(lookupFM)(ga_to_lastlock,
1029// NULL, (Word*)&ec_hint, ga)) {
1030// tl_assert(ec_hint != NULL);
1031// return ec_hint;
1032// } else {
1033// return NULL;
1034// }
1035//}
sewardjb4112022007-11-09 22:49:28 +00001036
1037
sewardjb4112022007-11-09 22:49:28 +00001038/*----------------------------------------------------------------*/
1039/*--- Shadow value and address range handlers ---*/
1040/*----------------------------------------------------------------*/
1041
1042static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001043//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001044static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001045__attribute__((noinline))
1046static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001047
sewardjb4112022007-11-09 22:49:28 +00001048
/* Block-copy states (needed for implementing realloc()). */
static void shadow_mem_copy_range ( Addr src, Addr dst, SizeT len )
{
   /* Delegated entirely to libhb, which owns the shadow-state
      representation. */
   libhb_copy_shadow_state( src, dst, len );
}
1054
sewardjf98e1c02008-10-25 16:22:41 +00001055static void shadow_mem_read_range ( Thread* thr, Addr a, SizeT len )
1056{
1057 Thr* hbthr = thr->hbthr;
1058 tl_assert(hbthr);
1059 LIBHB_READ_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001060}
1061
1062static void shadow_mem_write_range ( Thread* thr, Addr a, SizeT len ) {
sewardjf98e1c02008-10-25 16:22:41 +00001063 Thr* hbthr = thr->hbthr;
1064 tl_assert(hbthr);
1065 LIBHB_WRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001066}
1067
/* Mark [a, a+len) as freshly created in the shadow state, on behalf
   of 'thr'. */
static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_range_new( thr->hbthr, a, len );
}
1072
/* Mark [aIN, aIN+len) as NoAccess in the shadow state, on behalf of
   'thr'. */
static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   /* Debug aid (disabled): trace large NoAccess transitions. */
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_range_noaccess( thr->hbthr, aIN, len );
}
1079
1080
1081/*----------------------------------------------------------------*/
1082/*--- Event handlers (evh__* functions) ---*/
1083/*--- plus helpers (evhH__* functions) ---*/
1084/*----------------------------------------------------------------*/
1085
1086/*--------- Event handler helpers (evhH__* functions) ---------*/
1087
1088/* Create a new segment for 'thr', making it depend (.prev) on its
1089 existing segment, bind together the SegmentID and Segment, and
1090 return both of them. Also update 'thr' so it references the new
1091 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001092//zz static
1093//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1094//zz /*OUT*/Segment** new_segP,
1095//zz Thread* thr )
1096//zz {
1097//zz Segment* cur_seg;
1098//zz tl_assert(new_segP);
1099//zz tl_assert(new_segidP);
1100//zz tl_assert(HG_(is_sane_Thread)(thr));
1101//zz cur_seg = map_segments_lookup( thr->csegid );
1102//zz tl_assert(cur_seg);
1103//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1104//zz at their owner thread. */
1105//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1106//zz *new_segidP = alloc_SegmentID();
1107//zz map_segments_add( *new_segidP, *new_segP );
1108//zz thr->csegid = *new_segidP;
1109//zz }
sewardjb4112022007-11-09 22:49:28 +00001110
1111
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks.  Errors found here
   are reported as libpthread bugs, since this runs only after a
   (supposedly validated) successful acquisition. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   /* post-condition: the lock is still sane whether or not an error
      was reported above. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1205
1206
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks.  Errors found here
   are reported as libpthread bugs, since this runs only after a
   (supposedly validated) successful acquisition. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   /* post-condition: the lock is still sane whether or not an error
      was reported above. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1278
1279
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks.  Unlike
   the post-acquire handlers above, this runs BEFORE libpthread has
   validated the call, so invalid requests here are client bugs, not
   libpthread bugs. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word n;
   Bool was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* Complain (but continue) if the unlock function used does not
      match the lock's kind. */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* We no longer hold the lock. */
      tl_assert(!lock->heldBy);
      tl_assert(lock->heldW == False);
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   /* post-condition: the lock is still sane whether or not an error
      was reported above. */
   tl_assert(HG_(is_sane_LockN)(lock));
}
1398
1399
1400/*--------- Event handlers proper (evh__* functions) ---------*/
1401
1402/* What is the Thread* for the currently running thread? This is
1403 absolutely performance critical. We receive notifications from the
1404 core for client code starts/stops, and cache the looked-up result
1405 in 'current_Thread'. Hence, for the vast majority of requests,
1406 finding the current thread reduces to a read of a global variable,
1407 provided get_current_Thread_in_C_C is inlined.
1408
1409 Outside of client code, current_Thread is NULL, and presumably
1410 any uses of it will cause a segfault. Hence:
1411
1412 - for uses definitely within client code, use
1413 get_current_Thread_in_C_C.
1414
1415 - for all other uses, use get_current_Thread.
1416*/
1417
1418static Thread* current_Thread = NULL;
1419
1420static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1421 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1422 tl_assert(current_Thread == NULL);
1423 current_Thread = map_threads_lookup( tid );
1424 tl_assert(current_Thread != NULL);
1425}
/* Core notification: 'tid' has stopped running client code.  Clear
   the cached Thread* and give libhb an opportunity to GC. */
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
/* Fast accessor for the cached current thread; only meaningful while
   executing client code (see the comment block above). */
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
1435static inline Thread* get_current_Thread ( void ) {
1436 ThreadId coretid;
1437 Thread* thr;
1438 thr = get_current_Thread_in_C_C();
1439 if (LIKELY(thr))
1440 return thr;
1441 /* evidently not in client code. Do it the slow way. */
1442 coretid = VG_(get_running_tid)();
1443 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001444 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001445 of initial memory layout) and VG_(get_running_tid)() returns
1446 VG_INVALID_THREADID at that point. */
1447 if (coretid == VG_INVALID_THREADID)
1448 coretid = 1; /* KLUDGE */
1449 thr = map_threads_lookup( coretid );
1450 return thr;
1451}
1452
1453static
1454void evh__new_mem ( Addr a, SizeT len ) {
1455 if (SHOW_EVENTS >= 2)
1456 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1457 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001458 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001459 all__sanity_check("evh__new_mem-post");
1460}
1461
1462static
sewardj7cf4e6b2008-05-01 20:24:26 +00001463void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1464 if (SHOW_EVENTS >= 2)
1465 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1466 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001467 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001468 all__sanity_check("evh__new_mem_w_tid-post");
1469}
1470
1471static
sewardjb4112022007-11-09 22:49:28 +00001472void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001473 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001474 if (SHOW_EVENTS >= 1)
1475 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1476 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1477 if (rr || ww || xx)
1478 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001479 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001480 all__sanity_check("evh__new_mem_w_perms-post");
1481}
1482
1483static
1484void evh__set_perms ( Addr a, SizeT len,
1485 Bool rr, Bool ww, Bool xx ) {
1486 if (SHOW_EVENTS >= 1)
1487 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1488 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1489 /* Hmm. What should we do here, that actually makes any sense?
1490 Let's say: if neither readable nor writable, then declare it
1491 NoAccess, else leave it alone. */
1492 if (!(rr || ww))
1493 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001494 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001495 all__sanity_check("evh__set_perms-post");
1496}
1497
1498static
1499void evh__die_mem ( Addr a, SizeT len ) {
1500 if (SHOW_EVENTS >= 2)
1501 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1502 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001503 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001504 all__sanity_check("evh__die_mem-post");
1505}
1506
/* Core notification: thread 'child' is about to be created by
   'parent'.  Create and wire up the Helgrind- and libhb-level records
   for the child, and remember where the parent was for error
   messages. */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr* hbthr_p;
      Thr* hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      /* the parent must already exist, and the child must not yet */
      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      /* derive the child's libhb thread from the parent's, so the
         child inherits the parent's happens-before position */
      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1569
/* Event: low-level thread 'quit_tid' is exiting.  Reports any locks
   still held, then unbinds the Thread record from map_threads so the
   core can reuse the ThreadId.  The Thread record itself is NOT
   freed; the exiting thread is treated as if it lingers forever (see
   comment below). */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      /* Fixed 80-byte buffer: safe, since the message length is
         bounded by the constant format text plus a decimal Int. */
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* About the only thing we do need to do is clear the map_threads
      entry, in order that the Valgrind core can re-use it. */
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1616
sewardjf98e1c02008-10-25 16:22:41 +00001617
/* Event: thread 'stay_tid' has completed a pthread_join on the
   (already-exited) thread 'quit_thr'.  Creates the happens-before
   dependency from the quitter to the stayer via a throw-away
   synchronisation object. */
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1670
/* Event: the core is about to read [a, a+size) on behalf of thread
   'tid' (e.g. a syscall argument described by 's').  Treat it as a
   client read for race-detection purposes. */
static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_read_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}
1682
1683static
1684void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1685 Char* s, Addr a ) {
1686 Int len;
1687 if (SHOW_EVENTS >= 1)
1688 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1689 (Int)tid, s, (void*)a );
1690 // FIXME: think of a less ugly hack
1691 len = VG_(strlen)( (Char*) a );
1692 shadow_mem_read_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001693 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001694 all__sanity_check("evh__pre_mem_read_asciiz-post");
1695}
1696
/* Event: the core is about to write [a, a+size) on behalf of thread
   'tid' (e.g. a syscall output buffer described by 's').  Treat it as
   a client write for race-detection purposes. */
static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_write_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}
1707
1708static
1709void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1710 if (SHOW_EVENTS >= 1)
1711 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1712 (void*)a, len, (Int)is_inited );
1713 // FIXME: this is kinda stupid
1714 if (is_inited) {
1715 shadow_mem_make_New(get_current_Thread(), a, len);
1716 } else {
1717 shadow_mem_make_New(get_current_Thread(), a, len);
1718 }
sewardjf98e1c02008-10-25 16:22:41 +00001719 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001720 all__sanity_check("evh__pre_mem_read-post");
1721}
1722
1723static
1724void evh__die_mem_heap ( Addr a, SizeT len ) {
1725 if (SHOW_EVENTS >= 1)
1726 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1727 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001728 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001729 all__sanity_check("evh__pre_mem_read-post");
1730}
1731
/* Instrumentation helper (called from generated code): record a
   1-byte read at 'a' by the current thread. */
static VG_REGPARM(1)
void evh__mem_help_read_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_READ_1(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001738
/* Instrumentation helper: record a 2-byte read at 'a' by the current
   thread. */
static VG_REGPARM(1)
void evh__mem_help_read_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_READ_2(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001745
/* Instrumentation helper: record a 4-byte read at 'a' by the current
   thread. */
static VG_REGPARM(1)
void evh__mem_help_read_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_READ_4(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001752
/* Instrumentation helper: record an 8-byte read at 'a' by the current
   thread. */
static VG_REGPARM(1)
void evh__mem_help_read_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_READ_8(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001759
/* Instrumentation helper: record a 'size'-byte read at 'a' by the
   current thread (arbitrary-sized accesses). */
static VG_REGPARM(2)
void evh__mem_help_read_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_READ_N(hbthr, a, size);
}
1766
/* Instrumentation helper: record a 1-byte write at 'a' by the current
   thread. */
static VG_REGPARM(1)
void evh__mem_help_write_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_WRITE_1(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001773
/* Instrumentation helper: record a 2-byte write at 'a' by the current
   thread. */
static VG_REGPARM(1)
void evh__mem_help_write_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_WRITE_2(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001780
/* Instrumentation helper: record a 4-byte write at 'a' by the current
   thread. */
static VG_REGPARM(1)
void evh__mem_help_write_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_WRITE_4(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001787
/* Instrumentation helper: record an 8-byte write at 'a' by the
   current thread. */
static VG_REGPARM(1)
void evh__mem_help_write_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_WRITE_8(hbthr, a);
}
sewardjf98e1c02008-10-25 16:22:41 +00001794
/* Instrumentation helper: record a 'size'-byte write at 'a' by the
   current thread (arbitrary-sized accesses). */
static VG_REGPARM(2)
void evh__mem_help_write_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_WRITE_N(hbthr, a, size);
}
1801
sewardjd52392d2008-11-08 20:36:26 +00001802//static void evh__bus_lock(void) {
1803// Thread* thr;
1804// if (0) VG_(printf)("evh__bus_lock()\n");
1805// thr = get_current_Thread();
1806// tl_assert(thr); /* cannot fail - Thread* must already exist */
1807// evhH__post_thread_w_acquires_lock( thr, LK_nonRec, (Addr)&__bus_lock );
1808//}
1809//static void evh__bus_unlock(void) {
1810// Thread* thr;
1811// if (0) VG_(printf)("evh__bus_unlock()\n");
1812// thr = get_current_Thread();
1813// tl_assert(thr); /* cannot fail - Thread* must already exist */
1814// evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
1815//}
sewardjb4112022007-11-09 22:49:28 +00001816
1817
1818/* -------------- events to do with mutexes -------------- */
1819
1820/* EXPOSITION only: by intercepting lock init events we can show the
1821 user where the lock was initialised, rather than only being able to
1822 show where it was first locked. Intercepting lock initialisations
1823 is not necessary for the basic operation of the race checker. */
/* Event: pthread_mutex_init completed on 'mutex'; mbRec is 1 for a
   recursive mutex, 0 otherwise.  EXPOSITION only: registering the
   lock here lets us report where it was initialised rather than only
   where it was first locked. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}
1837
/* Event: pthread_mutex_destroy is about to run on 'mutex'.  Reports
   destruction of an unknown/wrong-kind lock or of a still-held lock,
   force-releases it if held, removes it from the lock-order graph and
   the address->Lock map, and frees the Lock record. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* A mutex destroy must name a lock we know about, of mutex kind. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop the lock from the lock-order-acquisition graph before
         deleting its record. */
      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1883
/* Event: pthread_mutex_{lock,trylock} is about to run on 'mutex'
   (isTryLock == 1 for the trylock form).  Only sanity-checks the
   argument and reports likely self-deadlock; the lockset update
   happens in the _POST handler, after the real call succeeds. */
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Complain if a rwlock is being passed where a mutex belongs. */
   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HG_(record_error_Misc)( thr, "Attempt to re-lock a "
                                   "non-recursive lock I already hold" );
   }
}
1920
/* Event: pthread_mutex_{lock,trylock} succeeded on 'mutex'.  Records
   the write-acquisition in the thread's lockset and the lock-order
   graph. */
static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}
1938
/* Event: pthread_mutex_unlock is about to run on 'mutex'.  Performs
   the release bookkeeping (lockset update, error checks) before the
   real unlock happens. */
static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}
1952
1953static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1954{
1955 // only called if the real library call succeeded - so mutex is sane
1956 Thread* thr;
1957 if (SHOW_EVENTS >= 1)
1958 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1959 (Int)tid, (void*)mutex );
1960 thr = map_threads_maybe_lookup( tid );
1961 tl_assert(thr); /* cannot fail - Thread* must already exist */
1962
1963 // anything we should do here?
1964}
1965
1966
1967/* --------------- events to do with CVs --------------- */
1968
sewardjf98e1c02008-10-25 16:22:41 +00001969/* A mapping from CV to the SO associated with it. When the CV is
1970 signalled/broadcasted upon, we do a 'send' into the SO, and when a
1971 wait on it completes, we do a 'recv' from the SO. This is believed
1972 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00001973 signallings/broadcasts.
1974*/
1975
/* pthread_cond_t* -> SO*.  Created lazily; see map_cond_to_SO_INIT. */
static WordFM* map_cond_to_SO = NULL;
sewardjb4112022007-11-09 22:49:28 +00001978
/* Lazily create the cond-var -> SO map on first use. */
static void map_cond_to_SO_INIT ( void ) {
   if (UNLIKELY(map_cond_to_SO == NULL)) {
      map_cond_to_SO = VG_(newFM)( HG_(zalloc), "hg.mctSI.1", HG_(free), NULL );
      tl_assert(map_cond_to_SO != NULL);
   }
}
1985
/* Return the SO bound to condition variable 'cond', allocating and
   binding a fresh one if this is the first time 'cond' is seen. */
static SO* map_cond_to_SO_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_SO_INIT();
   if (VG_(lookupFM)( map_cond_to_SO, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_cond_to_SO, (UWord)cond, (UWord)so );
      return so;
   }
}
1998
/* Remove the binding for 'cond' (if any) and free its SO.  A no-op if
   'cond' was never bound -- e.g. destroying a never-signalled CV. */
static void map_cond_to_SO_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_SO_INIT();
   if (VG_(delFromFM)( map_cond_to_SO, &keyW, &valW, (UWord)cond )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == (UWord)cond);
      libhb_so_dealloc(so);
   }
}
2008
/* Event: pthread_cond_{signal,broadcast} is about to run on 'cond'. */
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread*   thr;
   SO*       so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: mutex is bogus
   // error-if: mutex is not locked

   so = map_cond_to_SO_lookup_or_alloc( cond );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}
2035
/* Event: pthread_cond_{timed}wait is about to run on (cond, mutex).
   Validates the mutex argument, reporting at most one complaint per
   call.  Returns True if it reckons 'mutex' is valid and held by this
   thread, else False. */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                   void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
         if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex

   return lk_valid;
}
2089
2090static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2091 void* cond, void* mutex )
2092{
sewardjf98e1c02008-10-25 16:22:41 +00002093 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2094 the SO for this cond, and 'recv' from it so as to acquire a
2095 dependency edge back to the signaller/broadcaster. */
2096 Thread* thr;
2097 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002098
2099 if (SHOW_EVENTS >= 1)
2100 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2101 "(ctid=%d, cond=%p, mutex=%p)\n",
2102 (Int)tid, (void*)cond, (void*)mutex );
2103
sewardjb4112022007-11-09 22:49:28 +00002104 thr = map_threads_maybe_lookup( tid );
2105 tl_assert(thr); /* cannot fail - Thread* must already exist */
2106
2107 // error-if: cond is also associated with a different mutex
2108
sewardjf98e1c02008-10-25 16:22:41 +00002109 so = map_cond_to_SO_lookup_or_alloc( cond );
2110 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002111
sewardjf98e1c02008-10-25 16:22:41 +00002112 if (!libhb_so_everSent(so)) {
2113 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2114 it? If this happened it would surely be a bug in the threads
2115 library. Or one of those fabled "spurious wakeups". */
2116 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2117 "succeeded on"
2118 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002119 }
sewardjf98e1c02008-10-25 16:22:41 +00002120
2121 /* anyway, acquire a dependency on it. */
2122 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
2123}
2124
/* Event: pthread_cond_destroy is about to run on 'cond'. */
static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_SO_delete( cond );
}
2138
2139
2140/* -------------- events to do with rwlocks -------------- */
2141
2142/* EXPOSITION only */
/* Event: pthread_rwlock_init completed on 'rwl'.  EXPOSITION only --
   registers the lock so its initialisation point can be reported. */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
2153
/* Event: pthread_rwlock_destroy is about to run on 'rwl'.  Mirrors
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE: reports bad arguments and
   destruction of a held lock, force-releases it, and deletes the
   Lock record and its lock-order-graph node. */
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         /* NOTE(review): message says "mutex" although the object is a
            rwlock -- looks like a copy-paste from the mutex handler;
            confirm before changing the user-visible text. */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2199
/* Event: pthread_rwlock_{rd,wr,tryrd,trywr}lock is about to run on
   'rwl'; isW is 1 for the write forms, isTryLock 1 for the try forms.
   Only sanity-checks the argument; lockset updates happen in the
   _POST handler. */
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh.  */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
2227
/* Event: an rwlock acquisition on 'rwl' succeeded; isW says whether
   it was a write (1) or read (0) acquisition.  Dispatches to the
   appropriate lockset-update routine. */
static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}
2248
/* Event: pthread_rwlock_unlock is about to run on 'rwl'.  Performs
   the release bookkeeping before the real unlock happens. */
static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}
2262
2263static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2264{
2265 // only called if the real library call succeeded - so mutex is sane
2266 Thread* thr;
2267 if (SHOW_EVENTS >= 1)
2268 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2269 (Int)tid, (void*)rwl );
2270 thr = map_threads_maybe_lookup( tid );
2271 tl_assert(thr); /* cannot fail - Thread* must already exist */
2272
2273 // anything we should do here?
2274}
2275
2276
2277/* --------------- events to do with semaphores --------------- */
2278
sewardj11e352f2007-11-30 11:11:02 +00002279/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002280 variables. */
2281
sewardjf98e1c02008-10-25 16:22:41 +00002282/* For each semaphore, we maintain a stack of SOs. When a 'post'
2283 operation is done on a semaphore (unlocking, essentially), a new SO
2284 is created for the posting thread, the posting thread does a strong
2285 send to it (which merely installs the posting thread's VC in the
2286 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002287
2288 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002289 semaphore, we pop a SO off the semaphore's stack (which should be
2290 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002291 dependencies between posters and waiters of the semaphore.
2292
sewardjf98e1c02008-10-25 16:22:41 +00002293 It may not be necessary to use a stack - perhaps a bag of SOs would
2294 do. But we do need to keep track of how many unused-up posts have
2295 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002296
sewardjf98e1c02008-10-25 16:22:41 +00002297 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002298 twice on S. T3 cannot complete its waits without both T1 and T2
2299 posting. The above mechanism will ensure that T3 acquires
2300 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002301
sewardjf98e1c02008-10-25 16:22:41 +00002302 When a semaphore is initialised with value N, we do as if we'd
2303 posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
2305 semaphore to acquire a dependency on the initialisation point,
2306 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002307
2308 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2309 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002310*/
2311
sewardjf98e1c02008-10-25 16:22:41 +00002312/* sem_t* -> XArray* SO* */
2313static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002314
sewardjf98e1c02008-10-25 16:22:41 +00002315static void map_sem_to_SO_stack_INIT ( void ) {
2316 if (map_sem_to_SO_stack == NULL) {
2317 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2318 HG_(free), NULL );
2319 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002320 }
2321}
2322
/* Push 'so' onto the SO-stack associated with semaphore 'sem',
   creating that stack (an XArray of SO*) on the semaphore's first
   use.  The map takes no ownership of 'sem' itself; it is used only
   as a key. */
static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      /* First push for this semaphore: create its stack. */
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
   }
}
2339
/* ("maybe pop") Pop the most recently pushed SO for 'sem' off its
   stack and return it, or NULL if the stack is empty or the
   semaphore has no stack at all.  On success the caller takes
   ownership of the returned SO. */
static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord keyW;
   XArray* xa;
   SO* so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}
2363
/* Pre-handler for sem_destroy: deallocate every SO stacked against
   'sem', then remove the (now empty) stack from the map.  As noted
   in the block comment above, destroying a semaphore we have never
   heard of is silently tolerated (no error is reported). */
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* Remove and delete the stack itself, if one existed. */
   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
2391
/* Post-handler for sem_init(sem, ..., value).  Model the initial
   count by acting as if 'value' posts had already happened: discard
   any stale SO stack left over from a previous life of 'sem', then
   push 'value' strongly-sent SOs, so that up to 'value' subsequent
   waits acquire a dependency on this initialisation point. */
static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO* so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'valid' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
2432
/* Pre-handler for sem_post. */
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped of the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO* so;
   Thr* hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus
   /* NOTE(review): 'sem' is not validated here; a bogus address just
      accumulates a stack under that key. */

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}
2463
/* Post-handler for a successful sem_wait. */
static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO* so;
   Thr* hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      /* Acquire the dependency and free the SO - each post's SO is
         consumed by exactly one wait. */
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}
2501
2502
2503/*--------------------------------------------------------------*/
2504/*--- Lock acquisition order monitoring ---*/
2505/*--------------------------------------------------------------*/
2506
2507/* FIXME: here are some optimisations still to do in
2508 laog__pre_thread_acquires_lock.
2509
2510 The graph is structured so that if L1 --*--> L2 then L1 must be
2511 acquired before L2.
2512
2513 The common case is that some thread T holds (eg) L1 L2 and L3 and
2514 is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:
2516
2517 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
2518 produces the answer No (because there is no error).
2519
2520 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
2521 (because they already got added the first time T acquired Ln).
2522
2523 Hence cache these two events:
2524
2525 (1) Cache result of the query from last time. Invalidate the cache
2526 any time any edges are added to or deleted from laog.
2527
2528 (2) Cache these add-edge requests and ignore them if said edges
2529 have already been added to laog. Invalidate the cache any time
2530 any edges are deleted from laog.
2531*/
2532
2533typedef
2534 struct {
2535 WordSetID inns; /* in univ_laog */
2536 WordSetID outs; /* in univ_laog */
2537 }
2538 LAOGLinks;
2539
2540/* lock order acquisition graph */
2541static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
2542
2543/* EXPOSITION ONLY: for each edge in 'laog', record the two places
2544 where that edge was created, so that we can show the user later if
2545 we need to. */
2546typedef
2547 struct {
2548 Addr src_ga; /* Lock guest addresses for */
2549 Addr dst_ga; /* src/dst of the edge */
2550 ExeContext* src_ec; /* And corresponding places where that */
2551 ExeContext* dst_ec; /* ordering was established */
2552 }
2553 LAOGLinkExposition;
2554
sewardj250ec2e2008-02-15 22:02:30 +00002555static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00002556 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
2557 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
2558 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
2559 if (llx1->src_ga < llx2->src_ga) return -1;
2560 if (llx1->src_ga > llx2->src_ga) return 1;
2561 if (llx1->dst_ga < llx2->dst_ga) return -1;
2562 if (llx1->dst_ga > llx2->dst_ga) return 1;
2563 return 0;
2564}
2565
2566static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
2567/* end EXPOSITION ONLY */
2568
2569
/* Debug only: dump the whole lock acquisition-order graph, one node
   per line with its in-edge and out-edge sets.  'who' identifies the
   requester in the output header. */
static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      /* reset the iteration cursors for the next lookup */
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
2596
/* Add the edge src --> dst to laog, updating src's out-edge set and
   dst's in-edge set and creating graph nodes on demand.  If the edge
   is genuinely new and both locks carry acquisition contexts, also
   record those two contexts in laog_exposition, so a later ordering
   violation can show the user where src-before-dst was first
   established. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src not yet in the graph: create its node with this one
         out-edge and no in-edges. */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* dst not yet in the graph: create its node with this one
         in-edge and no out-edges. */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   /* forward and reverse views of the edge must agree */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}
2676
/* Remove the edge src --> dst from laog, updating src's out-edge set
   and dst's in-edge set.  Nodes absent from the graph are silently
   skipped; nodes themselves are never deleted here, even if they end
   up with no edges. */
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}
2699
2700__attribute__((noinline))
2701static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
2702 Word keyW;
2703 LAOGLinks* links;
2704 keyW = 0;
2705 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002706 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00002707 tl_assert(links);
2708 tl_assert(keyW == (Word)lk);
2709 return links->outs;
2710 } else {
2711 return HG_(emptyWS)( univ_laog );
2712 }
2713}
2714
2715__attribute__((noinline))
2716static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
2717 Word keyW;
2718 LAOGLinks* links;
2719 keyW = 0;
2720 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002721 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00002722 tl_assert(links);
2723 tl_assert(keyW == (Word)lk);
2724 return links->inns;
2725 } else {
2726 return HG_(emptyWS)( univ_laog );
2727 }
2728}
2729
/* Invariant check on laog: for every node, each in-edge source must
   list the node among its successors, and each out-edge target must
   list it among its predecessors.  On any asymmetry the graph is
   dumped and we assert. */
__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   if ( !laog )
      return; /* nothing much we can do */
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* every in-edge must be mirrored by an out-edge at its source */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      /* every out-edge must be mirrored by an in-edge at its target */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
2771
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL.  Implemented as an iterative depth-first search with an
   explicit worklist (stack) and a visited set. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock* ret;
   Word i, ssz;
   XArray* stack; /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock* here;
   WordSetID succs;
   Word succs_size;
   UWord* succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret = NULL;
   stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* worklist exhausted without reaching 'dsts': no path */
      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* reached a destination: done */
      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue; /* already expanded this node */

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
2827
2828
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
   NB: 'thr' reflects the state BEFORE 'lk' is added to its lockset.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word ls_size, i;
   Lock* other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* Lazily create the graph and the edge-origin map on first use. */
   if (!laog)
      laog = VG_(newFM)( HG_(zalloc), "hg.lptal.1",
                         HG_(free), NULL/*unboxedcmp*/ );
   if (!laog_exposition)
      laog_exposition = VG_(newFM)( HG_(zalloc), "hg.lptal.2", HG_(free),
                                    cmp_LAOGLinkExposition );

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk)  |  old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
2916
2917
2918/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
2919
/* Remove lock 'lk' from the graph: drop all edges into and out of it,
   then connect each surviving (pred, succ) pair directly, so the
   transitive ordering constraints that passed through 'lk' are
   preserved. */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   /* Detach 'lk' from all its predecessors... */
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   /* ... and from all its successors. */
   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* Short-circuit pred --> succ for every pair (skipping self-edges). */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}
2949
sewardj1cbc12f2008-11-10 16:16:46 +00002950//__attribute__((noinline))
2951//static void laog__handle_lock_deletions (
2952// WordSetID /* in univ_laog */ locksToDelete
2953// )
2954//{
2955// Word i, ws_size;
2956// UWord* ws_words;
2957//
2958// if (!laog)
2959// laog = VG_(newFM)( HG_(zalloc), "hg.lhld.1", HG_(free), NULL/*unboxedcmp*/ );
2960// if (!laog_exposition)
2961// laog_exposition = VG_(newFM)( HG_(zalloc), "hg.lhld.2", HG_(free),
2962// cmp_LAOGLinkExposition );
2963//
2964// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
2965// for (i = 0; i < ws_size; i++)
2966// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
2967//
2968// if (HG_(clo_sanity_flags) & SCE_LAOG)
2969// all__sanity_check("laog__handle_lock_deletions-post");
2970//}
sewardjb4112022007-11-09 22:49:28 +00002971
2972
2973/*--------------------------------------------------------------*/
2974/*--- Malloc/free replacements ---*/
2975/*--------------------------------------------------------------*/
2976
2977typedef
2978 struct {
2979 void* next; /* required by m_hashtable */
2980 Addr payload; /* ptr to actual block */
2981 SizeT szB; /* size requested */
2982 ExeContext* where; /* where it was allocated */
2983 Thread* thr; /* allocating thread */
2984 }
2985 MallocMeta;
2986
2987/* A hash table of MallocMetas, used to track malloc'd blocks
2988 (obviously). */
2989static VgHashTable hg_mallocmeta_table = NULL;
2990
2991
2992static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00002993 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00002994 tl_assert(md);
2995 return md;
2996}
/* Release a MallocMeta record obtained from new_MallocMeta. */
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}
3000
3001
3002/* Allocate a client block and set up the metadata for it. */
3003
/* Allocate 'szB' bytes for the client with alignment 'alignB',
   optionally zero-filled, create the MallocMeta bookkeeping entry for
   the block, and inform the lower-level memory machinery.  Returns
   the client pointer, or NULL if the underlying allocation fails. */
static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 ); /* callers screen out negatives */
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
3035
3036/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3037 Cast to a signed type to catch any unexpectedly negative args.
3038 We're assuming here that the size asked for is not greater than
3039 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3040 platforms). */
/* Client malloc().  Huge requests show up as negative after the
   signed cast (see comment above) and are rejected. */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
3046static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3047 if (((SSizeT)n) < 0) return NULL;
3048 return handle_alloc ( tid, n, VG_(clo_alignment),
3049 /*is_zeroed*/False );
3050}
3051static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3052 if (((SSizeT)n) < 0) return NULL;
3053 return handle_alloc ( tid, n, VG_(clo_alignment),
3054 /*is_zeroed*/False );
3055}
3056static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3057 if (((SSizeT)n) < 0) return NULL;
3058 return handle_alloc ( tid, n, align,
3059 /*is_zeroed*/False );
3060}
3061static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3062 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3063 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3064 /*is_zeroed*/True );
3065}
3066
3067
3068/* Free a client block, including getting rid of the relevant
3069 metadata. */
3070
3071static void handle_free ( ThreadId tid, void* p )
3072{
3073 MallocMeta *md, *old_md;
3074 SizeT szB;
3075
3076 /* First see if we can find the metadata for 'p'. */
3077 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3078 if (!md)
3079 return; /* apparently freeing a bogus address. Oh well. */
3080
3081 tl_assert(md->payload == (Addr)p);
3082 szB = md->szB;
3083
3084 /* Nuke the metadata block */
3085 old_md = (MallocMeta*)
3086 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3087 tl_assert(old_md); /* it must be present - we just found it */
3088 tl_assert(old_md == md);
3089 tl_assert(old_md->payload == (Addr)p);
3090
3091 VG_(cli_free)((void*)old_md->payload);
3092 delete_MallocMeta(old_md);
3093
3094 /* Tell the lower level memory wranglers. */
3095 evh__die_mem_heap( (Addr)p, szB );
3096}
3097
3098static void hg_cli__free ( ThreadId tid, void* p ) {
3099 handle_free(tid, p);
3100}
3101static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3102 handle_free(tid, p);
3103}
3104static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3105 handle_free(tid, p);
3106}
3107
3108
3109static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3110{
3111 MallocMeta *md, *md_new, *md_tmp;
3112 SizeT i;
3113
3114 Addr payload = (Addr)payloadV;
3115
3116 if (((SSizeT)new_size) < 0) return NULL;
3117
3118 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3119 if (!md)
3120 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3121
3122 tl_assert(md->payload == payload);
3123
3124 if (md->szB == new_size) {
3125 /* size unchanged */
3126 md->where = VG_(record_ExeContext)(tid, 0);
3127 return payloadV;
3128 }
3129
3130 if (md->szB > new_size) {
3131 /* new size is smaller */
3132 md->szB = new_size;
3133 md->where = VG_(record_ExeContext)(tid, 0);
3134 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3135 return payloadV;
3136 }
3137
3138 /* else */ {
3139 /* new size is bigger */
3140 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3141
3142 /* First half kept and copied, second half new */
3143 // FIXME: shouldn't we use a copier which implements the
3144 // memory state machine?
3145 shadow_mem_copy_range( payload, p_new, md->szB );
3146 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003147 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003148 /* FIXME: can anything funny happen here? specifically, if the
3149 old range contained a lock, then die_mem_heap will complain.
3150 Is that the correct behaviour? Not sure. */
3151 evh__die_mem_heap( payload, md->szB );
3152
3153 /* Copy from old to new */
3154 for (i = 0; i < md->szB; i++)
3155 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3156
3157 /* Because the metadata hash table is index by payload address,
3158 we have to get rid of the old hash table entry and make a new
3159 one. We can't just modify the existing metadata in place,
3160 because then it would (almost certainly) be in the wrong hash
3161 chain. */
3162 md_new = new_MallocMeta();
3163 *md_new = *md;
3164
3165 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3166 tl_assert(md_tmp);
3167 tl_assert(md_tmp == md);
3168
3169 VG_(cli_free)((void*)md->payload);
3170 delete_MallocMeta(md);
3171
3172 /* Update fields */
3173 md_new->where = VG_(record_ExeContext)( tid, 0 );
3174 md_new->szB = new_size;
3175 md_new->payload = p_new;
3176 md_new->thr = map_threads_lookup( tid );
3177
3178 /* and add */
3179 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3180
3181 return (void*)p_new;
3182 }
3183}
3184
3185
3186/*--------------------------------------------------------------*/
3187/*--- Instrumentation ---*/
3188/*--------------------------------------------------------------*/
3189
3190static void instrument_mem_access ( IRSB* bbOut,
3191 IRExpr* addr,
3192 Int szB,
3193 Bool isStore,
3194 Int hWordTy_szB )
3195{
3196 IRType tyAddr = Ity_INVALID;
3197 HChar* hName = NULL;
3198 void* hAddr = NULL;
3199 Int regparms = 0;
3200 IRExpr** argv = NULL;
3201 IRDirty* di = NULL;
3202
3203 tl_assert(isIRAtom(addr));
3204 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3205
3206 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3207 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3208
3209 /* So the effective address is in 'addr' now. */
3210 regparms = 1; // unless stated otherwise
3211 if (isStore) {
3212 switch (szB) {
3213 case 1:
3214 hName = "evh__mem_help_write_1";
3215 hAddr = &evh__mem_help_write_1;
3216 argv = mkIRExprVec_1( addr );
3217 break;
3218 case 2:
3219 hName = "evh__mem_help_write_2";
3220 hAddr = &evh__mem_help_write_2;
3221 argv = mkIRExprVec_1( addr );
3222 break;
3223 case 4:
3224 hName = "evh__mem_help_write_4";
3225 hAddr = &evh__mem_help_write_4;
3226 argv = mkIRExprVec_1( addr );
3227 break;
3228 case 8:
3229 hName = "evh__mem_help_write_8";
3230 hAddr = &evh__mem_help_write_8;
3231 argv = mkIRExprVec_1( addr );
3232 break;
3233 default:
3234 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3235 regparms = 2;
3236 hName = "evh__mem_help_write_N";
3237 hAddr = &evh__mem_help_write_N;
3238 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3239 break;
3240 }
3241 } else {
3242 switch (szB) {
3243 case 1:
3244 hName = "evh__mem_help_read_1";
3245 hAddr = &evh__mem_help_read_1;
3246 argv = mkIRExprVec_1( addr );
3247 break;
3248 case 2:
3249 hName = "evh__mem_help_read_2";
3250 hAddr = &evh__mem_help_read_2;
3251 argv = mkIRExprVec_1( addr );
3252 break;
3253 case 4:
3254 hName = "evh__mem_help_read_4";
3255 hAddr = &evh__mem_help_read_4;
3256 argv = mkIRExprVec_1( addr );
3257 break;
3258 case 8:
3259 hName = "evh__mem_help_read_8";
3260 hAddr = &evh__mem_help_read_8;
3261 argv = mkIRExprVec_1( addr );
3262 break;
3263 default:
3264 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3265 regparms = 2;
3266 hName = "evh__mem_help_read_N";
3267 hAddr = &evh__mem_help_read_N;
3268 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3269 break;
3270 }
3271 }
3272
3273 /* Add the helper. */
3274 tl_assert(hName);
3275 tl_assert(hAddr);
3276 tl_assert(argv);
3277 di = unsafeIRDirty_0_N( regparms,
3278 hName, VG_(fnptr_to_fnentry)( hAddr ),
3279 argv );
3280 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
3281}
3282
3283
sewardjd52392d2008-11-08 20:36:26 +00003284//static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
3285//{
3286// switch (event) {
3287// case Imbe_SnoopedStoreBegin:
3288// case Imbe_SnoopedStoreEnd:
3289// /* These arise from ppc stwcx. insns. They should perhaps be
3290// handled better. */
3291// break;
3292// case Imbe_Fence:
3293// break; /* not interesting */
3294// case Imbe_BusLock:
3295// case Imbe_BusUnlock:
3296// addStmtToIRSB(
3297// bbOut,
3298// IRStmt_Dirty(
3299// unsafeIRDirty_0_N(
3300// 0/*regparms*/,
3301// event == Imbe_BusLock ? "evh__bus_lock"
3302// : "evh__bus_unlock",
3303// VG_(fnptr_to_fnentry)(
3304// event == Imbe_BusLock ? &evh__bus_lock
3305// : &evh__bus_unlock
3306// ),
3307// mkIRExprVec_0()
3308// )
3309// )
3310// );
3311// break;
3312// default:
3313// tl_assert(0);
3314// }
3315//}
sewardjb4112022007-11-09 22:49:28 +00003316
3317
/* Core instrumentation pass.  Produces a copy of 'bbIn' in which
   every guest memory access (Ist_Store, Iex_Load in Ist_WrTmp, and
   memory-touching dirty helpers) is preceded by a call into one of
   the evh__mem_help_* handlers, inserted via instrument_mem_access.
   Stores made while an x86/amd64 bus lock is held (between
   Imbe_BusLock and Imbe_BusUnlock marks) are deliberately NOT
   instrumented; loads in that window still are.  'closure', 'layout'
   and 'vge' are part of the tool interface but unused here. */
static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int   i;
   IRSB* bbOut;
   /* True between an Imbe_BusLock and the matching Imbe_BusUnlock. */
   Bool x86busLocked = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      IRStmt* st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_IMark:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_MBE:
            /* Track bus-lock state; fences are ignored.  Any other
               memory-bus event is unexpected and aborts. */
            //instrument_memory_bus_event( bbOut, st->Ist.MBE.event );
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               case Imbe_BusLock:
                  tl_assert(x86busLocked == False);
                  x86busLocked = True;
                  break;
               case Imbe_BusUnlock:
                  tl_assert(x86busLocked == True);
                  x86busLocked = False;
                  break;
               default:
                  goto unhandled;
            }
            break;

         case Ist_Store:
            /* Stores inside a LOCK-prefixed region are skipped. */
            if (!x86busLocked)
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            break;

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               instrument_mem_access(
                  bbOut,
                  data->Iex.Load.addr,
                  sizeofIRType(data->Iex.Load.ty),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               /* A Modify access is reported as both a read and a
                  write. */
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, True/*isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      /* The original statement is always kept, after any helper calls
         emitted for it. */
      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
3444
3445
3446/*----------------------------------------------------------------*/
3447/*--- Client requests ---*/
3448/*----------------------------------------------------------------*/
3449
/* Map from client pthread_t values to our Thread* records.  Filled in
   by the SET_MY_PTHREAD_T client request and consulted at
   pthread_join time to identify the quitting thread.  Created lazily
   by map_pthread_t_to_Thread_INIT. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
3452
3453static void map_pthread_t_to_Thread_INIT ( void ) {
3454 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00003455 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
3456 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00003457 tl_assert(map_pthread_t_to_Thread != NULL);
3458 }
3459}
3460
3461
/* Dispatcher for Helgrind client requests (marker 'H','G').  Returns
   False for requests that are not ours; otherwise handles the
   request, writes a reply into *ret (default 0) and returns True.
   Unrecognised 'HG' requests are fatal (tl_assert2). */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         /* A pthreads API call failed in the client; report it as a
            tool error.  args: fnname, err, errstr. */
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

//zz       case _VG_USERREQ__HG_GET_MY_SEGMENT: { // -> Segment*
//zz          Thread*   thr;
//zz          SegmentID segid;
//zz          Segment*  seg;
//zz          thr = map_threads_maybe_lookup( tid );
//zz          tl_assert(thr); /* cannot fail */
//zz          segid = thr->csegid;
//zz          tl_assert(is_sane_SegmentID(segid));
//zz          seg = map_segments_lookup( segid );
//zz          tl_assert(seg);
//zz          *ret = (UWord)seg;
//zz          break;
//zz       }

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
3683
3684
3685/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00003686/*--- Setup ---*/
3687/*----------------------------------------------------------------*/
3688
/* Process one Helgrind-specific command-line option.  Returns True if
   the option was recognised (either by us or by the
   malloc-replacement machinery), False on a malformed argument. */
static Bool hg_process_cmd_line_option ( Char* arg )
{
   if (VG_CLO_STREQ(arg, "--track-lockorders=no"))
      HG_(clo_track_lockorders) = False;
   else if (VG_CLO_STREQ(arg, "--track-lockorders=yes"))
      HG_(clo_track_lockorders) = True;

   else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=no"))
      HG_(clo_cmp_race_err_addrs) = False;
   else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=yes"))
      HG_(clo_cmp_race_err_addrs) = True;

   else if (VG_CLO_STREQN(13, arg, "--trace-addr=")) {
      /* Tracing an address implies at least trace level 1. */
      HG_(clo_trace_addr) = VG_(atoll16)(&arg[13]);
      if (HG_(clo_trace_level) == 0)
         HG_(clo_trace_level) = 1;
   }
   /* NOTE(review): VG_BNUM_CLO expands to a bounded-numeric option
      clause and presumably continues this if-else chain — confirm
      against pub_tool_options.h before restructuring. */
   else VG_BNUM_CLO(arg, "--trace-level", HG_(clo_trace_level), 0, 2)

   /* "stuvwx" --> stuvwx (binary) */
   else if (VG_CLO_STREQN(18, arg, "--hg-sanity-flags=")) {
      Int j;
      Char* opt = & arg[18];

      if (6 != VG_(strlen)(opt)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits");
         return False;
      }
      /* Each '1' digit sets the corresponding bit, most significant
         first: digit j controls bit (6-1-j). */
      for (j = 0; j < 6; j++) {
         if ('0' == opt[j]) { /* do nothing */ }
         else if ('1' == opt[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
3735
/* Print the user-visible option summary (for --help). */
static void hg_print_usage ( void )
{
   VG_(printf)(
" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
" --trace-addr=0xXXYYZZ show all state changes for address 0xXXYYZZ\n"
" --trace-level=0|1|2 verbosity level of --trace-addr [1]\n"
   );
   VG_(replacement_malloc_print_usage)();
}
3745
/* Print the debugging-option summary (for --help-debug), including
   the meaning of each --hg-sanity-flags bit. */
static void hg_print_debug_usage ( void )
{
   VG_(replacement_malloc_print_debug_usage)();
   VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
               " at events (X = 0|1) [000000]\n");
   VG_(printf)(" --hg-sanity-flags values:\n");
   VG_(printf)(" 100000 crosscheck happens-before-graph searches\n");
   VG_(printf)(" 010000 after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
   VG_(printf)(" 000100 at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)(" 000010 at lock/unlock events\n");
   VG_(printf)(" 000001 at thread create/join events\n");
}
3763
/* Post-command-line-processing hook (tool interface).  Nothing is
   currently required here. */
static void hg_post_clo_init ( void )
{
}
3767
/* Exit-time hook: optionally dump data structures, run a final sanity
   check if any --hg-sanity-flags bit is set, and (at verbosity >= 2)
   print accumulated statistics before shutting down libhb. */
static void hg_fini ( Int exitcode )
{
   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_verbosity) >= 2) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog, "univ_laog" );
      }

      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
      //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)(" locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)(" threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      // stats__ga_LL_adds,
      // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)(" LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)(" locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}
3832
sewardjf98e1c02008-10-25 16:22:41 +00003833/* FIXME: move these somewhere sane */
3834
3835static
3836void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
3837{
3838 Thread* thr;
3839 ThreadId tid;
3840 UWord nActual;
3841 tl_assert(hbt);
3842 thr = libhb_get_Thr_opaque( hbt );
3843 tl_assert(thr);
3844 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
3845 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
3846 NULL, NULL, 0 );
3847 tl_assert(nActual <= nRequest);
3848 for (; nActual < nRequest; nActual++)
3849 frames[nActual] = 0;
3850}
3851
3852static
sewardjd52392d2008-11-08 20:36:26 +00003853ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00003854{
3855 Thread* thr;
3856 ThreadId tid;
3857 ExeContext* ec;
3858 tl_assert(hbt);
3859 thr = libhb_get_Thr_opaque( hbt );
3860 tl_assert(thr);
3861 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
3862 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00003863 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00003864}
3865
3866
sewardjb4112022007-11-09 22:49:28 +00003867static void hg_pre_clo_init ( void )
3868{
sewardjf98e1c02008-10-25 16:22:41 +00003869 Thr* hbthr_root;
sewardjb4112022007-11-09 22:49:28 +00003870 VG_(details_name) ("Helgrind");
3871 VG_(details_version) (NULL);
3872 VG_(details_description) ("a thread error detector");
3873 VG_(details_copyright_author)(
sewardj4d474d02008-02-11 11:34:59 +00003874 "Copyright (C) 2007-2008, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00003875 VG_(details_bug_reports_to) (VG_BUGS_TO);
3876 VG_(details_avg_translation_sizeB) ( 200 );
3877
3878 VG_(basic_tool_funcs) (hg_post_clo_init,
3879 hg_instrument,
3880 hg_fini);
3881
3882 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00003883 VG_(needs_tool_errors) (HG_(eq_Error),
3884 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00003885 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00003886 HG_(update_extra),
3887 HG_(recognised_suppression),
3888 HG_(read_extra_suppression_info),
3889 HG_(error_matches_suppression),
3890 HG_(get_error_name),
3891 HG_(print_extra_suppression_info));
sewardjb4112022007-11-09 22:49:28 +00003892
3893 VG_(needs_command_line_options)(hg_process_cmd_line_option,
3894 hg_print_usage,
3895 hg_print_debug_usage);
3896 VG_(needs_client_requests) (hg_handle_client_request);
3897
3898 // FIXME?
3899 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
3900 // hg_expensive_sanity_check);
3901
3902 VG_(needs_malloc_replacement) (hg_cli__malloc,
3903 hg_cli____builtin_new,
3904 hg_cli____builtin_vec_new,
3905 hg_cli__memalign,
3906 hg_cli__calloc,
3907 hg_cli__free,
3908 hg_cli____builtin_delete,
3909 hg_cli____builtin_vec_delete,
3910 hg_cli__realloc,
3911 HG_CLI__MALLOC_REDZONE_SZB );
3912
sewardjf98e1c02008-10-25 16:22:41 +00003913 VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00003914
3915 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00003916 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
3917 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00003918 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
3919 VG_(track_new_mem_stack) ( evh__new_mem );
3920
3921 // FIXME: surely this isn't thread-aware
3922 VG_(track_copy_mem_remap) ( shadow_mem_copy_range );
3923
3924 VG_(track_change_mem_mprotect) ( evh__set_perms );
3925
3926 VG_(track_die_mem_stack_signal)( evh__die_mem );
3927 VG_(track_die_mem_brk) ( evh__die_mem );
3928 VG_(track_die_mem_munmap) ( evh__die_mem );
3929 VG_(track_die_mem_stack) ( evh__die_mem );
3930
3931 // FIXME: what is this for?
3932 VG_(track_ban_mem_stack) (NULL);
3933
3934 VG_(track_pre_mem_read) ( evh__pre_mem_read );
3935 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
3936 VG_(track_pre_mem_write) ( evh__pre_mem_write );
3937 VG_(track_post_mem_write) (NULL);
3938
3939 /////////////////
3940
3941 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
3942 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
3943
3944 VG_(track_start_client_code)( evh__start_client_code );
3945 VG_(track_stop_client_code)( evh__stop_client_code );
3946
sewardjf98e1c02008-10-25 16:22:41 +00003947 /////////////////////////////////////////////
3948 hbthr_root = libhb_init( for_libhb__get_stacktrace,
sewardjf98e1c02008-10-25 16:22:41 +00003949 for_libhb__get_EC );
3950 /////////////////////////////////////////////
3951
3952 initialise_data_structures(hbthr_root);
sewardjb4112022007-11-09 22:49:28 +00003953
3954 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
3955 as described in comments at the top of pub_tool_hashtable.h, are
3956 met. Blargh. */
3957 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
3958 tl_assert( sizeof(UWord) == sizeof(Addr) );
3959 hg_mallocmeta_table
3960 = VG_(HT_construct)( "hg_malloc_metadata_table" );
3961
sewardjb4112022007-11-09 22:49:28 +00003962}
3963
/* Hand the core our pre-CLO initialiser and assert core/tool
   interface-version compatibility (standard Valgrind tool boilerplate). */
VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
3965
3966/*--------------------------------------------------------------------*/
3967/*--- end hg_main.c ---*/
3968/*--------------------------------------------------------------------*/