/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2008 OpenWorks LLP
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_debuginfo.h"  /* VG_(get_data_description) */
#include "pub_tool_wordfm.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when the client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note: this file needs to be compiled with -fno-strict-aliasing,
   since it contains a whole bunch of calls to lookupFM etc which
   cast between Word and pointer types.  gcc rightly complains that
   this breaks ANSI C strict aliasing rules, at -O2.  There are no
   complaints at -O, but -O2 gives worthwhile performance benefits
   over -O.
*/
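
/* Editorial note (not from the original source): a minimal sketch of
   the Word/pointer casting which trips strict aliasing at -O2.
   lookupFM takes Word* out-parameters, so pointer-typed results have
   their address cast to Word*, as map_locks_maybe_lookup does below: */
#if 0
   Lock* lk    = NULL;
   Bool  found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   /* At -O2, gcc may assume that a store through a Word* cannot
      modify a Lock*, so without -fno-strict-aliasing it could cache
      'lk' in a register across the call and miss the update. */
#endif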
sewardjb4112022007-11-09 22:49:28 +000077
sewardjefd3b4d2007-12-02 02:05:23 +000078// FIXME catch sync signals (SEGV, basically) and unlock BHL,
79// if held. Otherwise a LOCK-prefixed insn which segfaults
80// gets Helgrind into a total muddle as the BHL will not be
81// released after the insn.
82
sewardjb4112022007-11-09 22:49:28 +000083// FIXME what is supposed to happen to locks in memory which
84// is relocated as a result of client realloc?
85
sewardjb4112022007-11-09 22:49:28 +000086// FIXME put referencing ThreadId into Thread and get
87// rid of the slow reverse mapping function.
88
89// FIXME accesses to NoAccess areas: change state to Excl?
90
91// FIXME report errors for accesses of NoAccess memory?
92
93// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
94// the thread still holds the lock.
95
96/* ------------ Debug/trace options ------------ */
97
98// this is:
99// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
100// happens_before_wrk: 1000
101// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
102#define SHOW_EXPENSIVE_STUFF 0
103
104// 0 for silent, 1 for some stuff, 2 for lots of stuff
105#define SHOW_EVENTS 0
106
sewardjb4112022007-11-09 22:49:28 +0000107
108static void all__sanity_check ( Char* who ); /* fwds */
109
110#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
111
112// 0 for none, 1 for dump at end of run
113#define SHOW_DATA_STRUCTURES 0
114
115
/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin linked list of Locks */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* Never changed; we only care about its address.  It is treated as
   if it were a standard userspace lock.  Also we have a Lock*
   describing it, so it can participate in lock sets in the usual
   way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;

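/* Editorial sketch (not from the original source) of how these
   structures are used together, built only from calls that appear
   later in this file: map_locks resolves a guest lock address to its
   Lock*, and a thread's held-lock sets are WordSetIDs drawn from
   univ_lsets. */
#if 0
   Lock* lk = map_locks_maybe_lookup( ga );  /* NULL if ga is unknown */
   if (lk) {
      /* record that 'thr' now holds 'lk' (in some mode) ... */
      thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
      /* ... and query membership the way the sanity checkers do */
      tl_assert( HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ) );
   }
#endif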

/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic   = Thread_MAGIC;
   thread->hbthr   = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin   = admin_threads;
   admin_threads   = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin       = admin_locks;
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks = lock;
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add a reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                                      (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}

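/* Editorial usage sketch (not from the original source), mirroring
   how the event handlers below drive this small API: lock
   acquisition resolves (or creates) the Lock for a guest address,
   and destroying a lock removes the mapping.  Note map_locks_delete
   only unbinds the address; freeing the Lock itself is del_LockN's
   job. */
#if 0
   Lock* lk = map_locks_lookup_or_create(
                 LK_nonRec, lock_ga,
                 map_threads_reverse_lookup_SLOW(thr) );
   /* ... use lk ... */
   map_locks_delete( lock_ga );  /* asserts that the entry exists */
#endif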


/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guestaddr

      for each lk in Locks {

         lk->tag is valid
         lk->guestaddr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is a valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk have .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
          mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guestaddr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}


///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a good
//   chance that it is indeed the lock that the programmer intended to
//   use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This is the case where there are many
//   transitions ShR -> ShR, all with an empty lockset, and only later
//   is there a transition to ShM.  So what we want to do is note the
//   lossage lock at the point where a ShR -> ShR transition empties out
//   the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates
//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
//   we do not record the Lock* directly, as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
//
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
//
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
static void shadow_mem_copy_range ( Addr src, Addr dst, SizeT len )
{
   libhb_copy_shadow_state( src, dst, len );
}

static void shadow_mem_read_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_READ_N(hbthr, a, len);
}

static void shadow_mem_write_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_WRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_range_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_range_noaccess( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP   = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of
      this routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as an r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/rwlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of
      this routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short, anybody can get a read-lock on a rwlock
      provided it is either unlocked or already rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}

1279
1280/* The lock at 'lock_ga' is just about to be unlocked. Make all
1281 necessary updates, and also do all possible error checks. */
1282static
1283void evhH__pre_thread_releases_lock ( Thread* thr,
1284 Addr lock_ga, Bool isRDWR )
1285{
1286 Lock* lock;
1287 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001288 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001289
1290 /* This routine is called prior to a lock release, before
1291 libpthread has had a chance to validate the call. Hence we need
1292 to detect and reject any attempts to move the lock into an
1293 invalid state. Such attempts are bugs in the client.
1294
1295 isRDWR is True if we know from the wrapper context that lock_ga
1296 should refer to a reader-writer lock, and is False if [ditto]
1297 lock_ga should refer to a standard mutex. */
1298
sewardjf98e1c02008-10-25 16:22:41 +00001299 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001300 lock = map_locks_maybe_lookup( lock_ga );
1301
1302 if (!lock) {
1303 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1304 the client is trying to unlock it. So complain, then ignore
1305 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001306 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001307 return;
1308 }
1309
1310 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001311 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001312
1313 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001314 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1315 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001316 }
1317 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001318 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1319 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001320 }
1321
1322 if (!lock->heldBy) {
1323 /* The lock is not held. This indicates a serious bug in the
1324 client. */
1325 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001326 HG_(record_error_UnlockUnlocked)( thr, lock );
sewardjb4112022007-11-09 22:49:28 +00001327 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1328 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1329 goto error;
1330 }
1331
sewardjf98e1c02008-10-25 16:22:41 +00001332 /* test just above dominates */
1333 tl_assert(lock->heldBy);
1334 was_heldW = lock->heldW;
1335
sewardjb4112022007-11-09 22:49:28 +00001336 /* The lock is held. Is this thread one of the holders? If not,
1337 report a bug in the client. */
sewardj896f6f92008-08-19 08:38:52 +00001338 n = VG_(elemBag)( lock->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +00001339 tl_assert(n >= 0);
1340 if (n == 0) {
1341 /* We are not a current holder of the lock. This is a bug in
1342 the guest, and (per POSIX pthread rules) the unlock
1343 attempt will fail. So just complain and do nothing
1344 else. */
sewardj896f6f92008-08-19 08:38:52 +00001345 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001346 tl_assert(HG_(is_sane_Thread)(realOwner));
sewardjb4112022007-11-09 22:49:28 +00001347 tl_assert(realOwner != thr);
1348 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1349 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
sewardjf98e1c02008-10-25 16:22:41 +00001350 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
sewardjb4112022007-11-09 22:49:28 +00001351 goto error;
1352 }
1353
1354 /* Ok, we hold the lock 'n' times. */
1355 tl_assert(n >= 1);
1356
1357 lockN_release( lock, thr );
1358
1359 n--;
1360 tl_assert(n >= 0);
1361
1362 if (n > 0) {
1363 tl_assert(lock->heldBy);
sewardj896f6f92008-08-19 08:38:52 +00001364 tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
sewardjb4112022007-11-09 22:49:28 +00001365 /* We still hold the lock. So either it's a recursive lock
1366 or a rwlock which is currently r-held. */
1367 tl_assert(lock->kind == LK_mbRec
1368 || (lock->kind == LK_rdwr && !lock->heldW));
1369 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1370 if (lock->heldW)
1371 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1372 else
1373 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1374 } else {
1375 /* We no longer hold the lock. */
sewardjf98e1c02008-10-25 16:22:41 +00001376 tl_assert(!lock->heldBy);
1377 tl_assert(lock->heldW == False);
1378 //if (lock->heldBy) {
1379 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1380 //}
sewardjb4112022007-11-09 22:49:28 +00001381 /* update this thread's lockset accordingly. */
1382 thr->locksetA
1383 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1384 thr->locksetW
1385 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001386 /* push our VC into the lock */
1387 tl_assert(thr->hbthr);
1388 tl_assert(lock->hbso);
1389 /* If the lock was previously W-held, then we want to do a
1390 strong send, and if previously R-held, then a weak send. */
1391 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001392 }
1393 /* fall through */
1394
1395 error:
sewardjf98e1c02008-10-25 16:22:41 +00001396 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001397}
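/* Illustrative client-side sketch (not part of Helgrind, compiled
   out): the kinds of unlock misuse that
   evhH__pre_thread_releases_lock above diagnoses. */
#if 0
#include <pthread.h>
static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static void* other ( void* v ) {
   pthread_mutex_unlock(&mx);  /* UnlockForeign: 'mx' is held by main */
   return NULL;
}
int main ( void ) {
   pthread_t t;
   pthread_mutex_unlock(&mx);  /* UnlockUnlocked: 'mx' is not held */
   pthread_mutex_lock(&mx);
   pthread_create(&t, NULL, other, NULL);
   pthread_join(t, NULL);
   pthread_mutex_unlock(&mx);  /* ok: we hold it, write-held */
   return 0;
}
#endif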
1398
1399
sewardj9f569b72008-11-13 13:33:09 +00001400/* ---------------------------------------------------------- */
1401/* -------- Event handlers proper (evh__* functions) -------- */
1402/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001403
1404/* What is the Thread* for the currently running thread? This is
1405 absolutely performance critical. We receive notifications from the
1406 core for client code starts/stops, and cache the looked-up result
1407 in 'current_Thread'. Hence, for the vast majority of requests,
1408 finding the current thread reduces to a read of a global variable,
1409 provided get_current_Thread_in_C_C is inlined.
1410
1411 Outside of client code, current_Thread is NULL, and presumably
1412 any uses of it will cause a segfault. Hence:
1413
1414 - for uses definitely within client code, use
1415 get_current_Thread_in_C_C.
1416
1417 - for all other uses, use get_current_Thread.
1418*/
1419
1420static Thread* current_Thread = NULL;
1421
1422static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1423 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1424 tl_assert(current_Thread == NULL);
1425 current_Thread = map_threads_lookup( tid );
1426 tl_assert(current_Thread != NULL);
1427}
1428static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1429 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1430 tl_assert(current_Thread != NULL);
1431 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001432 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001433}
1434static inline Thread* get_current_Thread_in_C_C ( void ) {
1435 return current_Thread;
1436}
1437static inline Thread* get_current_Thread ( void ) {
1438 ThreadId coretid;
1439 Thread* thr;
1440 thr = get_current_Thread_in_C_C();
1441 if (LIKELY(thr))
1442 return thr;
1443 /* evidently not in client code. Do it the slow way. */
1444 coretid = VG_(get_running_tid)();
1445 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001446 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001447 of initial memory layout) and VG_(get_running_tid)() returns
1448 VG_INVALID_THREADID at that point. */
1449 if (coretid == VG_INVALID_THREADID)
1450 coretid = 1; /* KLUDGE */
1451 thr = map_threads_lookup( coretid );
1452 return thr;
1453}
1454
1455static
1456void evh__new_mem ( Addr a, SizeT len ) {
1457 if (SHOW_EVENTS >= 2)
1458 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1459 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001460 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001461 all__sanity_check("evh__new_mem-post");
1462}
1463
1464static
sewardj7cf4e6b2008-05-01 20:24:26 +00001465void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1466 if (SHOW_EVENTS >= 2)
1467 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1468 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001469 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001470 all__sanity_check("evh__new_mem_w_tid-post");
1471}
1472
1473static
sewardjb4112022007-11-09 22:49:28 +00001474void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001475 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001476 if (SHOW_EVENTS >= 1)
1477 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1478 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1479 if (rr || ww || xx)
1480 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001481 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001482 all__sanity_check("evh__new_mem_w_perms-post");
1483}
1484
1485static
1486void evh__set_perms ( Addr a, SizeT len,
1487 Bool rr, Bool ww, Bool xx ) {
1488 if (SHOW_EVENTS >= 1)
1489 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1490 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1491 /* Hmm. What should we do here, that actually makes any sense?
1492 Let's say: if neither readable nor writable, then declare it
1493 NoAccess, else leave it alone. */
1494 if (!(rr || ww))
1495 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001496 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001497 all__sanity_check("evh__set_perms-post");
1498}
1499
1500static
1501void evh__die_mem ( Addr a, SizeT len ) {
1502 if (SHOW_EVENTS >= 2)
1503 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1504 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001505 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001506 all__sanity_check("evh__die_mem-post");
1507}
1508
1509static
1510void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1511{
1512 if (SHOW_EVENTS >= 1)
1513 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1514 (Int)parent, (Int)child );
1515
1516 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001517 Thread* thr_p;
1518 Thread* thr_c;
1519 Thr* hbthr_p;
1520 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001521
sewardjf98e1c02008-10-25 16:22:41 +00001522 tl_assert(HG_(is_sane_ThreadId)(parent));
1523 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001524 tl_assert(parent != child);
1525
1526 thr_p = map_threads_maybe_lookup( parent );
1527 thr_c = map_threads_maybe_lookup( child );
1528
1529 tl_assert(thr_p != NULL);
1530 tl_assert(thr_c == NULL);
1531
sewardjf98e1c02008-10-25 16:22:41 +00001532 hbthr_p = thr_p->hbthr;
1533 tl_assert(hbthr_p != NULL);
1534 tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001535
sewardjf98e1c02008-10-25 16:22:41 +00001536 hbthr_c = libhb_create ( hbthr_p );
1537
1538 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001539 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001540 thr_c = mk_Thread( hbthr_c );
1541 tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
1542 libhb_set_Thr_opaque(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001543
1544 /* and bind it in the thread-map table */
1545 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001546 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1547 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001548
1549 /* Record where the parent is so we can later refer to this in
1550 error messages.
1551
1552 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1553 The stack snapshot is taken immediately after the parent has
1554 returned from its sys_clone call. Unfortunately there is no
1555 unwind info for the insn following "syscall" - reading the
1556 glibc sources confirms this. So we ask for a snapshot to be
1557 taken as if RIP was 3 bytes earlier, in a place where there
1558 is unwind info. Sigh.
1559 */
1560 { Word first_ip_delta = 0;
1561# if defined(VGP_amd64_linux)
1562 first_ip_delta = -3;
1563# endif
1564 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1565 }
sewardjb4112022007-11-09 22:49:28 +00001566 }
1567
sewardjf98e1c02008-10-25 16:22:41 +00001568 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001569 all__sanity_check("evh__pre_thread_create-post");
1570}
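/* Illustrative client-side sketch (compiled out, for exposition
   only): the parent->child dependency set up above via libhb_create
   is what lets a child read data the parent wrote before
   pthread_create without a race being reported. */
#if 0
#include <pthread.h>
static int data;
static void* child ( void* v ) {
   return (void*)(long)data;  /* ok: write below precedes the create */
}
int main ( void ) {
   pthread_t t;
   data = 42;                 /* happens-before pthread_create */
   pthread_create(&t, NULL, child, NULL);
   pthread_join(t, NULL);
   return 0;
}
#endif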
1571
1572static
1573void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1574{
1575 Int nHeld;
1576 Thread* thr_q;
1577 if (SHOW_EVENTS >= 1)
1578 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1579 (Int)quit_tid );
1580
1581 /* quit_tid has disappeared without joining to any other thread.
1582 Therefore there is no synchronisation event associated with its
1583 exit and so we have to pretty much treat it as if it was still
1584 alive but mysteriously making no progress. That is because, if
1585 we don't know when it really exited, then we can never say there
1586 is a point in time when we're sure the thread really has
1587 finished, and so we need to consider the possibility that it
1588 lingers indefinitely and continues to interact with other
1589 threads. */
1590 /* However, it might have rendezvous'd with a thread that called
1591 pthread_join with this one as arg, prior to this point (that's
1592 how NPTL works). In which case there has already been a prior
1593 sync event. So in any case, just let the thread exit. On NPTL,
1594 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001595 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001596 thr_q = map_threads_maybe_lookup( quit_tid );
1597 tl_assert(thr_q != NULL);
1598
1599 /* Complain if this thread holds any locks. */
1600 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1601 tl_assert(nHeld >= 0);
1602 if (nHeld > 0) {
1603 HChar buf[80];
1604 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1605 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001606 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001607 }
1608
1609 /* About the only thing we do need to do is clear the map_threads
1610 entry, in order that the Valgrind core can re-use it. */
sewardjf98e1c02008-10-25 16:22:41 +00001611 tl_assert(thr_q->coretid == quit_tid);
1612 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001613 map_threads_delete( quit_tid );
1614
sewardjf98e1c02008-10-25 16:22:41 +00001615 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001616 all__sanity_check("evh__pre_thread_ll_exit-post");
1617}
1618
sewardjf98e1c02008-10-25 16:22:41 +00001619
sewardjb4112022007-11-09 22:49:28 +00001620static
1621void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1622{
sewardjb4112022007-11-09 22:49:28 +00001623 Thread* thr_s;
1624 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001625 Thr* hbthr_s;
1626 Thr* hbthr_q;
1627 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001628
1629 if (SHOW_EVENTS >= 1)
1630 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1631 (Int)stay_tid, quit_thr );
1632
sewardjf98e1c02008-10-25 16:22:41 +00001633 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001634
1635 thr_s = map_threads_maybe_lookup( stay_tid );
1636 thr_q = quit_thr;
1637 tl_assert(thr_s != NULL);
1638 tl_assert(thr_q != NULL);
1639 tl_assert(thr_s != thr_q);
1640
sewardjf98e1c02008-10-25 16:22:41 +00001641 hbthr_s = thr_s->hbthr;
1642 hbthr_q = thr_q->hbthr;
1643 tl_assert(hbthr_s != hbthr_q);
1644 tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
1645 tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001646
sewardjf98e1c02008-10-25 16:22:41 +00001647 /* Allocate a temporary synchronisation object and use it to send
1648 an imaginary message from the quitter to the stayer, the purpose
1649 being to generate a dependence from the quitter to the
1650 stayer. */
1651 so = libhb_so_alloc();
1652 tl_assert(so);
1653 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1654 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1655 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001656
sewardjf98e1c02008-10-25 16:22:41 +00001657 /* evh__pre_thread_ll_exit issues an error message if the exiting
1658 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001659
1660 /* This holds because, at least when using NPTL as the thread
1661 library, we should be notified the low level thread exit before
1662 we hear of any join event on it. The low level exit
1663 notification feeds through into evh__pre_thread_ll_exit,
1664 which should clear the map_threads entry for it. Hence we
1665 expect there to be no map_threads entry at this point. */
1666 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1667 == VG_INVALID_THREADID);
1668
sewardjf98e1c02008-10-25 16:22:41 +00001669 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001670 all__sanity_check("evh__post_thread_join-post");
1671}
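/* Illustrative client-side sketch (compiled out): the quitter->stayer
   edge created above by the temporary SO is what makes the classic
   join idiom race-free. */
#if 0
#include <pthread.h>
static int result;
static void* worker ( void* v ) { result = 7; return NULL; }
int main ( void ) {
   pthread_t t;
   pthread_create(&t, NULL, worker, NULL);
   pthread_join(t, NULL);  /* strong recv: worker's write now ordered */
   return result;          /* ok: no race reported on 'result' */
}
#endif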
1672
1673static
1674void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1675 Addr a, SizeT size) {
1676 if (SHOW_EVENTS >= 2
1677 || (SHOW_EVENTS >= 1 && size != 1))
1678 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1679 (Int)tid, s, (void*)a, size );
1680 shadow_mem_read_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001681 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001682 all__sanity_check("evh__pre_mem_read-post");
1683}
1684
1685static
1686void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1687 Char* s, Addr a ) {
1688 Int len;
1689 if (SHOW_EVENTS >= 1)
1690 VG_(printf)("evh__pre_mem_read_asciiz(ctid=%d, \"%s\", %p)\n",
1691 (Int)tid, s, (void*)a );
1692 // FIXME: think of a less ugly hack
1693 len = VG_(strlen)( (Char*) a );
1694 shadow_mem_read_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001695 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001696 all__sanity_check("evh__pre_mem_read_asciiz-post");
1697}
1698
1699static
1700void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1701 Addr a, SizeT size ) {
1702 if (SHOW_EVENTS >= 1)
1703 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1704 (Int)tid, s, (void*)a, size );
1705 shadow_mem_write_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001706 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001707 all__sanity_check("evh__pre_mem_write-post");
1708}
1709
1710static
1711void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1712 if (SHOW_EVENTS >= 1)
1713 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1714 (void*)a, len, (Int)is_inited );
1715 // FIXME: this is kinda stupid -- inited and uninited memory
1716 // currently get identical treatment
1717 shadow_mem_make_New(get_current_Thread(), a, len);
sewardjf98e1c02008-10-25 16:22:41 +00001721 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001722 all__sanity_check("evh__new_mem_heap-post");
1723}
1724
1725static
1726void evh__die_mem_heap ( Addr a, SizeT len ) {
1727 if (SHOW_EVENTS >= 1)
1728 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1729 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001730 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001731 all__sanity_check("evh__die_mem_heap-post");
1732}
1733
sewardjb4112022007-11-09 22:49:28 +00001734static VG_REGPARM(1)
1735void evh__mem_help_read_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001736 Thread* thr = get_current_Thread_in_C_C();
1737 Thr* hbthr = thr->hbthr;
1738 LIBHB_READ_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001739}
sewardjf98e1c02008-10-25 16:22:41 +00001740
sewardjb4112022007-11-09 22:49:28 +00001741static VG_REGPARM(1)
1742void evh__mem_help_read_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001743 Thread* thr = get_current_Thread_in_C_C();
1744 Thr* hbthr = thr->hbthr;
1745 LIBHB_READ_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001746}
sewardjf98e1c02008-10-25 16:22:41 +00001747
sewardjb4112022007-11-09 22:49:28 +00001748static VG_REGPARM(1)
1749void evh__mem_help_read_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001750 Thread* thr = get_current_Thread_in_C_C();
1751 Thr* hbthr = thr->hbthr;
1752 LIBHB_READ_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001753}
sewardjf98e1c02008-10-25 16:22:41 +00001754
sewardjb4112022007-11-09 22:49:28 +00001755static VG_REGPARM(1)
1756void evh__mem_help_read_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001757 Thread* thr = get_current_Thread_in_C_C();
1758 Thr* hbthr = thr->hbthr;
1759 LIBHB_READ_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001760}
sewardjf98e1c02008-10-25 16:22:41 +00001761
sewardjb4112022007-11-09 22:49:28 +00001762static VG_REGPARM(2)
1763void evh__mem_help_read_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001764 Thread* thr = get_current_Thread_in_C_C();
1765 Thr* hbthr = thr->hbthr;
1766 LIBHB_READ_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001767}
1768
1769static VG_REGPARM(1)
1770void evh__mem_help_write_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001771 Thread* thr = get_current_Thread_in_C_C();
1772 Thr* hbthr = thr->hbthr;
1773 LIBHB_WRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001774}
sewardjf98e1c02008-10-25 16:22:41 +00001775
sewardjb4112022007-11-09 22:49:28 +00001776static VG_REGPARM(1)
1777void evh__mem_help_write_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001778 Thread* thr = get_current_Thread_in_C_C();
1779 Thr* hbthr = thr->hbthr;
1780 LIBHB_WRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001781}
sewardjf98e1c02008-10-25 16:22:41 +00001782
sewardjb4112022007-11-09 22:49:28 +00001783static VG_REGPARM(1)
1784void evh__mem_help_write_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001785 Thread* thr = get_current_Thread_in_C_C();
1786 Thr* hbthr = thr->hbthr;
1787 LIBHB_WRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001788}
sewardjf98e1c02008-10-25 16:22:41 +00001789
sewardjb4112022007-11-09 22:49:28 +00001790static VG_REGPARM(1)
1791void evh__mem_help_write_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001792 Thread* thr = get_current_Thread_in_C_C();
1793 Thr* hbthr = thr->hbthr;
1794 LIBHB_WRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001795}
sewardjf98e1c02008-10-25 16:22:41 +00001796
sewardjb4112022007-11-09 22:49:28 +00001797static VG_REGPARM(2)
1798void evh__mem_help_write_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001799 Thread* thr = get_current_Thread_in_C_C();
1800 Thr* hbthr = thr->hbthr;
1801 LIBHB_WRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001802}
1803
sewardjd52392d2008-11-08 20:36:26 +00001804//static void evh__bus_lock(void) {
1805// Thread* thr;
1806// if (0) VG_(printf)("evh__bus_lock()\n");
1807// thr = get_current_Thread();
1808// tl_assert(thr); /* cannot fail - Thread* must already exist */
1809// evhH__post_thread_w_acquires_lock( thr, LK_nonRec, (Addr)&__bus_lock );
1810//}
1811//static void evh__bus_unlock(void) {
1812// Thread* thr;
1813// if (0) VG_(printf)("evh__bus_unlock()\n");
1814// thr = get_current_Thread();
1815// tl_assert(thr); /* cannot fail - Thread* must already exist */
1816// evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
1817//}
sewardjb4112022007-11-09 22:49:28 +00001818
sewardj9f569b72008-11-13 13:33:09 +00001819/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001820/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001821/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001822
1823/* EXPOSITION only: by intercepting lock init events we can show the
1824 user where the lock was initialised, rather than only being able to
1825 show where it was first locked. Intercepting lock initialisations
1826 is not necessary for the basic operation of the race checker. */
1827static
1828void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1829 void* mutex, Word mbRec )
1830{
1831 if (SHOW_EVENTS >= 1)
1832 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1833 (Int)tid, mbRec, (void*)mutex );
1834 tl_assert(mbRec == 0 || mbRec == 1);
1835 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1836 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001837 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001838 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1839}
1840
1841static
1842void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1843{
1844 Thread* thr;
1845 Lock* lk;
1846 if (SHOW_EVENTS >= 1)
1847 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1848 (Int)tid, (void*)mutex );
1849
1850 thr = map_threads_maybe_lookup( tid );
1851 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001852 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001853
1854 lk = map_locks_maybe_lookup( (Addr)mutex );
1855
1856 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001857 HG_(record_error_Misc)(
1858 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001859 }
1860
1861 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001862 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001863 tl_assert( lk->guestaddr == (Addr)mutex );
1864 if (lk->heldBy) {
1865 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001866 HG_(record_error_Misc)(
1867 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001868 /* remove lock from locksets of all owning threads */
1869 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001870 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001871 lk->heldBy = NULL;
1872 lk->heldW = False;
1873 lk->acquired_at = NULL;
1874 }
1875 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001876 tl_assert( HG_(is_sane_LockN)(lk) );
1877
sewardj1cbc12f2008-11-10 16:16:46 +00001878 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001879 map_locks_delete( lk->guestaddr );
1880 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001881 }
1882
sewardjf98e1c02008-10-25 16:22:41 +00001883 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001884 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1885}
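/* Illustrative client-side sketch (compiled out) of the misuse
   diagnosed above: destroying a mutex that is still locked. */
#if 0
#include <pthread.h>
int main ( void ) {
   pthread_mutex_t mx;
   pthread_mutex_init(&mx, NULL);
   pthread_mutex_lock(&mx);
   pthread_mutex_destroy(&mx);  /* "pthread_mutex_destroy of a locked mutex" */
   return 0;
}
#endif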
1886
1887static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1888 void* mutex, Word isTryLock )
1889{
1890 /* Just check the mutex is sane; nothing else to do. */
1891 // 'mutex' may be invalid - not checked by wrapper
1892 Thread* thr;
1893 Lock* lk;
1894 if (SHOW_EVENTS >= 1)
1895 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1896 (Int)tid, (void*)mutex );
1897
1898 tl_assert(isTryLock == 0 || isTryLock == 1);
1899 thr = map_threads_maybe_lookup( tid );
1900 tl_assert(thr); /* cannot fail - Thread* must already exist */
1901
1902 lk = map_locks_maybe_lookup( (Addr)mutex );
1903
1904 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001905 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1906 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001907 }
1908
1909 if ( lk
1910 && isTryLock == 0
1911 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1912 && lk->heldBy
1913 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001914 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001915 /* uh, it's a non-recursive lock and we already w-hold it, and
1916 this is a real lock operation (not a speculative "tryLock"
1917 kind of thing). Duh. Deadlock coming up; but at least
1918 produce an error message. */
sewardjf98e1c02008-10-25 16:22:41 +00001919 HG_(record_error_Misc)( thr, "Attempt to re-lock a "
1920 "non-recursive lock I already hold" );
sewardjb4112022007-11-09 22:49:28 +00001921 }
1922}
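/* Illustrative client-side sketch (compiled out) of the self-deadlock
   warned about above: re-locking a non-recursive mutex that the
   calling thread already write-holds. */
#if 0
#include <pthread.h>
int main ( void ) {
   pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
   pthread_mutex_lock(&mx);
   pthread_mutex_lock(&mx);  /* warned here, then deadlocks for real */
   return 0;
}
#endif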
1923
1924static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1925{
1926 // only called if the real library call succeeded - so mutex is sane
1927 Thread* thr;
1928 if (SHOW_EVENTS >= 1)
1929 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1930 (Int)tid, (void*)mutex );
1931
1932 thr = map_threads_maybe_lookup( tid );
1933 tl_assert(thr); /* cannot fail - Thread* must already exist */
1934
1935 evhH__post_thread_w_acquires_lock(
1936 thr,
1937 LK_mbRec, /* if not known, create new lock with this LockKind */
1938 (Addr)mutex
1939 );
1940}
1941
1942static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1943{
1944 // 'mutex' may be invalid - not checked by wrapper
1945 Thread* thr;
1946 if (SHOW_EVENTS >= 1)
1947 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1948 (Int)tid, (void*)mutex );
1949
1950 thr = map_threads_maybe_lookup( tid );
1951 tl_assert(thr); /* cannot fail - Thread* must already exist */
1952
1953 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1954}
1955
1956static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1957{
1958 // only called if the real library call succeeded - so mutex is sane
1959 Thread* thr;
1960 if (SHOW_EVENTS >= 1)
1961 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1962 (Int)tid, (void*)mutex );
1963 thr = map_threads_maybe_lookup( tid );
1964 tl_assert(thr); /* cannot fail - Thread* must already exist */
1965
1966 // anything we should do here?
1967}
1968
1969
sewardj9f569b72008-11-13 13:33:09 +00001970/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001971/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00001972/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001973
sewardjf98e1c02008-10-25 16:22:41 +00001974/* A mapping from CV to the SO associated with it. When the CV is
1975 signalled/broadcasted upon, we do a 'send' into the SO, and when a
1976 wait on it completes, we do a 'recv' from the SO. This is believed
1977 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00001978 signallings/broadcasts.
1979*/
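/* Illustrative client-side sketch (compiled out, for exposition
   only): the 'send' at signal time and the 'recv' at wait-completion
   time order the signaller's prior writes before the waiter's
   subsequent reads. */
#if 0
#include <pthread.h>
static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready, data;
static void* producer ( void* v ) {
   data = 42;                       /* ordered before the signal */
   pthread_mutex_lock(&mx);
   ready = 1;
   pthread_cond_signal(&cv);        /* 'send' on the CV's SO */
   pthread_mutex_unlock(&mx);
   return NULL;
}
int main ( void ) {
   pthread_t t;
   pthread_create(&t, NULL, producer, NULL);
   pthread_mutex_lock(&mx);
   while (!ready)
      pthread_cond_wait(&cv, &mx);  /* 'recv' on successful return */
   pthread_mutex_unlock(&mx);
   pthread_join(t, NULL);
   return data;                     /* ok: no race on 'data' */
}
#endif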
1980
sewardjf98e1c02008-10-25 16:22:41 +00001981 /* pthread_cond_t* -> SO* */
1982static WordFM* map_cond_to_SO = NULL;
sewardjb4112022007-11-09 22:49:28 +00001983
sewardjf98e1c02008-10-25 16:22:41 +00001984static void map_cond_to_SO_INIT ( void ) {
1985 if (UNLIKELY(map_cond_to_SO == NULL)) {
sewardj9f569b72008-11-13 13:33:09 +00001986 map_cond_to_SO = VG_(newFM)( HG_(zalloc),
1987 "hg.mctSI.1", HG_(free), NULL );
sewardjf98e1c02008-10-25 16:22:41 +00001988 tl_assert(map_cond_to_SO != NULL);
1989 }
1990}
1991
1992static SO* map_cond_to_SO_lookup_or_alloc ( void* cond ) {
1993 UWord key, val;
1994 map_cond_to_SO_INIT();
1995 if (VG_(lookupFM)( map_cond_to_SO, &key, &val, (UWord)cond )) {
1996 tl_assert(key == (UWord)cond);
1997 return (SO*)val;
1998 } else {
1999 SO* so = libhb_so_alloc();
2000 VG_(addToFM)( map_cond_to_SO, (UWord)cond, (UWord)so );
2001 return so;
2002 }
2003}
2004
2005static void map_cond_to_SO_delete ( void* cond ) {
2006 UWord keyW, valW;
2007 map_cond_to_SO_INIT();
2008 if (VG_(delFromFM)( map_cond_to_SO, &keyW, &valW, (UWord)cond )) {
2009 SO* so = (SO*)valW;
2010 tl_assert(keyW == (UWord)cond);
2011 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00002012 }
2013}
2014
2015static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2016{
sewardjf98e1c02008-10-25 16:22:41 +00002017 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2018 cond to a SO if it is not already so bound, and 'send' on the
2019 SO. This is later used by other thread(s) which successfully
2020 exit from a pthread_cond_wait on the same cv; then they 'recv'
2021 from the SO, thereby acquiring a dependency on this signalling
2022 event. */
sewardjb4112022007-11-09 22:49:28 +00002023 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +00002024 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002025
2026 if (SHOW_EVENTS >= 1)
2027 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2028 (Int)tid, (void*)cond );
2029
sewardjb4112022007-11-09 22:49:28 +00002030 thr = map_threads_maybe_lookup( tid );
2031 tl_assert(thr); /* cannot fail - Thread* must already exist */
2032
2033 // error-if: mutex is bogus
2034 // error-if: mutex is not locked
2035
sewardjf98e1c02008-10-25 16:22:41 +00002036 so = map_cond_to_SO_lookup_or_alloc( cond );
2037 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002038
sewardjf98e1c02008-10-25 16:22:41 +00002039 libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002040}
2041
2042/* returns True if it reckons 'mutex' is valid and held by this
2043 thread, else False */
2044static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2045 void* cond, void* mutex )
2046{
2047 Thread* thr;
2048 Lock* lk;
2049 Bool lk_valid = True;
2050
2051 if (SHOW_EVENTS >= 1)
2052 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2053 "(ctid=%d, cond=%p, mutex=%p)\n",
2054 (Int)tid, (void*)cond, (void*)mutex );
2055
sewardjb4112022007-11-09 22:49:28 +00002056 thr = map_threads_maybe_lookup( tid );
2057 tl_assert(thr); /* cannot fail - Thread* must already exist */
2058
2059 lk = map_locks_maybe_lookup( (Addr)mutex );
2060
2061 /* Check for stupid mutex arguments. There are various ways to be
2062 a bozo. Only complain once, though, even if more than one thing
2063 is wrong. */
2064 if (lk == NULL) {
2065 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002066 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002067 thr,
2068 "pthread_cond_{timed}wait called with invalid mutex" );
2069 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002070 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002071 if (lk->kind == LK_rdwr) {
2072 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002073 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002074 thr, "pthread_cond_{timed}wait called with mutex "
2075 "of type pthread_rwlock_t*" );
2076 } else
2077 if (lk->heldBy == NULL) {
2078 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002079 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002080 thr, "pthread_cond_{timed}wait called with un-held mutex");
2081 } else
2082 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002083 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002084 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002085 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002086 thr, "pthread_cond_{timed}wait called with mutex "
2087 "held by a different thread" );
2088 }
2089 }
2090
2091 // error-if: cond is also associated with a different mutex
2092
2093 return lk_valid;
2094}
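/* Illustrative client-side sketch (compiled out) of one of the
   misuses checked above: waiting on a CV with a mutex the caller
   does not hold. */
#if 0
#include <pthread.h>
static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
int main ( void ) {
   pthread_cond_wait(&cv, &mx);  /* "called with un-held mutex" */
   return 0;
}
#endif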
2095
2096static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2097 void* cond, void* mutex )
2098{
sewardjf98e1c02008-10-25 16:22:41 +00002099 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2100 the SO for this cond, and 'recv' from it so as to acquire a
2101 dependency edge back to the signaller/broadcaster. */
2102 Thread* thr;
2103 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002104
2105 if (SHOW_EVENTS >= 1)
2106 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2107 "(ctid=%d, cond=%p, mutex=%p)\n",
2108 (Int)tid, (void*)cond, (void*)mutex );
2109
sewardjb4112022007-11-09 22:49:28 +00002110 thr = map_threads_maybe_lookup( tid );
2111 tl_assert(thr); /* cannot fail - Thread* must already exist */
2112
2113 // error-if: cond is also associated with a different mutex
2114
sewardjf98e1c02008-10-25 16:22:41 +00002115 so = map_cond_to_SO_lookup_or_alloc( cond );
2116 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002117
sewardjf98e1c02008-10-25 16:22:41 +00002118 if (!libhb_so_everSent(so)) {
2119 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2120 it? If this happened it would surely be a bug in the threads
2121 library. Or one of those fabled "spurious wakeups". */
2122 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2123 "succeeded on"
2124 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002125 }
sewardjf98e1c02008-10-25 16:22:41 +00002126
2127 /* anyway, acquire a dependency on it. */
2128 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
2129}
2130
2131static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2132 void* cond )
2133{
2134 /* Deal with destroy events. The only purpose is to free storage
2135 associated with the CV, so as to avoid any possible resource
2136 leaks. */
2137 if (SHOW_EVENTS >= 1)
2138 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2139 "(ctid=%d, cond=%p)\n",
2140 (Int)tid, (void*)cond );
2141
2142 map_cond_to_SO_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002143}
2144
2145
sewardj9f569b72008-11-13 13:33:09 +00002146/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002147/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002148/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002149
2150/* EXPOSITION only */
2151static
2152void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2153{
2154 if (SHOW_EVENTS >= 1)
2155 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2156 (Int)tid, (void*)rwl );
2157 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002158 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002159 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2160}
2161
2162static
2163void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2164{
2165 Thread* thr;
2166 Lock* lk;
2167 if (SHOW_EVENTS >= 1)
2168 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2169 (Int)tid, (void*)rwl );
2170
2171 thr = map_threads_maybe_lookup( tid );
2172 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002173 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002174
2175 lk = map_locks_maybe_lookup( (Addr)rwl );
2176
2177 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002178 HG_(record_error_Misc)(
2179 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002180 }
2181
2182 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002183 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002184 tl_assert( lk->guestaddr == (Addr)rwl );
2185 if (lk->heldBy) {
2186 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002187 HG_(record_error_Misc)(
2188 thr, "pthread_rwlock_destroy of a locked rwlock" );
sewardjb4112022007-11-09 22:49:28 +00002189 /* remove lock from locksets of all owning threads */
2190 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002191 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002192 lk->heldBy = NULL;
2193 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002194 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002195 }
2196 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002197 tl_assert( HG_(is_sane_LockN)(lk) );
2198
sewardj1cbc12f2008-11-10 16:16:46 +00002199 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002200 map_locks_delete( lk->guestaddr );
2201 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002202 }
2203
sewardjf98e1c02008-10-25 16:22:41 +00002204 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002205 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2206}
2207
2208static
sewardj789c3c52008-02-25 12:10:07 +00002209void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2210 void* rwl,
2211 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002212{
2213 /* Just check the rwl is sane; nothing else to do. */
2214 // 'rwl' may be invalid - not checked by wrapper
2215 Thread* thr;
2216 Lock* lk;
2217 if (SHOW_EVENTS >= 1)
2218 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2219 (Int)tid, (Int)isW, (void*)rwl );
2220
2221 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002222 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002223 thr = map_threads_maybe_lookup( tid );
2224 tl_assert(thr); /* cannot fail - Thread* must already exist */
2225
2226 lk = map_locks_maybe_lookup( (Addr)rwl );
2227 if ( lk
2228 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2229 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002230 HG_(record_error_Misc)(
2231 thr, "pthread_rwlock_{rd,wr}lock with a "
2232 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002233 }
2234}
2235
2236static
2237void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2238{
2239 // only called if the real library call succeeded - so mutex is sane
2240 Thread* thr;
2241 if (SHOW_EVENTS >= 1)
2242 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2243 (Int)tid, (Int)isW, (void*)rwl );
2244
2245 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2246 thr = map_threads_maybe_lookup( tid );
2247 tl_assert(thr); /* cannot fail - Thread* must already exist */
2248
2249 (isW ? evhH__post_thread_w_acquires_lock
2250 : evhH__post_thread_r_acquires_lock)(
2251 thr,
2252 LK_rdwr, /* if not known, create new lock with this LockKind */
2253 (Addr)rwl
2254 );
2255}
2256
2257static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2258{
2259 // 'rwl' may be invalid - not checked by wrapper
2260 Thread* thr;
2261 if (SHOW_EVENTS >= 1)
2262 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2263 (Int)tid, (void*)rwl );
2264
2265 thr = map_threads_maybe_lookup( tid );
2266 tl_assert(thr); /* cannot fail - Thread* must already exist */
2267
2268 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2269}
2270
2271static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2272{
2273 // only called if the real library call succeeded - so mutex is sane
2274 Thread* thr;
2275 if (SHOW_EVENTS >= 1)
2276 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2277 (Int)tid, (void*)rwl );
2278 thr = map_threads_maybe_lookup( tid );
2279 tl_assert(thr); /* cannot fail - Thread* must already exist */
2280
2281 // anything we should do here?
2282}
2283
2284
sewardj9f569b72008-11-13 13:33:09 +00002285/* ---------------------------------------------------------- */
2286/* -------------- events to do with semaphores -------------- */
2287/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002288
sewardj11e352f2007-11-30 11:11:02 +00002289/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002290 variables. */
2291
sewardjf98e1c02008-10-25 16:22:41 +00002292/* For each semaphore, we maintain a stack of SOs. When a 'post'
2293 operation is done on a semaphore (unlocking, essentially), a new SO
2294 is created for the posting thread, the posting thread does a strong
2295 send to it (which merely installs the posting thread's VC in the
2296 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002297
2298 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002299 semaphore, we pop a SO off the semaphore's stack (which should be
2300 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002301 dependencies between posters and waiters of the semaphore.
2302
sewardjf98e1c02008-10-25 16:22:41 +00002303 It may not be necessary to use a stack - perhaps a bag of SOs would
2304 do. But we do need to keep track of how many not-yet-consumed posts have
2305 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002306
sewardjf98e1c02008-10-25 16:22:41 +00002307 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002308 twice on S. T3 cannot complete its waits without both T1 and T2
2309 posting. The above mechanism will ensure that T3 acquires
2310 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002311
sewardjf98e1c02008-10-25 16:22:41 +00002312 When a semaphore is initialised with value N, we do as if we'd
2313 posted N times on the semaphore: basically create N SOs and do a
2314 strong send to all of then. This allows up to N waits on the
2315 semaphore to acquire a dependency on the initialisation point,
2316 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002317
2318 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2319 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002320*/
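/* Illustrative client-side sketch (compiled out) of the T1/T2/T3
   scenario in the comment above: T3's two waits pop the two SOs
   pushed by the two posts, acquiring dependencies on both posters. */
#if 0
#include <semaphore.h>
#include <pthread.h>
static sem_t s;
static int d1, d2;
static void* t1 ( void* v ) { d1 = 1; sem_post(&s); return NULL; }
static void* t2 ( void* v ) { d2 = 2; sem_post(&s); return NULL; }
int main ( void ) {
   int r;
   pthread_t a, b;
   sem_init(&s, 0, 0);
   pthread_create(&a, NULL, t1, NULL);
   pthread_create(&b, NULL, t2, NULL);
   sem_wait(&s);     /* strong recv from one poster's SO */
   sem_wait(&s);     /* ... and from the other */
   r = d1 + d2;      /* ok: both writes are now ordered before us */
   pthread_join(a, NULL);
   pthread_join(b, NULL);
   sem_destroy(&s);
   return r;
}
#endif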
2321
sewardjf98e1c02008-10-25 16:22:41 +00002322/* sem_t* -> XArray* SO* */
2323static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002324
sewardjf98e1c02008-10-25 16:22:41 +00002325static void map_sem_to_SO_stack_INIT ( void ) {
2326 if (map_sem_to_SO_stack == NULL) {
2327 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2328 HG_(free), NULL );
2329 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002330 }
2331}
2332
sewardjf98e1c02008-10-25 16:22:41 +00002333static void push_SO_for_sem ( void* sem, SO* so ) {
2334 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002335 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002336 tl_assert(so);
2337 map_sem_to_SO_stack_INIT();
2338 if (VG_(lookupFM)( map_sem_to_SO_stack,
2339 &keyW, (UWord*)&xa, (UWord)sem )) {
2340 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002341 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002342 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002343 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002344 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2345 VG_(addToXA)( xa, &so );
2346 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002347 }
2348}
2349
sewardjf98e1c02008-10-25 16:22:41 +00002350static SO* mb_pop_SO_for_sem ( void* sem ) {
2351 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002352 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002353 SO* so;
2354 map_sem_to_SO_stack_INIT();
2355 if (VG_(lookupFM)( map_sem_to_SO_stack,
2356 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002357 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002358 Word sz;
2359 tl_assert(keyW == (UWord)sem);
2360 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002361 tl_assert(sz >= 0);
2362 if (sz == 0)
2363 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002364 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2365 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002366 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002367 return so;
sewardjb4112022007-11-09 22:49:28 +00002368 } else {
2369 /* hmm, that's odd. No stack for this semaphore. */
2370 return NULL;
2371 }
2372}
2373
sewardj11e352f2007-11-30 11:11:02 +00002374static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002375{
sewardjf98e1c02008-10-25 16:22:41 +00002376 UWord keyW, valW;
2377 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002378
sewardjb4112022007-11-09 22:49:28 +00002379 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002380 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002381 (Int)tid, (void*)sem );
2382
sewardjf98e1c02008-10-25 16:22:41 +00002383 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002384
sewardjf98e1c02008-10-25 16:22:41 +00002385 /* Empty out the semaphore's SO stack. This way of doing it is
2386 stupid, but at least it's easy. */
2387 while (1) {
2388 so = mb_pop_SO_for_sem( sem );
2389 if (!so) break;
2390 libhb_so_dealloc(so);
2391 }
2392
2393 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2394 XArray* xa = (XArray*)valW;
2395 tl_assert(keyW == (UWord)sem);
2396 tl_assert(xa);
2397 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2398 VG_(deleteXA)(xa);
2399 }
sewardjb4112022007-11-09 22:49:28 +00002400}
2401
sewardj11e352f2007-11-30 11:11:02 +00002402static
2403void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2404{
sewardjf98e1c02008-10-25 16:22:41 +00002405 SO* so;
2406 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002407
2408 if (SHOW_EVENTS >= 1)
2409 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2410 (Int)tid, (void*)sem, value );
2411
sewardjf98e1c02008-10-25 16:22:41 +00002412 thr = map_threads_maybe_lookup( tid );
2413 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002414
sewardjf98e1c02008-10-25 16:22:41 +00002415 /* Empty out the semaphore's SO stack. This way of doing it is
2416 stupid, but at least it's easy. */
2417 while (1) {
2418 so = mb_pop_SO_for_sem( sem );
2419 if (!so) break;
2420 libhb_so_dealloc(so);
2421 }
sewardj11e352f2007-11-30 11:11:02 +00002422
sewardjf98e1c02008-10-25 16:22:41 +00002423 /* If we don't do this check, the following while loop runs us out
2424 of memory for stupid initial values of 'value'. */
2425 if (value > 10000) {
2426 HG_(record_error_Misc)(
2427 thr, "sem_init: initial value exceeds 10000; using 10000" );
2428 value = 10000;
2429 }
sewardj11e352f2007-11-30 11:11:02 +00002430
sewardjf98e1c02008-10-25 16:22:41 +00002431 /* Now create 'value' new SOs for the thread, do a strong send to
2432 each of them, and push them all on the stack. */
2433 for (; value > 0; value--) {
2434 Thr* hbthr = thr->hbthr;
2435 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002436
sewardjf98e1c02008-10-25 16:22:41 +00002437 so = libhb_so_alloc();
2438 libhb_so_send( hbthr, so, True/*strong send*/ );
2439 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002440 }
2441}
2442
2443static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002444{
sewardjf98e1c02008-10-25 16:22:41 +00002445 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2446 it (iow, write our VC into it, then tick ours), and push the SO
2447 on on a stack of SOs associated with 'sem'. This is later used
2448 by other thread(s) which successfully exit from a sem_wait on
2449 the same sem; by doing a strong recv from SOs popped of the
2450 stack, they acquire dependencies on the posting thread
2451 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002452
sewardjf98e1c02008-10-25 16:22:41 +00002453 Thread* thr;
2454 SO* so;
2455 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002456
2457 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002458 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002459 (Int)tid, (void*)sem );
2460
2461 thr = map_threads_maybe_lookup( tid );
2462 tl_assert(thr); /* cannot fail - Thread* must already exist */
2463
2464 // error-if: sem is bogus
2465
sewardjf98e1c02008-10-25 16:22:41 +00002466 hbthr = thr->hbthr;
2467 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002468
sewardjf98e1c02008-10-25 16:22:41 +00002469 so = libhb_so_alloc();
2470 libhb_so_send( hbthr, so, True/*strong send*/ );
2471 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002472}
2473
sewardj11e352f2007-11-30 11:11:02 +00002474static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002475{
sewardjf98e1c02008-10-25 16:22:41 +00002476 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2477 the 'sem' from this semaphore's SO-stack, and do a strong recv
2478 from it. This creates a dependency back to one of the post-ers
2479 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002480
sewardjf98e1c02008-10-25 16:22:41 +00002481 Thread* thr;
2482 SO* so;
2483 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002484
2485 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002486 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002487 (Int)tid, (void*)sem );
2488
2489 thr = map_threads_maybe_lookup( tid );
2490 tl_assert(thr); /* cannot fail - Thread* must already exist */
2491
2492 // error-if: sem is bogus
2493
sewardjf98e1c02008-10-25 16:22:41 +00002494 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002495
sewardjf98e1c02008-10-25 16:22:41 +00002496 if (so) {
2497 hbthr = thr->hbthr;
2498 tl_assert(hbthr);
2499
2500 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2501 libhb_so_dealloc(so);
2502 } else {
2503 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2504 If this happened it would surely be a bug in the threads
2505 library. */
2506 HG_(record_error_Misc)(
2507 thr, "Bug in libpthread: sem_wait succeeded on"
2508 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002509 }
2510}
2511
2512
sewardj9f569b72008-11-13 13:33:09 +00002513/* -------------------------------------------------------- */
2514/* -------------- events to do with barriers -------------- */
2515/* -------------------------------------------------------- */
2516
2517typedef
2518 struct {
2519 Bool initted; /* has it yet been initted by guest? */
2520 UWord size; /* declared size */
2521 XArray* waiting; /* XA of Thread*. # present is 0 .. size */
2522 }
2523 Bar;
2524
2525static Bar* new_Bar ( void ) {
2526 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2527 tl_assert(bar);
2528 /* all fields are zero */
2529 tl_assert(bar->initted == False);
2530 return bar;
2531}
2532
2533static void delete_Bar ( Bar* bar ) {
2534 tl_assert(bar);
2535 if (bar->waiting)
2536 VG_(deleteXA)(bar->waiting);
2537 HG_(free)(bar);
2538}
2539
2540/* A mapping which stores auxiliary data for barriers. */
2541
2542/* pthread_barrier_t* -> Bar* */
2543static WordFM* map_barrier_to_Bar = NULL;
2544
2545static void map_barrier_to_Bar_INIT ( void ) {
2546 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2547 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2548 "hg.mbtBI.1", HG_(free), NULL );
2549 tl_assert(map_barrier_to_Bar != NULL);
2550 }
2551}
2552
2553static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2554 UWord key, val;
2555 map_barrier_to_Bar_INIT();
2556 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2557 tl_assert(key == (UWord)barrier);
2558 return (Bar*)val;
2559 } else {
2560 Bar* bar = new_Bar();
2561 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2562 return bar;
2563 }
2564}
2565
2566static void map_barrier_to_Bar_delete ( void* barrier ) {
2567 UWord keyW, valW;
2568 map_barrier_to_Bar_INIT();
2569 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2570 Bar* bar = (Bar*)valW;
2571 tl_assert(keyW == (UWord)barrier);
2572 delete_Bar(bar);
2573 }
2574}
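/* Illustrative client-side sketch (compiled out): at the barrier,
   the handlers below join all threads' vector clocks (weak sends)
   and redistribute the join (strong recvs), so writes made before
   the wait are safely readable by every thread afterwards. */
#if 0
#include <pthread.h>
#define N 4
static pthread_barrier_t bar;
static int slot[N];
static void* work ( void* v ) {
   long me = (long)v;
   slot[me] = (int)me;                       /* written before the barrier */
   pthread_barrier_wait(&bar);
   return (void*)(long)slot[(me + 1) % N];   /* ok: ordered by the barrier */
}
int main ( void ) {
   pthread_t t[N];
   long i;
   pthread_barrier_init(&bar, NULL, N);
   for (i = 0; i < N; i++) pthread_create(&t[i], NULL, work, (void*)i);
   for (i = 0; i < N; i++) pthread_join(t[i], NULL);
   pthread_barrier_destroy(&bar);
   return 0;
}
#endif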
2575
2576
2577static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2578 void* barrier,
2579 UWord count )
2580{
2581 Thread* thr;
2582 Bar* bar;
2583
2584 if (SHOW_EVENTS >= 1)
2585 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
2586 "(tid=%d, barrier=%p, count=%lu)\n",
2587 (Int)tid, (void*)barrier, count );
2588
2589 thr = map_threads_maybe_lookup( tid );
2590 tl_assert(thr); /* cannot fail - Thread* must already exist */
2591
2592 if (count == 0) {
2593 HG_(record_error_Misc)(
2594 thr, "pthread_barrier_init: 'count' argument is zero"
2595 );
2596 }
2597
2598 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2599 tl_assert(bar);
2600
2601 if (bar->initted) {
2602 HG_(record_error_Misc)(
2603 thr, "pthread_barrier_init: barrier is already initialised"
2604 );
2605 }
2606
2607 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2608 tl_assert(bar->initted);
2609 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002610 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002611 );
2612 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2613 }
2614 if (!bar->waiting) {
2615 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2616 sizeof(Thread*) );
2617 }
2618
2619 tl_assert(bar->waiting);
2620 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
2621 bar->initted = True;
2622 bar->size = count;
2623}
2624
2625
2626static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2627 void* barrier )
2628{
sewardj553655c2008-11-14 19:41:19 +00002629 Thread* thr;
2630 Bar* bar;
2631
sewardj9f569b72008-11-13 13:33:09 +00002632 /* Deal with destroy events. The only purpose is to free storage
2633 associated with the barrier, so as to avoid any possible
2634 resource leaks. */
2635 if (SHOW_EVENTS >= 1)
2636 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2637 "(tid=%d, barrier=%p)\n",
2638 (Int)tid, (void*)barrier );
2639
sewardj553655c2008-11-14 19:41:19 +00002640 thr = map_threads_maybe_lookup( tid );
2641 tl_assert(thr); /* cannot fail - Thread* must already exist */
2642
2643 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2644 tl_assert(bar);
2645
2646 if (!bar->initted) {
2647 HG_(record_error_Misc)(
2648 thr, "pthread_barrier_destroy: barrier was never initialised"
2649 );
2650 }
2651
2652 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2653 HG_(record_error_Misc)(
2654 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2655 );
2656 }
2657
sewardj9f569b72008-11-13 13:33:09 +00002658 /* Maybe we shouldn't do this; just let it persist, so that when it
2659 is reinitialised we don't need to do any dynamic memory
2660 allocation? The downside is a potentially unlimited space leak,
2661 if the client creates (in turn) a large number of barriers all
2662 at different locations. Note that if we do later move to the
2663 don't-delete-it scheme, we need to mark the barrier as
2664 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002665 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002666 map_barrier_to_Bar_delete( barrier );
2667}


static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   SO*     so;
   UWord   present, i;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   /* All the threads have arrived.  Now do the Interesting Bit.  Get
      a new synchronisation object and do a weak send to it from all
      the participating threads.  This makes its vector clocks be the
      join of all the individual threads' vector clocks.  Then do a
      strong receive from it back to all threads, so that their VCs
      are a copy of it (hence are all equal to the join of their
      original VCs.) */
   so = libhb_so_alloc();

   /* XXX check ->waiting has no duplicates */

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
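
// Client-side illustration (not part of the tool) of the property
// the above models: every access made before the barrier happens-
// before every access made after it, so Helgrind reports no race on
// 'data' in a client program shaped like this:
//
//    static long data[2];
//    static pthread_barrier_t bar;   /* initialised with count == 2 */
//
//    static void* worker ( void* argV ) {
//       long i = (long)argV;          /* i is 0 or 1 */
//       data[i] = i + 1;              /* write own slot ... */
//       pthread_barrier_wait( &bar );
//       return (void*)data[1 - i];    /* ... then read the other one */
//    }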


/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
   laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/
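
// A minimal sketch of cache (1) above (hypothetical: the
// 'laog_dfs_cache_*' names are invented for illustration, and the
// invalidation assignments would need adding to laog__add_edge and
// laog__del_edge):
//
//    static Lock*     laog_dfs_cache_lk   = NULL;
//    static WordSetID laog_dfs_cache_lset = 0;
//    static Lock*     laog_dfs_cache_ans  = NULL;
//
//    /* in laog__pre_thread_acquires_lock, replacing the DFS call: */
//    if (lk == laog_dfs_cache_lk
//        && thr->locksetA == laog_dfs_cache_lset) {
//       other = laog_dfs_cache_ans;   /* hit: skip the graph search */
//    } else {
//       other = laog__do_dfs_from_to( lk, thr->locksetA );
//       laog_dfs_cache_lk   = lk;
//       laog_dfs_cache_lset = thr->locksetA;
//       laog_dfs_cache_ans  = other;
//    }
//    /* and set laog_dfs_cache_lk = NULL whenever the graph changes */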

typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */

static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}

__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}

__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   if ( !laog )
      return; /* nothing much we can do */
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}

/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      i, ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   Word      succs_size;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
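
// Example: if laog currently holds the edges L1 --> L2 --> L3, then
// laog__do_dfs_from_to(L1, {L2,L3}) returns L2 or L3 (both are
// reachable from L1), whereas laog__do_dfs_from_to(L3, {L1})
// returns NULL.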


/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word   ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking it.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   if (!laog)
      laog = VG_(newFM)( HG_(zalloc), "hg.lptal.1",
                         HG_(free), NULL/*unboxedcmp*/ );
   if (!laog_exposition)
      laog_exposition = VG_(newFM)( HG_(zalloc), "hg.lptal.2", HG_(free),
                                    cmp_LAOGLinkExposition );

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk)  |  old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}


/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}
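
// Example: if laog holds A --> B --> C and lock B is destroyed, the
// final loop above adds A --> C, so the previously established
// "A before C" ordering survives B's deletion.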

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//   if (!laog)
//      laog = VG_(newFM)( HG_(zalloc), "hg.lhld.1", HG_(free), NULL/*unboxedcmp*/ );
//   if (!laog_exposition)
//      laog_exposition = VG_(newFM)( HG_(zalloc), "hg.lhld.2", HG_(free),
//                                    cmp_LAOGLinkExposition );
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}


/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;


static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}


/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
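/* For example, on a 32-bit platform a bogus request for 0xFFFFFFF0
   bytes reads as ((SSizeT)-16) < 0 here, so it is rejected rather
   than being passed through to the allocator. */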
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}


/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}


static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT      i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller.  Tell the wranglers about the dead tail
         before updating md->szB, since otherwise the range would
         compute as zero-length. */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      shadow_mem_copy_range( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}


/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

static void instrument_mem_access ( IRSB*   bbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_write_1";
            hAddr = &evh__mem_help_write_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_write_2";
            hAddr = &evh__mem_help_write_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_write_4";
            hAddr = &evh__mem_help_write_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_write_8";
            hAddr = &evh__mem_help_write_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_write_N";
            hAddr = &evh__mem_help_write_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_read_1";
            hAddr = &evh__mem_help_read_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_read_2";
            hAddr = &evh__mem_help_read_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_read_4";
            hAddr = &evh__mem_help_read_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_read_8";
            hAddr = &evh__mem_help_read_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_read_N";
            hAddr = &evh__mem_help_read_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
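
// For example (illustrative): when hg_instrument below meets a flat
// 4-byte load "t1 = LDle:I32(t0)", its Ist_WrTmp case calls this
// function, which appends a dirty call to evh__mem_help_read_4(t0)
// to bbOut just before the load itself is copied across, so the tool
// observes the access immediately before the client performs it.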


//static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
//{
//   switch (event) {
//      case Imbe_SnoopedStoreBegin:
//      case Imbe_SnoopedStoreEnd:
//         /* These arise from ppc stwcx. insns.  They should perhaps be
//            handled better. */
//         break;
//      case Imbe_Fence:
//         break; /* not interesting */
//      case Imbe_BusLock:
//      case Imbe_BusUnlock:
//         addStmtToIRSB(
//            bbOut,
//            IRStmt_Dirty(
//               unsafeIRDirty_0_N(
//                  0/*regparms*/,
//                  event == Imbe_BusLock ? "evh__bus_lock"
//                                        : "evh__bus_unlock",
//                  VG_(fnptr_to_fnentry)(
//                     event == Imbe_BusLock ? &evh__bus_lock
//                                           : &evh__bus_unlock
//                  ),
//                  mkIRExprVec_0()
//               )
//            )
//         );
//         break;
//      default:
//         tl_assert(0);
//   }
//}



static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int   i;
   IRSB* bbOut;
   Bool  x86busLocked   = False;
   Bool  isSnoopedStore = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      IRStmt* st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_IMark:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_MBE:
            //instrument_memory_bus_event( bbOut, st->Ist.MBE.event );
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               /* Imbe_Bus{Lock,Unlock} arise from x86/amd64 LOCK
                  prefixed instructions. */
               case Imbe_BusLock:
                  tl_assert(x86busLocked == False);
                  x86busLocked = True;
                  break;
               case Imbe_BusUnlock:
                  tl_assert(x86busLocked == True);
                  x86busLocked = False;
                  break;
               /* Imbe_SnoopedStore{Begin,End} arise from ppc
                  stwcx. instructions. */
               case Imbe_SnoopedStoreBegin:
                  tl_assert(isSnoopedStore == False);
                  isSnoopedStore = True;
                  break;
               case Imbe_SnoopedStoreEnd:
                  tl_assert(isSnoopedStore == True);
                  isSnoopedStore = False;
                  break;
               default:
                  goto unhandled;
            }
            break;

         case Ist_Store:
            if (!x86busLocked && !isSnoopedStore)
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            break;

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               instrument_mem_access(
                  bbOut,
                  data->Iex.Load.addr,
                  sizeofIRType(data->Iex.Load.ty),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
               /* This isn't really correct.  Really the
                  instrumentation should be only added when
                  (!x86busLocked && !isSnoopedStore), just like with
                  Ist_Store.  Still, I don't think this is
                  particularly important. */
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, True/*isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
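
// Illustrative client-world view (a sketch, not the tool's code; the
// exact request macro and its signature in this tree may differ):
// Helgrind's pthread wrappers announce events using the Valgrind
// client-request mechanism, roughly like
//
//    VALGRIND_DO_CLIENT_REQUEST(ignored, 0 /*default*/,
//                               _VG_USERREQ__HG_SET_MY_PTHREAD_T,
//                               pthread_self(), 0, 0, 0, 0);
//
// which arrives above with args[0] == _VG_USERREQ__HG_SET_MY_PTHREAD_T
// and args[1] == the calling thread's pthread_t.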


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   if      (VG_CLO_STREQ(arg, "--track-lockorders=no"))
      HG_(clo_track_lockorders) = False;
   else if (VG_CLO_STREQ(arg, "--track-lockorders=yes"))
      HG_(clo_track_lockorders) = True;

   else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=no"))
      HG_(clo_cmp_race_err_addrs) = False;
   else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=yes"))
      HG_(clo_cmp_race_err_addrs) = True;

   else if (VG_CLO_STREQ(arg, "--show-conflicts=no"))
      HG_(clo_show_conflicts) = False;
   else if (VG_CLO_STREQ(arg, "--show-conflicts=yes"))
      HG_(clo_show_conflicts) = True;

   /* If you change the 10k/10mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else VG_BNUM_CLO(arg, "--conflict-cache-size",
                    HG_(clo_conflict_cache_size), 10*1000, 10*1000*1000)

   /* "stuvwx" --> stuvwx (binary) */
   else if (VG_CLO_STREQN(18, arg, "--hg-sanity-flags=")) {
      Int j;
      Char* opt = & arg[18];

      if (6 != VG_(strlen)(opt)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == opt[j]) { /* do nothing */ }
         else if ('1' == opt[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
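
// Example: "--hg-sanity-flags=000010" sets only bit 1, enabling the
// sanity checks run at lock/unlock events (see hg_print_debug_usage
// below for the full bit assignment).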

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --show-conflicts=no|yes    show both stack traces in a race? [yes]\n"
"    --conflict-cache-size=N    size of conflict history cache [1000000]\n"
   );
   VG_(replacement_malloc_print_usage)();
}

static void hg_print_debug_usage ( void )
{
   VG_(replacement_malloc_print_debug_usage)();
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_post_clo_init ( void )
{
}

static void hg_fini ( Int exitcode )
{
   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_verbosity) >= 2) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }

//zz       VG_(printf)("\n");
//zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
//zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
//zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
//zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
//zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
//zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
//zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
//zz                   stats__hbefore_stk_hwm);
//zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
//zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}
4135
sewardjf98e1c02008-10-25 16:22:41 +00004136/* FIXME: move these somewhere sane */
4137
4138static
4139void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4140{
4141 Thread* thr;
4142 ThreadId tid;
4143 UWord nActual;
4144 tl_assert(hbt);
4145 thr = libhb_get_Thr_opaque( hbt );
4146 tl_assert(thr);
4147 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4148 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4149 NULL, NULL, 0 );
4150 tl_assert(nActual <= nRequest);
4151 for (; nActual < nRequest; nActual++)
4152 frames[nActual] = 0;
4153}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}
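
/* Both callbacks above recover a core ThreadId from libhb's opaque
   Thr* via map_threads_maybe_reverse_lookup_SLOW; the _SLOW suffix
   is a warning that this is (presumably) a linear search of the
   thread map rather than an O(1) lookup, so neither callback should
   be treated as cheap by the libhb side. */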


static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2008, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(pp_Error),
                                   False, /*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(print_extra_suppression_info));

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes Helgrind to start
      more slowly and to use significantly more memory, without very
      often producing useful results.  The user can load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */
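
   /* For reference, the user-visible way to get variable information
      back is the option named in the comment above, e.g.
      (illustrative command line):

         valgrind --tool=helgrind --read-var-info=yes ./a.out
   */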

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( shadow_mem_copy_range );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);
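
   /* Registering NULL for a track_* event simply declines that
      callback.  Plausibly post-write events carry nothing extra for
      race detection, since evh__pre_mem_write has already seen the
      access -- but that rationale is an inference, not something
      stated in this file. */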

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code) ( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   initialise_data_structures(hbthr_root);
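
   /* libhb_init hands back the Thr* for the root (main) thread;
      passing it to initialise_data_structures presumably lets the
      tool-side Thread record for the root thread be linked to its
      libhb counterpart from the start -- the same linkage that
      libhb_get_Thr_opaque walks in the two callbacks above. */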

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h,
      are met.  Blargh. */
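
   /* A sketch of what that requirement amounts to (field names are
      illustrative; the authoritative layout is VgHashNode in
      pub_tool_hashtable.h and the real MallocMeta definition):

         struct _MallocMeta {
            struct _MallocMeta* next;  // chain link: must be first
            UWord               key;   // hash key:   must be second
            // ... Helgrind-private fields follow ...
         };

      The asserts below check the word-size equalities this layout
      trick relies on. */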
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/