
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2008 OpenWorks LLP
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_debuginfo.h"  /* VG_(get_data_description) */
#include "pub_tool_wordfm.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/

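/* A concrete instance of the casts the note above refers to (an
   illustrative aside, not part of the original text): looking up a
   Lock* in a WordFM keyed on guest addresses means both key and
   value travel as Word:

      Lock* lk = NULL;
      Bool  found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );

   It is the (Word*)&lk pun that -O2's strict-aliasing assumptions
   would otherwise be entitled to miscompile. */
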
// FIXME catch sync signals (SEGV, basically) and unlock BHL,
// if held.  Otherwise a LOCK-prefixed insn which segfaults
// gets Helgrind into a total muddle as the BHL will not be
// released after the insn.

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// this is:
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
#define SHOW_EXPENSIVE_STUFF 0

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin linked list of Locks */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* never changed; we only care about its address.  Is treated as if it
   was a standard userspace lock.  Also we have a Lock* describing it
   so it can participate in lock sets in the usual way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;

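/* Illustrative sketch only, not from the original source.  The intent
   is that a LOCK-prefixed instruction is modelled as a write
   acquisition and release of this fake lock, through the same helpers
   as ordinary client locks (the real handlers are expected further
   down in this file, outside this excerpt; the names below are
   hypothetical):

      static void sketch__bus_lock ( Thread* thr ) {
         // LOCK prefix seen: treat it as taking the bus lock ...
         evhH__post_thread_w_acquires_lock( thr, LK_nonRec,
                                            (Addr)&__bus_lock );
      }
      static void sketch__bus_unlock ( Thread* thr ) {
         // ... and dropping it when the insn completes
         evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock,
                                         False/*!isRDWR*/ );
      }
*/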

/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin = admin_locks;
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks = lock;
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

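/* Aside (illustrative, not in the original source): .heldBy is a bag
   (multiset) precisely so that recursive and shared acquisitions can
   be counted.  For an LK_mbRec lock w-held three times by thread T:

      VG_(elemBag)( lk->heldBy, (Word)T )  == 3
      VG_(sizeUniqueBag)( lk->heldBy )     == 1

   and each lockN_release decrements the count, with the lock
   reverting to the unheld state only when the bag empties. */
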
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS (1<<1)
#define PP_LOCKS   (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n;
   n = 0;
   space(d); VG_(printf)("map_threads ");
   n = 0;
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

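/* For orientation (an aside, not in the original source): these kinds
   correspond roughly to the client-side primitives Helgrind tracks.
   A hypothetical client's view:

      pthread_mutex_t  m1;  // default/errorcheck kinds -> LK_nonRec
      pthread_mutex_t  m2;  // PTHREAD_MUTEX_RECURSIVE  -> LK_mbRec
      pthread_rwlock_t rw;  // reader-writer lock       -> LK_rdwr
*/
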
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin  %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic  0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                                      (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}


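/* Illustrative life-cycle (an aside, not in the original source): a
   guest lock address flows through map_locks roughly as follows,
   where 'm' and 'tid' stand for a hypothetical client mutex and the
   calling thread's core ThreadId:

      // client acquires m: make sure a Lock exists for it
      Lock* lk = map_locks_lookup_or_create( LK_nonRec, (Addr)&m, tid );

      // client releases m: it should already be known to us
      lk = map_locks_maybe_lookup( (Addr)&m );

      // client destroys m: forget the (address, Lock) binding
      map_locks_delete( (Addr)&m );
*/
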

/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}


///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a
//   good chance that it is indeed the lock that the programmer
929// intended to use to protect the location.
930//
931// Note that we cannot in general just look at the lossage set when we
932// see a transition to ShM(...,empty-set), because a transition to an
933// empty lockset can happen arbitrarily far before the point where we
934// want to report an error. This is in the case where there are many
935// transitions ShR -> ShR, all with an empty lockset, and only later
936// is there a transition to ShM. So what we want to do is note the
937// lossage lock at the point where a ShR -> ShR transition empties out
938// the lockset, so we can present it later if there should be a
939// transition to ShM.
940//
941// So this function finds such transitions. For each, it associates
942// in ga_to_lastlock, the guest address and the lossage lock. In fact
943// we do not record the Lock* directly as that may disappear later,
944// but instead the ExeContext inside the Lock which says where it was
945// initialised or first locked. ExeContexts are permanent so keeping
946// them indefinitely is safe.
947//
948// A boring detail: the hardware bus lock is not interesting in this
949// respect, so we first remove that from the pre/post locksets.
950//*/
951//
952//static UWord stats__ga_LL_adds = 0;
953//
954//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
955//
956//static
957//void record_last_lock_lossage ( Addr ga_of_access,
958// WordSetID lset_old, WordSetID lset_new )
959//{
960// Lock* lk;
961// Int card_old, card_new;
962//
963// tl_assert(lset_old != lset_new);
964//
965// if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
966// (Int)lset_old,
967// HG_(cardinalityWS)(univ_lsets,lset_old),
968// (Int)lset_new,
969// HG_(cardinalityWS)(univ_lsets,lset_new),
970// ga_of_access );
971//
972// /* This is slow, but at least it's simple. The bus hardware lock
973// just confuses the logic, so remove it from the locksets we're
974// considering before doing anything else. */
975// lset_new = del_BHL( lset_new );
976//
977// if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
978// /* The post-transition lock set is not empty. So we are not
979// interested. We're only interested in spotting transitions
980// that make locksets become empty. */
981// return;
982// }
983//
984// /* lset_new is now empty */
985// card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
986// tl_assert(card_new == 0);
987//
988// lset_old = del_BHL( lset_old );
989// card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
990//
991// if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
992// (Int)lset_old, card_old, (Int)lset_new, card_new );
993//
994// if (card_old == 0) {
995// /* The old lockset was also empty. Not interesting. */
996// return;
997// }
998//
999// tl_assert(card_old > 0);
1000// tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
1001//
1002// /* Now we know we've got a transition from a nonempty lockset to an
1003// empty one. So lset_old must be the set of locks lost. Record
1004// some details. If there is more than one element in the lossage
1005// set, just choose one arbitrarily -- not the best, but at least
1006// it's simple. */
1007//
1008// lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
1009// if (0) VG_(printf)("lossage %ld %p\n",
1010// HG_(cardinalityWS)( univ_lsets, lset_old), lk );
1011// if (lk->appeared_at) {
1012// if (ga_to_lastlock == NULL)
1013// ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
1014// VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
1015// stats__ga_LL_adds++;
1016// }
1017//}
1018//
1019///* This queries the table (ga_to_lastlock) made by
1020// record_last_lock_lossage, when constructing error messages. It
1021// attempts to find the ExeContext of the allocation or initialisation
1022// point for the lossage lock associated with 'ga'. */
1023//
1024//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
1025//{
1026// ExeContext* ec_hint = NULL;
1027// if (ga_to_lastlock != NULL
1028// && VG_(lookupFM)(ga_to_lastlock,
1029// NULL, (Word*)&ec_hint, ga)) {
1030// tl_assert(ec_hint != NULL);
1031// return ec_hint;
1032// } else {
1033// return NULL;
1034// }
1035//}
sewardjb4112022007-11-09 22:49:28 +00001036
1037
sewardjb4112022007-11-09 22:49:28 +00001038/*----------------------------------------------------------------*/
1039/*--- Shadow value and address range handlers ---*/
1040/*----------------------------------------------------------------*/
1041
1042static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001043//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001044static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001045__attribute__((noinline))
1046static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001047
sewardjb4112022007-11-09 22:49:28 +00001048
1049/* Block-copy states (needed for implementing realloc()). */
1050static void shadow_mem_copy_range ( Addr src, Addr dst, SizeT len )
1051{
sewardjf98e1c02008-10-25 16:22:41 +00001052 libhb_copy_shadow_state( src, dst, len );
sewardjb4112022007-11-09 22:49:28 +00001053}
1054
sewardjf98e1c02008-10-25 16:22:41 +00001055static void shadow_mem_read_range ( Thread* thr, Addr a, SizeT len )
1056{
1057 Thr* hbthr = thr->hbthr;
1058 tl_assert(hbthr);
1059 LIBHB_READ_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001060}
1061
1062static void shadow_mem_write_range ( Thread* thr, Addr a, SizeT len ) {
sewardjf98e1c02008-10-25 16:22:41 +00001063 Thr* hbthr = thr->hbthr;
1064 tl_assert(hbthr);
1065 LIBHB_WRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001066}
1067
1068static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1069{
sewardjf98e1c02008-10-25 16:22:41 +00001070 libhb_range_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001071}
1072
sewardjb4112022007-11-09 22:49:28 +00001073static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
1074{
sewardjb4112022007-11-09 22:49:28 +00001075 if (0 && len > 500)
barta0b6b2c2008-07-07 06:49:24 +00001076 VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
sewardjf98e1c02008-10-25 16:22:41 +00001077 libhb_range_noaccess( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001078}
1079
1080
1081/*----------------------------------------------------------------*/
1082/*--- Event handlers (evh__* functions) ---*/
1083/*--- plus helpers (evhH__* functions) ---*/
1084/*----------------------------------------------------------------*/
1085
1086/*--------- Event handler helpers (evhH__* functions) ---------*/
1087
1088/* Create a new segment for 'thr', making it depend (.prev) on its
1089 existing segment, bind together the SegmentID and Segment, and
1090 return both of them. Also update 'thr' so it references the new
1091 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001092//zz static
1093//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1094//zz /*OUT*/Segment** new_segP,
1095//zz Thread* thr )
1096//zz {
1097//zz Segment* cur_seg;
1098//zz tl_assert(new_segP);
1099//zz tl_assert(new_segidP);
1100//zz tl_assert(HG_(is_sane_Thread)(thr));
1101//zz cur_seg = map_segments_lookup( thr->csegid );
1102//zz tl_assert(cur_seg);
1103//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1104//zz at their owner thread. */
1105//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1106//zz *new_segidP = alloc_SegmentID();
1107//zz map_segments_add( *new_segidP, *new_segP );
1108//zz thr->csegid = *new_segidP;
1109//zz }
sewardjb4112022007-11-09 22:49:28 +00001110
1111
1112/* The lock at 'lock_ga' has acquired a writer. Make all necessary
1113 updates, and also do all possible error checks. */
1114static
1115void evhH__post_thread_w_acquires_lock ( Thread* thr,
1116 LockKind lkk, Addr lock_ga )
1117{
1118 Lock* lk;
1119
1120 /* Basically what we need to do is call lockN_acquire_writer.
1121 However, that will barf if any 'invalid' lock states would
1122 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001123 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001124 routine.
1125
1126 Because this routine is only called after successful lock
1127 acquisition, we should not be asked to move the lock into any
1128 invalid states. Requests to do so are bugs in libpthread, since
1129 that should have rejected any such requests. */
1130
sewardjf98e1c02008-10-25 16:22:41 +00001131 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001132 /* Try to find the lock. If we can't, then create a new one with
1133 kind 'lkk'. */
1134 lk = map_locks_lookup_or_create(
1135 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001136 tl_assert( HG_(is_sane_LockN)(lk) );
1137
1138 /* check libhb level entities exist */
1139 tl_assert(thr->hbthr);
1140 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001141
1142 if (lk->heldBy == NULL) {
1143 /* the lock isn't held. Simple. */
1144 tl_assert(!lk->heldW);
1145 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001146 /* acquire a dependency from the lock's VCs */
1147 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001148 goto noerror;
1149 }
1150
1151 /* So the lock is already held. If held as a r-lock then
1152 libpthread must be buggy. */
1153 tl_assert(lk->heldBy);
1154 if (!lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001155 HG_(record_error_Misc)(
1156 thr, "Bug in libpthread: write lock "
1157 "granted on rwlock which is currently rd-held");
sewardjb4112022007-11-09 22:49:28 +00001158 goto error;
1159 }
1160
1161 /* So the lock is held in w-mode. If it's held by some other
1162 thread, then libpthread must be buggy. */
sewardj896f6f92008-08-19 08:38:52 +00001163 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
sewardjb4112022007-11-09 22:49:28 +00001164
sewardj896f6f92008-08-19 08:38:52 +00001165 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
sewardjf98e1c02008-10-25 16:22:41 +00001166 HG_(record_error_Misc)(
1167 thr, "Bug in libpthread: write lock "
1168 "granted on mutex/rwlock which is currently "
1169 "wr-held by a different thread");
sewardjb4112022007-11-09 22:49:28 +00001170 goto error;
1171 }
1172
1173 /* So the lock is already held in w-mode by 'thr'. That means this
1174 is an attempt to lock it recursively, which is only allowable
1175 for LK_mbRec kinded locks. Since this routine is called only
1176 once the lock has been acquired, this must also be a libpthread
1177 bug. */
1178 if (lk->kind != LK_mbRec) {
sewardjf98e1c02008-10-25 16:22:41 +00001179 HG_(record_error_Misc)(
1180 thr, "Bug in libpthread: recursive write lock "
1181 "granted on mutex/wrlock which does not "
1182 "support recursion");
sewardjb4112022007-11-09 22:49:28 +00001183 goto error;
1184 }
1185
1186 /* So we are recursively re-locking a lock we already w-hold. */
1187 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001188 /* acquire a dependency from the lock's VC. Probably pointless,
1189 but also harmless. */
1190 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001191 goto noerror;
1192
1193 noerror:
1194 /* check lock order acquisition graph, and update. This has to
1195 happen before the lock is added to the thread's locksetA/W. */
1196 laog__pre_thread_acquires_lock( thr, lk );
1197 /* update the thread's held-locks set */
1198 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1199 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
1200 /* fall through */
1201
1202 error:
sewardjf98e1c02008-10-25 16:22:41 +00001203 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001204}
1205
1206
1207/* The lock at 'lock_ga' has acquired a reader. Make all necessary
1208 updates, and also do all possible error checks. */
1209static
1210void evhH__post_thread_r_acquires_lock ( Thread* thr,
1211 LockKind lkk, Addr lock_ga )
1212{
1213 Lock* lk;
1214
1215 /* Basically what we need to do is call lockN_acquire_reader.
1216 However, that will barf if any 'invalid' lock states would
1217 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001218 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001219 routine.
1220
1221 Because this routine is only called after successful lock
1222 acquisition, we should not be asked to move the lock into any
1223 invalid states. Requests to do so are bugs in libpthread, since
1224 that should have rejected any such requests. */
1225
sewardjf98e1c02008-10-25 16:22:41 +00001226 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001227 /* Try to find the lock. If we can't, then create a new one with
1228 kind 'lkk'. Only a reader-writer lock can be read-locked,
1229 hence the first assertion. */
1230 tl_assert(lkk == LK_rdwr);
1231 lk = map_locks_lookup_or_create(
1232 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001233 tl_assert( HG_(is_sane_LockN)(lk) );
1234
1235 /* check libhb level entities exist */
1236 tl_assert(thr->hbthr);
1237 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001238
1239 if (lk->heldBy == NULL) {
1240 /* the lock isn't held. Simple. */
1241 tl_assert(!lk->heldW);
1242 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001243 /* acquire a dependency from the lock's VC */
1244 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001245 goto noerror;
1246 }
1247
1248 /* So the lock is already held. If held as a w-lock then
1249 libpthread must be buggy. */
1250 tl_assert(lk->heldBy);
1251 if (lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001252 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1253 "granted on rwlock which is "
1254 "currently wr-held");
sewardjb4112022007-11-09 22:49:28 +00001255 goto error;
1256 }
1257
1258 /* Easy enough. In short anybody can get a read-lock on a rwlock
1259 provided it is either unlocked or already in rd-held. */
1260 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001261 /* acquire a dependency from the lock's VC. Probably pointless,
1262 but also harmless. */
1263 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001264 goto noerror;
1265
1266 noerror:
1267 /* check lock order acquisition graph, and update. This has to
1268 happen before the lock is added to the thread's locksetA/W. */
1269 laog__pre_thread_acquires_lock( thr, lk );
1270 /* update the thread's held-locks set */
1271 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1272 /* but don't update thr->locksetW, since lk is only rd-held */
1273 /* fall through */
1274
1275 error:
sewardjf98e1c02008-10-25 16:22:41 +00001276 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001277}
1278
1279
1280/* The lock at 'lock_ga' is just about to be unlocked. Make all
1281 necessary updates, and also do all possible error checks. */
1282static
1283void evhH__pre_thread_releases_lock ( Thread* thr,
1284 Addr lock_ga, Bool isRDWR )
1285{
1286 Lock* lock;
1287 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001288 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001289
1290 /* This routine is called prior to a lock release, before
   libpthread has had a chance to validate the call.  Hence we need
   to detect and reject any attempts to move the lock into an
   invalid state.  Such attempts are bugs in the client.

   isRDWR is True if we know from the wrapper context that lock_ga
   should refer to a reader-writer lock, and is False if [ditto]
   lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* We no longer hold the lock. */
      tl_assert(!lock->heldBy);
      tl_assert(lock->heldW == False);
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
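      /* Editorial note (not in the original source): a worked example
         of the strong/weak distinction, going by the send semantics
         described in the semaphore and barrier comments below.
         Suppose T1 and T2 both r-hold this rwlock and unlock in turn.
         Each r-unlock does a weak send, which joins the unlocker's
         vector clock into the lock's SO rather than overwriting it;
         a writer that later w-acquires the lock therefore receives a
         dependency on *both* readers.  After a w-unlock a strong send
         (overwrite) suffices, since the single writer already
         dominates all earlier accesses to the lock. */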
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from
   the core for client code starts/stops, and cache the looked-up
   result in 'current_Thread'.  Hence, for the vast majority of
   requests, finding the current thread reduces to a read of a
   global variable, provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread* current_Thread = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* About the only thing we do need to do is clear the map_threads
      entry, in order that the Valgrind core can re-use it. */
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread* thr_s;
   Thread* thr_q;
   Thr*    hbthr_s;
   Thr*    hbthr_q;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit
      before we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_read_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // FIXME: think of a less ugly hack
   len = VG_(strlen)( (Char*) a );
   shadow_mem_read_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_write_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

static VG_REGPARM(1)
void evh__mem_help_read_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_read_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_read_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_read_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_read_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_write_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_write_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_write_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_write_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_write_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_N(hbthr, a, size);
}

//static void evh__bus_lock(void) {
//   Thread* thr;
//   if (0) VG_(printf)("evh__bus_lock()\n");
//   thr = get_current_Thread();
//   tl_assert(thr); /* cannot fail - Thread* must already exist */
//   evhH__post_thread_w_acquires_lock( thr, LK_nonRec, (Addr)&__bus_lock );
//}
//static void evh__bus_unlock(void) {
//   Thread* thr;
//   if (0) VG_(printf)("evh__bus_unlock()\n");
//   thr = get_current_Thread();
//   tl_assert(thr); /* cannot fail - Thread* must already exist */
//   evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
//}

/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HG_(record_error_Misc)( thr, "Attempt to re-lock a "
                                   "non-recursive lock I already hold" );
   }
}

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}

/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to the SO associated with it.  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

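/* Editorial illustration (not part of Helgrind): a minimal,
   hypothetical client sketch showing the edge this mapping models.
   T1's write to 'data' happens-before T2's read precisely because the
   signal/wait pairing routes T1's vector clock through cv's SO:

      int data = 0;                         // shared
      pthread_mutex_t mu;  pthread_cond_t cv;  int ready = 0;

      void* T1 ( void* v ) {                // signaller
         pthread_mutex_lock(&mu);
         data = 42;  ready = 1;
         pthread_cond_signal(&cv);          // Helgrind: 'send' on cv's SO
         pthread_mutex_unlock(&mu);
         return NULL;
      }
      void* T2 ( void* v ) {                // waiter
         pthread_mutex_lock(&mu);
         while (!ready)
            pthread_cond_wait(&cv, &mu);    // on completion: 'recv' from SO
         assert(data == 42);                // race-free: ordered after T1
         pthread_mutex_unlock(&mu);
         return NULL;
      }
*/
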
/* pthread_cond_t* -> SO* */
static WordFM* map_cond_to_SO = NULL;

static void map_cond_to_SO_INIT ( void ) {
   if (UNLIKELY(map_cond_to_SO == NULL)) {
      map_cond_to_SO = VG_(newFM)( HG_(zalloc),
                                   "hg.mctSI.1", HG_(free), NULL );
      tl_assert(map_cond_to_SO != NULL);
   }
}

static SO* map_cond_to_SO_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_SO_INIT();
   if (VG_(lookupFM)( map_cond_to_SO, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_cond_to_SO, (UWord)cond, (UWord)so );
      return so;
   }
}

static void map_cond_to_SO_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_SO_INIT();
   if (VG_(delFromFM)( map_cond_to_SO, &keyW, &valW, (UWord)cond )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == (UWord)cond);
      libhb_so_dealloc(so);
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: mutex is bogus
   // error-if: mutex is not locked

   so = map_cond_to_SO_lookup_or_alloc( cond );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex

   return lk_valid;
}

static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   so = map_cond_to_SO_lookup_or_alloc( cond );
   tl_assert(so);

   if (!libhb_so_everSent(so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded without prior "
                                   "pthread_cond_signal" );
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}

static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_SO_delete( cond );
}

/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}

/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about.  We should.
*/
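
/* Editorial illustration (not part of Helgrind): the T1/T2/T3 scenario
   from the comment above, as a hypothetical client sketch.  Each
   sem_post pushes a fresh SO carrying the poster's vector clock; each
   completed sem_wait pops one SO and strong-recvs from it, so after
   both waits T3's clock dominates both posters', whichever order the
   SOs are popped in:

      sem_t s;                          // sem_init(&s, 0, 0) at startup
      int x, y;                         // shared

      void* T1 ( void* v ) { x = 1; sem_post(&s); return NULL; }
      void* T2 ( void* v ) { y = 2; sem_post(&s); return NULL; }
      void* T3 ( void* v ) {
         sem_wait(&s);                  // recv from one poster's SO
         sem_wait(&s);                  // recv from the other poster's SO
         return (void*)(long)(x + y);   // race-free: ordered after both
      }
*/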

/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
      tl_assert(map_sem_to_SO_stack != NULL);
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}

static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}

static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}

static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used by
      other thread(s) which successfully exit from a sem_wait on the
      same sem; by doing a strong recv from SOs popped off the stack,
      they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}

static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}
2511
2512
sewardj9f569b72008-11-13 13:33:09 +00002513/* -------------------------------------------------------- */
2514/* -------------- events to do with barriers -------------- */
2515/* -------------------------------------------------------- */
2516
2517typedef
2518 struct {
2519 Bool initted; /* has it yet been initted by guest? */
2520 UWord size; /* declared size */
2521 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2522 }
2523 Bar;
2524
2525static Bar* new_Bar ( void ) {
2526 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2527 tl_assert(bar);
2528 /* all fields are zero */
2529 tl_assert(bar->initted == False);
2530 return bar;
2531}
2532
2533static void delete_Bar ( Bar* bar ) {
2534 tl_assert(bar);
2535 if (bar->waiting)
2536 VG_(deleteXA)(bar->waiting);
2537 HG_(free)(bar);
2538}
2539
2540/* A mapping which stores auxiliary data for barriers. */
2541
2542/* pthread_barrier_t* -> Bar* */
2543static WordFM* map_barrier_to_Bar = NULL;
2544
2545static void map_barrier_to_Bar_INIT ( void ) {
2546 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2547 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2548 "hg.mbtBI.1", HG_(free), NULL );
2549 tl_assert(map_barrier_to_Bar != NULL);
2550 }
2551}
2552
2553static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2554 UWord key, val;
2555 map_barrier_to_Bar_INIT();
2556 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2557 tl_assert(key == (UWord)barrier);
2558 return (Bar*)val;
2559 } else {
2560 Bar* bar = new_Bar();
2561 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2562 return bar;
2563 }
2564}
2565
2566static void map_barrier_to_Bar_delete ( void* barrier ) {
2567 UWord keyW, valW;
2568 map_barrier_to_Bar_INIT();
2569 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2570 Bar* bar = (Bar*)valW;
2571 tl_assert(keyW == (UWord)barrier);
2572 delete_Bar(bar);
2573 }
2574}
2575
2576
2577static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2578 void* barrier,
2579 UWord count )
2580{
2581 Thread* thr;
2582 Bar* bar;
2583
2584 if (SHOW_EVENTS >= 1)
2585 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
2586 "(tid=%d, barrier=%p, count=%lu)\n",
2587 (Int)tid, (void*)barrier, count );
2588
2589 thr = map_threads_maybe_lookup( tid );
2590 tl_assert(thr); /* cannot fail - Thread* must already exist */
2591
2592 if (count == 0) {
2593 HG_(record_error_Misc)(
2594 thr, "pthread_barrier_init: 'count' argument is zero"
2595 );
2596 }
2597
2598 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2599 tl_assert(bar);
2600
2601 if (bar->initted) {
2602 HG_(record_error_Misc)(
2603 thr, "pthread_barrier_init: barrier is already initialised"
2604 );
2605 }
2606
2607 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2608 tl_assert(bar->initted);
2609 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002610 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002611 );
2612 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2613 }
2614 if (!bar->waiting) {
2615 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2616 sizeof(Thread*) );
2617 }
2618
2619 tl_assert(bar->waiting);
2620 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
2621 bar->initted = True;
2622 bar->size = count;
2623}
2624
2625
2626static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2627 void* barrier )
2628{
sewardj553655c2008-11-14 19:41:19 +00002629 Thread* thr;
2630 Bar* bar;
2631
sewardj9f569b72008-11-13 13:33:09 +00002632 /* Deal with destroy events. The only purpose is to free storage
2633 associated with the barrier, so as to avoid any possible
2634 resource leaks. */
2635 if (SHOW_EVENTS >= 1)
2636 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2637 "(tid=%d, barrier=%p)\n",
2638 (Int)tid, (void*)barrier );
2639
sewardj553655c2008-11-14 19:41:19 +00002640 thr = map_threads_maybe_lookup( tid );
2641 tl_assert(thr); /* cannot fail - Thread* must already exist */
2642
2643 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2644 tl_assert(bar);
2645
2646 if (!bar->initted) {
2647 HG_(record_error_Misc)(
2648 thr, "pthread_barrier_destroy: barrier was never initialised"
2649 );
2650 }
2651
2652 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2653 HG_(record_error_Misc)(
2654 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2655 );
2656 }
2657
sewardj9f569b72008-11-13 13:33:09 +00002658 /* Maybe we shouldn't do this; just let it persist, so that when it
2659 is reinitialised we don't need to do any dynamic memory
2660 allocation? The downside is a potentially unlimited space leak,
2661 if the client creates (in turn) a large number of barriers all
2662 at different locations. Note that if we do later move to the
2663 don't-delete-it scheme, we need to mark the barrier as
2664 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002665 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002666 map_barrier_to_Bar_delete( barrier );
2667}
2668
2669
2670static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2671 void* barrier )
2672{
sewardj1c466b72008-11-19 11:52:14 +00002673 /* This function gets called after a client thread calls
2674 pthread_barrier_wait but before it arrives at the real
2675 pthread_barrier_wait.
2676
2677 Why is the following correct? It's a bit subtle.
2678
2679 If this is not the last thread arriving at the barrier, we simply
2680 note its presence and return. Because valgrind (at least as of
2681 Nov 08) is single threaded, we are guaranteed safe from any race
2682 conditions when in this function -- no other client threads are
2683 running.
2684
2685 If this is the last thread, then we are again the only running
2686 thread. All the other threads will have either arrived at the
2687 real pthread_barrier_wait or are on their way to it, but in any
2688 case are guaranteed not to be able to move past it, because this
2689 thread is currently in this function and so has not yet arrived
2690 at the real pthread_barrier_wait. That means that:
2691
2692 1. While we are in this function, none of the other threads
2693 waiting at the barrier can move past it.
2694
2695 2. When this function returns (and simulated execution resumes),
2696 this thread and all other waiting threads will be able to move
2697 past the real barrier.
2698
2699 Because of this, it is now safe to update the vector clocks of
2700 all threads, to represent the fact that they all arrived at the
2701 barrier and have all moved on. There is no danger of any
2702 complications to do with some threads leaving the barrier and
2703 racing back round to the front, whilst others are still leaving
2704 (which is the primary source of complication in correct handling/
2705 implementation of barriers). That can't happen because we update
2706 our data structures here so as to indicate that the threads have
2707 passed the barrier, even though, as per (2) above, they are
2708 guaranteed not to pass the barrier until we return.
2709
2710 This relies crucially on Valgrind being single-threaded. If that
2711 changes, this will need to be reconsidered.
2712 */
sewardj9f569b72008-11-13 13:33:09 +00002713 Thread* thr;
2714 Bar* bar;
2715 SO* so;
2716 UWord present, i;
2717
2718 if (SHOW_EVENTS >= 1)
2719 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2720 "(tid=%d, barrier=%p)\n",
2721 (Int)tid, (void*)barrier );
2722
2723 thr = map_threads_maybe_lookup( tid );
2724 tl_assert(thr); /* cannot fail - Thread* must already exist */
2725
2726 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2727 tl_assert(bar);
2728
2729 if (!bar->initted) {
2730 HG_(record_error_Misc)(
2731 thr, "pthread_barrier_wait: barrier is uninitialised"
2732 );
2733 return; /* client is broken .. avoid assertions below */
2734 }
2735
2736 /* guaranteed by _INIT_PRE above */
2737 tl_assert(bar->size > 0);
2738 tl_assert(bar->waiting);
2739
2740 VG_(addToXA)( bar->waiting, &thr );
2741
2742 /* guaranteed by this function */
2743 present = VG_(sizeXA)(bar->waiting);
2744 tl_assert(present > 0 && present <= bar->size);
2745
2746 if (present < bar->size)
2747 return;
2748
sewardj553655c2008-11-14 19:41:19 +00002749 /* All the threads have arrived. Now do the Interesting Bit. Get
sewardj9f569b72008-11-13 13:33:09 +00002750 a new synchronisation object and do a weak send to it from all
2751 the participating threads. This makes its vector clocks be the
sewardj553655c2008-11-14 19:41:19 +00002752 join of all the individual threads' vector clocks. Then do a
sewardj9f569b72008-11-13 13:33:09 +00002753 strong receive from it back to all threads, so that their VCs
2754 are a copy of it (hence are all equal to the join of their
2755 original VCs.) */
2756 so = libhb_so_alloc();
2757
2758 /* XXX check ->waiting has no duplicates */
2759
2760 tl_assert(bar->waiting);
2761 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2762
2763 /* compute the join ... */
2764 for (i = 0; i < bar->size; i++) {
2765 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2766 Thr* hbthr = t->hbthr;
2767 libhb_so_send( hbthr, so, False/*weak send*/ );
2768 }
2769 /* ... and distribute to all threads */
2770 for (i = 0; i < bar->size; i++) {
2771 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2772 Thr* hbthr = t->hbthr;
2773 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2774 }
2775
2776 /* finally, we must empty out the waiting vector */
sewardj1c466b72008-11-19 11:52:14 +00002777 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2778
2779 /* and we don't need this any more. Perhaps a stack-allocated
2780 SO would be better? */
2781 libhb_so_dealloc(so);
sewardj9f569b72008-11-13 13:33:09 +00002782}
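/* Illustrative sketch only: a worked example of the weak-send /
   strong-recv scheme above, for a hypothetical two-thread barrier,
   ignoring any per-thread clock ticks. Suppose the arriving threads'
   vector clocks are

      T1: [5,2]      T2: [1,7]

   The two weak sends leave the SO holding the pointwise-max join
   [5,7]; the strong recvs then make each thread's clock a copy of
   that, so both threads leave with [5,7]. Hence every pre-barrier
   access of either thread happens-before every post-barrier access
   of both, which is exactly the barrier semantics we want. */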
2783
2784
sewardjb4112022007-11-09 22:49:28 +00002785/*--------------------------------------------------------------*/
2786/*--- Lock acquisition order monitoring ---*/
2787/*--------------------------------------------------------------*/
2788
2789/* FIXME: here are some optimisations still to do in
2790 laog__pre_thread_acquires_lock.
2791
2792 The graph is structured so that if L1 --*--> L2 then L1 must be
2793 acquired before L2.
2794
2795 The common case is that some thread T holds (eg) L1 L2 and L3 and
2796 is repeatedly acquiring and releasing Ln, and there is no ordering
2797 error in what it is doing. Hence it repeatedly:
2798
2799 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
2800 produces the answer No (because there is no error).
2801
2802 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
2803 (because they already got added the first time T acquired Ln).
2804
2805 Hence cache these two events:
2806
2807 (1) Cache result of the query from last time. Invalidate the cache
2808 any time any edges are added to or deleted from laog.
2809
2810 (2) Cache these add-edge requests and ignore them if said edges
2811 have already been added to laog. Invalidate the cache any time
2812 any edges are deleted from laog.
2813*/
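// A minimal sketch (not implemented; all identifiers hypothetical) of
// the single-entry query cache proposed in (1) above. WordSetIDs are
// interned, so comparing them with == is sound, as elsewhere in this
// file. Per the invalidation rule above, laog__add_edge and
// laog__del_edge would both set laog_cache_valid = False.
//
// static Lock*     laog_cached_src  = NULL;
// static WordSetID laog_cached_dsts = 0;
// static Lock*     laog_cached_res  = NULL;
// static Bool      laog_cache_valid = False;
//
// static Lock* laog__dfs_cached ( Lock* src, WordSetID dsts ) {
//    if (laog_cache_valid
//        && src == laog_cached_src && dsts == laog_cached_dsts)
//       return laog_cached_res;                 /* cache hit */
//    laog_cached_res  = laog__do_dfs_from_to( src, dsts );
//    laog_cached_src  = src;
//    laog_cached_dsts = dsts;
//    laog_cache_valid = True;
//    return laog_cached_res;
// }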
2814
2815typedef
2816 struct {
2817 WordSetID inns; /* in univ_laog */
2818 WordSetID outs; /* in univ_laog */
2819 }
2820 LAOGLinks;
2821
2822/* lock order acquisition graph */
2823static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
2824
2825/* EXPOSITION ONLY: for each edge in 'laog', record the two places
2826 where that edge was created, so that we can show the user later if
2827 we need to. */
2828typedef
2829 struct {
2830 Addr src_ga; /* Lock guest addresses for */
2831 Addr dst_ga; /* src/dst of the edge */
2832 ExeContext* src_ec; /* And corresponding places where that */
2833 ExeContext* dst_ec; /* ordering was established */
2834 }
2835 LAOGLinkExposition;
2836
sewardj250ec2e2008-02-15 22:02:30 +00002837static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00002838 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
2839 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
2840 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
2841 if (llx1->src_ga < llx2->src_ga) return -1;
2842 if (llx1->src_ga > llx2->src_ga) return 1;
2843 if (llx1->dst_ga < llx2->dst_ga) return -1;
2844 if (llx1->dst_ga > llx2->dst_ga) return 1;
2845 return 0;
2846}
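/* Note that the comparison deliberately ignores the src_ec/dst_ec
   fields; this is what allows laog__pre_thread_acquires_lock (below)
   to probe laog_exposition using a stack-allocated key in which both
   ExeContext fields are NULL. */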
2847
2848static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
2849/* end EXPOSITION ONLY */
2850
2851
sewardja65db102009-01-26 10:45:16 +00002852__attribute__((noinline))
2853static void laog__init ( void )
2854{
2855 tl_assert(!laog);
2856 tl_assert(!laog_exposition);
2857
2858 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
2859 HG_(free), NULL/*unboxedcmp*/ );
2860
2861 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
2862 cmp_LAOGLinkExposition );
2863 tl_assert(laog);
2864 tl_assert(laog_exposition);
2865}
2866
sewardjb4112022007-11-09 22:49:28 +00002867static void laog__show ( Char* who ) {
2868 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00002869 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00002870 Lock* me;
2871 LAOGLinks* links;
2872 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00002873 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00002874 me = NULL;
2875 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002876 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00002877 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00002878 tl_assert(me);
2879 tl_assert(links);
2880 VG_(printf)(" node %p:\n", me);
2881 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
2882 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00002883 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00002884 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
2885 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00002886 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00002887 me = NULL;
2888 links = NULL;
2889 }
sewardj896f6f92008-08-19 08:38:52 +00002890 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00002891 VG_(printf)("}\n");
2892}
2893
2894__attribute__((noinline))
2895static void laog__add_edge ( Lock* src, Lock* dst ) {
2896 Word keyW;
2897 LAOGLinks* links;
2898 Bool presentF, presentR;
2899 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
2900
2901 /* Take the opportunity to sanity check the graph. Record in
2902 presentF if there is already a src->dst mapping in src's
2903 forwards links, and presentR if there is already a src->dst
2904 mapping in dst's backwards links. They should agree!
2905 Also, we need to know whether the edge was already present so as
2906 to decide whether or not to update the link details mapping. We
2907 can compute presentF and presentR essentially for free, so may
2908 as well do this always. */
2909 presentF = presentR = False;
2910
2911 /* Update the out edges for src */
2912 keyW = 0;
2913 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002914 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00002915 WordSetID outs_new;
2916 tl_assert(links);
2917 tl_assert(keyW == (Word)src);
2918 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
2919 presentF = outs_new == links->outs;
2920 links->outs = outs_new;
2921 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002922 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00002923 links->inns = HG_(emptyWS)( univ_laog );
2924 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00002925 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00002926 }
2927 /* Update the in edges for dst */
2928 keyW = 0;
2929 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002930 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00002931 WordSetID inns_new;
2932 tl_assert(links);
2933 tl_assert(keyW == (Word)dst);
2934 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
2935 presentR = inns_new == links->inns;
2936 links->inns = inns_new;
2937 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002938 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00002939 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
2940 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00002941 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00002942 }
2943
2944 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
2945
2946 if (!presentF && src->acquired_at && dst->acquired_at) {
2947 LAOGLinkExposition expo;
2948 /* If this edge is entering the graph, and we have acquired_at
2949 information for both src and dst, record those acquisition
2950 points. Hence, if there is later a violation of this
2951 ordering, we can show the user the two places in which the
2952 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00002953 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00002954 src->guestaddr, dst->guestaddr);
2955 expo.src_ga = src->guestaddr;
2956 expo.dst_ga = dst->guestaddr;
2957 expo.src_ec = NULL;
2958 expo.dst_ec = NULL;
2959 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00002960 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00002961 /* we already have it; do nothing */
2962 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002963 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
2964 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00002965 expo2->src_ga = src->guestaddr;
2966 expo2->dst_ga = dst->guestaddr;
2967 expo2->src_ec = src->acquired_at;
2968 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00002969 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00002970 }
2971 }
2972}
2973
2974__attribute__((noinline))
2975static void laog__del_edge ( Lock* src, Lock* dst ) {
2976 Word keyW;
2977 LAOGLinks* links;
2978 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
2979 /* Update the out edges for src */
2980 keyW = 0;
2981 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002982 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00002983 tl_assert(links);
2984 tl_assert(keyW == (Word)src);
2985 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
2986 }
2987 /* Update the in edges for dst */
2988 keyW = 0;
2989 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002990 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00002991 tl_assert(links);
2992 tl_assert(keyW == (Word)dst);
2993 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
2994 }
2995}
2996
2997__attribute__((noinline))
2998static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
2999 Word keyW;
3000 LAOGLinks* links;
3001 keyW = 0;
3002 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003003 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003004 tl_assert(links);
3005 tl_assert(keyW == (Word)lk);
3006 return links->outs;
3007 } else {
3008 return HG_(emptyWS)( univ_laog );
3009 }
3010}
3011
3012__attribute__((noinline))
3013static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3014 Word keyW;
3015 LAOGLinks* links;
3016 keyW = 0;
3017 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003018 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003019 tl_assert(links);
3020 tl_assert(keyW == (Word)lk);
3021 return links->inns;
3022 } else {
3023 return HG_(emptyWS)( univ_laog );
3024 }
3025}
3026
3027__attribute__((noinline))
3028static void laog__sanity_check ( Char* who ) {
3029 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003030 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003031 Lock* me;
3032 LAOGLinks* links;
sewardja65db102009-01-26 10:45:16 +00003033 if (UNLIKELY(!laog || !laog_exposition))
3034 laog__init();
sewardj896f6f92008-08-19 08:38:52 +00003035 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003036 me = NULL;
3037 links = NULL;
3038 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003039 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003040 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003041 tl_assert(me);
3042 tl_assert(links);
3043 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3044 for (i = 0; i < ws_size; i++) {
3045 if ( ! HG_(elemWS)( univ_laog,
3046 laog__succs( (Lock*)ws_words[i] ),
3047 (Word)me ))
3048 goto bad;
3049 }
3050 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3051 for (i = 0; i < ws_size; i++) {
3052 if ( ! HG_(elemWS)( univ_laog,
3053 laog__preds( (Lock*)ws_words[i] ),
3054 (Word)me ))
3055 goto bad;
3056 }
3057 me = NULL;
3058 links = NULL;
3059 }
sewardj896f6f92008-08-19 08:38:52 +00003060 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003061 return;
3062
3063 bad:
3064 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3065 laog__show(who);
3066 tl_assert(0);
3067}
3068
3069/* If there is a path in laog from 'src' to any of the elements in
3070 'dst', return an arbitrarily chosen element of 'dst' reachable from
3071 'src'. If no path exists from 'src' to any element in 'dst', return
3072 NULL. */
3073__attribute__((noinline))
3074static
3075Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3076{
3077 Lock* ret;
3078 Word i, ssz;
3079 XArray* stack; /* of Lock* */
3080 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3081 Lock* here;
3082 WordSetID succs;
3083 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003084 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003085 //laog__sanity_check();
3086
3087 /* If the destination set is empty, we can never get there from
3088 'src' :-), so don't bother to try */
3089 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3090 return NULL;
3091
3092 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003093 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3094 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003095
3096 (void) VG_(addToXA)( stack, &src );
3097
3098 while (True) {
3099
3100 ssz = VG_(sizeXA)( stack );
3101
3102 if (ssz == 0) { ret = NULL; break; }
3103
3104 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3105 VG_(dropTailXA)( stack, 1 );
3106
3107 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3108
sewardj896f6f92008-08-19 08:38:52 +00003109 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003110 continue;
3111
sewardj896f6f92008-08-19 08:38:52 +00003112 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003113
3114 succs = laog__succs( here );
3115 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3116 for (i = 0; i < succs_size; i++)
3117 (void) VG_(addToXA)( stack, &succs_words[i] );
3118 }
3119
sewardj896f6f92008-08-19 08:38:52 +00003120 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003121 VG_(deleteXA)( stack );
3122 return ret;
3123}
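/* (Implementation note: 'stack' is an XArray of Lock* used LIFO --
   push via addToXA, pop via indexXA on the last element followed by
   dropTailXA(..,1) -- and 'visited' is a WordFM used purely as a set
   of Lock*, which is what guarantees termination even if laog has
   become cyclic.) */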
3124
3125
3126/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3127 between 'lk' and the locks already held by 'thr' and issue a
3128 complaint if so. Also, update the ordering graph appropriately.
3129*/
3130__attribute__((noinline))
3131static void laog__pre_thread_acquires_lock (
3132 Thread* thr, /* NB: BEFORE lock is added */
3133 Lock* lk
3134 )
3135{
sewardj250ec2e2008-02-15 22:02:30 +00003136 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003137 Word ls_size, i;
3138 Lock* other;
3139
3140 /* It may be that 'thr' already holds 'lk' and is recursively
3141 relocking it. In this case we just ignore the call. */
3142 /* NB: univ_lsets really is correct here */
3143 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3144 return;
3145
sewardja65db102009-01-26 10:45:16 +00003146 if (UNLIKELY(!laog || !laog_exposition))
3147 laog__init();
sewardjb4112022007-11-09 22:49:28 +00003148
3149 /* First, the check. Complain if there is any path in laog from lk
3150 to any of the locks already held by thr, since if any such path
3151 existed, it would mean that previously lk was acquired before
3152 (rather than after, as we are doing here) at least one of those
3153 locks.
3154 */
3155 other = laog__do_dfs_from_to(lk, thr->locksetA);
3156 if (other) {
3157 LAOGLinkExposition key, *found;
3158 /* So we managed to find a path lk --*--> other in the graph,
3159 which implies that 'lk' should have been acquired before
3160 'other' but is in fact being acquired afterwards. We present
3161 the lk/other arguments to record_error_LockOrder in the order
3162 in which they should have been acquired. */
3163 /* Go look in the laog_exposition mapping, to find the allocation
3164 points for this edge, so we can show the user. */
3165 key.src_ga = lk->guestaddr;
3166 key.dst_ga = other->guestaddr;
3167 key.src_ec = NULL;
3168 key.dst_ec = NULL;
3169 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003170 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003171 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003172 tl_assert(found != &key);
3173 tl_assert(found->src_ga == key.src_ga);
3174 tl_assert(found->dst_ga == key.dst_ga);
3175 tl_assert(found->src_ec);
3176 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003177 HG_(record_error_LockOrder)(
3178 thr, lk->guestaddr, other->guestaddr,
3179 found->src_ec, found->dst_ec );
sewardjb4112022007-11-09 22:49:28 +00003180 } else {
3181 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003182 HG_(record_error_LockOrder)(
3183 thr, lk->guestaddr, other->guestaddr,
3184 NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003185 }
3186 }
3187
3188 /* Second, add to laog the pairs
3189 (old, lk) | old <- locks already held by thr
3190 Since both old and lk are currently held by thr, their acquired_at
3191 fields must be non-NULL.
3192 */
3193 tl_assert(lk->acquired_at);
3194 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3195 for (i = 0; i < ls_size; i++) {
3196 Lock* old = (Lock*)ls_words[i];
3197 tl_assert(old->acquired_at);
3198 laog__add_edge( old, lk );
3199 }
3200
3201 /* Why "except_Locks"? We're here because a lock is being
3202 acquired by a thread, and we're in an inconsistent state here.
3203 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3204 When called in this inconsistent state, locks__sanity_check duly
3205 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003206 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003207 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3208}
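/* Illustrative sketch only: the classic client interleaving that the
   check above reports. T1 establishes the laog edge L1 --> L2; when
   T2 later acquires L1 whilst already holding L2, the DFS finds the
   path L1 --*--> L2 and HG_(record_error_LockOrder) fires.

      T1: pthread_mutex_lock(&L1);
          pthread_mutex_lock(&L2);   // adds edge L1 --> L2
          ... unlock both ...
      T2: pthread_mutex_lock(&L2);
          pthread_mutex_lock(&L1);   // reported: L1 should precede L2
*/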
3209
3210
3211/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3212
3213__attribute__((noinline))
3214static void laog__handle_one_lock_deletion ( Lock* lk )
3215{
3216 WordSetID preds, succs;
3217 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003218 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003219
sewardja65db102009-01-26 10:45:16 +00003220 if (UNLIKELY(!laog || !laog_exposition))
3221 laog__init();
3222
sewardjb4112022007-11-09 22:49:28 +00003223 preds = laog__preds( lk );
3224 succs = laog__succs( lk );
3225
3226 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3227 for (i = 0; i < preds_size; i++)
3228 laog__del_edge( (Lock*)preds_words[i], lk );
3229
3230 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3231 for (j = 0; j < succs_size; j++)
3232 laog__del_edge( lk, (Lock*)succs_words[j] );
3233
3234 for (i = 0; i < preds_size; i++) {
3235 for (j = 0; j < succs_size; j++) {
3236 if (preds_words[i] != succs_words[j]) {
3237 /* This can pass unlocked locks to laog__add_edge, since
3238 we're deleting stuff. So their acquired_at fields may
3239 be NULL. */
3240 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3241 }
3242 }
3243 }
3244}
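/* For example (illustrative only): if laog contains A --> B --> C and
   lock B is destroyed, the two loops above first delete A --> B and
   B --> C, and the final double loop then re-adds A --> C. So the
   ordering fact "A before C", previously recorded only via B,
   survives B's deletion. */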
3245
sewardj1cbc12f2008-11-10 16:16:46 +00003246//__attribute__((noinline))
3247//static void laog__handle_lock_deletions (
3248// WordSetID /* in univ_laog */ locksToDelete
3249// )
3250//{
3251// Word i, ws_size;
3252// UWord* ws_words;
3253//
sewardja65db102009-01-26 10:45:16 +00003254// if (UNLIKELY(!laog || !laog_exposition))
3255// laog__init();
sewardj1cbc12f2008-11-10 16:16:46 +00003256//
3257// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3258// for (i = 0; i < ws_size; i++)
3259// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3260//
3261// if (HG_(clo_sanity_flags) & SCE_LAOG)
3262// all__sanity_check("laog__handle_lock_deletions-post");
3263//}
sewardjb4112022007-11-09 22:49:28 +00003264
3265
3266/*--------------------------------------------------------------*/
3267/*--- Malloc/free replacements ---*/
3268/*--------------------------------------------------------------*/
3269
3270typedef
3271 struct {
3272 void* next; /* required by m_hashtable */
3273 Addr payload; /* ptr to actual block */
3274 SizeT szB; /* size requested */
3275 ExeContext* where; /* where it was allocated */
3276 Thread* thr; /* allocating thread */
3277 }
3278 MallocMeta;
3279
3280/* A hash table of MallocMetas, used to track malloc'd blocks
3281 (obviously). */
3282static VgHashTable hg_mallocmeta_table = NULL;
3283
3284
3285static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003286 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003287 tl_assert(md);
3288 return md;
3289}
3290static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003291 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003292}
3293
3294
3295/* Allocate a client block and set up the metadata for it. */
3296
3297static
3298void* handle_alloc ( ThreadId tid,
3299 SizeT szB, SizeT alignB, Bool is_zeroed )
3300{
3301 Addr p;
3302 MallocMeta* md;
3303
3304 tl_assert( ((SSizeT)szB) >= 0 );
3305 p = (Addr)VG_(cli_malloc)(alignB, szB);
3306 if (!p) {
3307 return NULL;
3308 }
3309 if (is_zeroed)
3310 VG_(memset)((void*)p, 0, szB);
3311
3312 /* Note that map_threads_lookup must succeed (cannot fail), since
3313 memory can only be allocated by currently alive threads, hence
3314 they must have an entry in map_threads. */
3315 md = new_MallocMeta();
3316 md->payload = p;
3317 md->szB = szB;
3318 md->where = VG_(record_ExeContext)( tid, 0 );
3319 md->thr = map_threads_lookup( tid );
3320
3321 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3322
3323 /* Tell the lower level memory wranglers. */
3324 evh__new_mem_heap( p, szB, is_zeroed );
3325
3326 return (void*)p;
3327}
3328
3329/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3330 Cast to a signed type to catch any unexpectedly negative args.
3331 We're assuming here that the size asked for is not greater than
3332 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3333 platforms). */
3334static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3335 if (((SSizeT)n) < 0) return NULL;
3336 return handle_alloc ( tid, n, VG_(clo_alignment),
3337 /*is_zeroed*/False );
3338}
3339static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3340 if (((SSizeT)n) < 0) return NULL;
3341 return handle_alloc ( tid, n, VG_(clo_alignment),
3342 /*is_zeroed*/False );
3343}
3344static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3345 if (((SSizeT)n) < 0) return NULL;
3346 return handle_alloc ( tid, n, VG_(clo_alignment),
3347 /*is_zeroed*/False );
3348}
3349static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3350 if (((SSizeT)n) < 0) return NULL;
3351 return handle_alloc ( tid, n, align,
3352 /*is_zeroed*/False );
3353}
3354static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3355 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3356 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3357 /*is_zeroed*/True );
3358}
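/* Illustrative sketch only: what the SSizeT casts above buy us on a
   64-bit platform. A broken client call such as

      malloc( (size_t)-1 );   // requests 0xFFFFFFFFFFFFFFFF bytes

   arrives with n == 2^64-1, so ((SSizeT)n) < 0 holds and the wrapper
   returns NULL rather than forwarding an absurd request to
   VG_(cli_malloc). */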
3359
3360
3361/* Free a client block, including getting rid of the relevant
3362 metadata. */
3363
3364static void handle_free ( ThreadId tid, void* p )
3365{
3366 MallocMeta *md, *old_md;
3367 SizeT szB;
3368
3369 /* First see if we can find the metadata for 'p'. */
3370 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3371 if (!md)
3372 return; /* apparently freeing a bogus address. Oh well. */
3373
3374 tl_assert(md->payload == (Addr)p);
3375 szB = md->szB;
3376
3377 /* Nuke the metadata block */
3378 old_md = (MallocMeta*)
3379 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3380 tl_assert(old_md); /* it must be present - we just found it */
3381 tl_assert(old_md == md);
3382 tl_assert(old_md->payload == (Addr)p);
3383
3384 VG_(cli_free)((void*)old_md->payload);
3385 delete_MallocMeta(old_md);
3386
3387 /* Tell the lower level memory wranglers. */
3388 evh__die_mem_heap( (Addr)p, szB );
3389}
3390
3391static void hg_cli__free ( ThreadId tid, void* p ) {
3392 handle_free(tid, p);
3393}
3394static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3395 handle_free(tid, p);
3396}
3397static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3398 handle_free(tid, p);
3399}
3400
3401
3402static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3403{
3404 MallocMeta *md, *md_new, *md_tmp;
3405 SizeT i;
3406
3407 Addr payload = (Addr)payloadV;
3408
3409 if (((SSizeT)new_size) < 0) return NULL;
3410
3411 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3412 if (!md)
3413 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3414
3415 tl_assert(md->payload == payload);
3416
3417 if (md->szB == new_size) {
3418 /* size unchanged */
3419 md->where = VG_(record_ExeContext)(tid, 0);
3420 return payloadV;
3421 }
3422
3423 if (md->szB > new_size) {
3424 /* new size is smaller */
3425 md->szB = new_size;
3426 md->where = VG_(record_ExeContext)(tid, 0);
3427 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3428 return payloadV;
3429 }
3430
3431 /* else */ {
3432 /* new size is bigger */
3433 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3434
3435 /* First half kept and copied, second half new */
3436 // FIXME: shouldn't we use a copier which implements the
3437 // memory state machine?
3438 shadow_mem_copy_range( payload, p_new, md->szB );
3439 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003440 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003441 /* FIXME: can anything funny happen here? specifically, if the
3442 old range contained a lock, then die_mem_heap will complain.
3443 Is that the correct behaviour? Not sure. */
3444 evh__die_mem_heap( payload, md->szB );
3445
3446 /* Copy from old to new */
3447 for (i = 0; i < md->szB; i++)
3448 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3449
3450 /* Because the metadata hash table is indexed by payload address,
3451 we have to get rid of the old hash table entry and make a new
3452 one. We can't just modify the existing metadata in place,
3453 because then it would (almost certainly) be in the wrong hash
3454 chain. */
3455 md_new = new_MallocMeta();
3456 *md_new = *md;
3457
3458 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3459 tl_assert(md_tmp);
3460 tl_assert(md_tmp == md);
3461
3462 VG_(cli_free)((void*)md->payload);
3463 delete_MallocMeta(md);
3464
3465 /* Update fields */
3466 md_new->where = VG_(record_ExeContext)( tid, 0 );
3467 md_new->szB = new_size;
3468 md_new->payload = p_new;
3469 md_new->thr = map_threads_lookup( tid );
3470
3471 /* and add */
3472 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3473
3474 return (void*)p_new;
3475 }
3476}
3477
3478
3479/*--------------------------------------------------------------*/
3480/*--- Instrumentation ---*/
3481/*--------------------------------------------------------------*/
3482
3483static void instrument_mem_access ( IRSB* bbOut,
3484 IRExpr* addr,
3485 Int szB,
3486 Bool isStore,
3487 Int hWordTy_szB )
3488{
3489 IRType tyAddr = Ity_INVALID;
3490 HChar* hName = NULL;
3491 void* hAddr = NULL;
3492 Int regparms = 0;
3493 IRExpr** argv = NULL;
3494 IRDirty* di = NULL;
3495
3496 tl_assert(isIRAtom(addr));
3497 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3498
3499 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3500 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3501
3502 /* So the effective address is in 'addr' now. */
3503 regparms = 1; // unless stated otherwise
3504 if (isStore) {
3505 switch (szB) {
3506 case 1:
3507 hName = "evh__mem_help_write_1";
3508 hAddr = &evh__mem_help_write_1;
3509 argv = mkIRExprVec_1( addr );
3510 break;
3511 case 2:
3512 hName = "evh__mem_help_write_2";
3513 hAddr = &evh__mem_help_write_2;
3514 argv = mkIRExprVec_1( addr );
3515 break;
3516 case 4:
3517 hName = "evh__mem_help_write_4";
3518 hAddr = &evh__mem_help_write_4;
3519 argv = mkIRExprVec_1( addr );
3520 break;
3521 case 8:
3522 hName = "evh__mem_help_write_8";
3523 hAddr = &evh__mem_help_write_8;
3524 argv = mkIRExprVec_1( addr );
3525 break;
3526 default:
3527 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3528 regparms = 2;
3529 hName = "evh__mem_help_write_N";
3530 hAddr = &evh__mem_help_write_N;
3531 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3532 break;
3533 }
3534 } else {
3535 switch (szB) {
3536 case 1:
3537 hName = "evh__mem_help_read_1";
3538 hAddr = &evh__mem_help_read_1;
3539 argv = mkIRExprVec_1( addr );
3540 break;
3541 case 2:
3542 hName = "evh__mem_help_read_2";
3543 hAddr = &evh__mem_help_read_2;
3544 argv = mkIRExprVec_1( addr );
3545 break;
3546 case 4:
3547 hName = "evh__mem_help_read_4";
3548 hAddr = &evh__mem_help_read_4;
3549 argv = mkIRExprVec_1( addr );
3550 break;
3551 case 8:
3552 hName = "evh__mem_help_read_8";
3553 hAddr = &evh__mem_help_read_8;
3554 argv = mkIRExprVec_1( addr );
3555 break;
3556 default:
3557 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3558 regparms = 2;
3559 hName = "evh__mem_help_read_N";
3560 hAddr = &evh__mem_help_read_N;
3561 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3562 break;
3563 }
3564 }
3565
3566 /* Add the helper. */
3567 tl_assert(hName);
3568 tl_assert(hAddr);
3569 tl_assert(argv);
3570 di = unsafeIRDirty_0_N( regparms,
3571 hName, VG_(fnptr_to_fnentry)( hAddr ),
3572 argv );
3573 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
3574}
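/* Illustrative sketch only (pretty-printed IR shown approximately):
   for a 4-byte store, the instrumentation loop below hands e.g.

      STle(t11) = t12

   to this function, which emits, immediately before the store, a
   dirty helper call equivalent to

      evh__mem_help_write_4(t11);

   with regparms == 1. For sizes other than 1/2/4/8 the _N variant is
   used instead, taking the size as a second argument (regparms 2). */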
3575
3576
sewardjd52392d2008-11-08 20:36:26 +00003577//static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
3578//{
3579// switch (event) {
3580// case Imbe_SnoopedStoreBegin:
3581// case Imbe_SnoopedStoreEnd:
3582// /* These arise from ppc stwcx. insns. They should perhaps be
3583// handled better. */
3584// break;
3585// case Imbe_Fence:
3586// break; /* not interesting */
3587// case Imbe_BusLock:
3588// case Imbe_BusUnlock:
3589// addStmtToIRSB(
3590// bbOut,
3591// IRStmt_Dirty(
3592// unsafeIRDirty_0_N(
3593// 0/*regparms*/,
3594// event == Imbe_BusLock ? "evh__bus_lock"
3595// : "evh__bus_unlock",
3596// VG_(fnptr_to_fnentry)(
3597// event == Imbe_BusLock ? &evh__bus_lock
3598// : &evh__bus_unlock
3599// ),
3600// mkIRExprVec_0()
3601// )
3602// )
3603// );
3604// break;
3605// default:
3606// tl_assert(0);
3607// }
3608//}
sewardjb4112022007-11-09 22:49:28 +00003609
3610
3611static
3612IRSB* hg_instrument ( VgCallbackClosure* closure,
3613 IRSB* bbIn,
3614 VexGuestLayout* layout,
3615 VexGuestExtents* vge,
3616 IRType gWordTy, IRType hWordTy )
3617{
3618 Int i;
3619 IRSB* bbOut;
sewardj484fe802008-12-22 18:17:24 +00003620 Bool x86busLocked = False;
3621 Bool isSnoopedStore = False;
sewardjb4112022007-11-09 22:49:28 +00003622
3623 if (gWordTy != hWordTy) {
3624 /* We don't currently support this case. */
3625 VG_(tool_panic)("host/guest word size mismatch");
3626 }
3627
3628 /* Set up BB */
3629 bbOut = emptyIRSB();
3630 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
3631 bbOut->next = deepCopyIRExpr(bbIn->next);
3632 bbOut->jumpkind = bbIn->jumpkind;
3633
3634 // Copy verbatim any IR preamble preceding the first IMark
3635 i = 0;
3636 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
3637 addStmtToIRSB( bbOut, bbIn->stmts[i] );
3638 i++;
3639 }
3640
3641 for (/*use current i*/; i < bbIn->stmts_used; i++) {
3642 IRStmt* st = bbIn->stmts[i];
3643 tl_assert(st);
3644 tl_assert(isFlatIRStmt(st));
3645 switch (st->tag) {
3646 case Ist_NoOp:
3647 case Ist_AbiHint:
3648 case Ist_Put:
3649 case Ist_PutI:
3650 case Ist_IMark:
3651 case Ist_Exit:
3652 /* None of these can contain any memory references. */
3653 break;
3654
3655 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00003656 //instrument_memory_bus_event( bbOut, st->Ist.MBE.event );
3657 switch (st->Ist.MBE.event) {
3658 case Imbe_Fence:
3659 break; /* not interesting */
sewardj484fe802008-12-22 18:17:24 +00003660 /* Imbe_Bus{Lock,Unlock} arise from x86/amd64 LOCK
3661 prefixed instructions. */
sewardjf98e1c02008-10-25 16:22:41 +00003662 case Imbe_BusLock:
3663 tl_assert(x86busLocked == False);
3664 x86busLocked = True;
3665 break;
3666 case Imbe_BusUnlock:
3667 tl_assert(x86busLocked == True);
3668 x86busLocked = False;
3669 break;
sewardj484fe802008-12-22 18:17:24 +00003670 /* Imbe_SnoopedStore{Begin,End} arise from ppc
3671 stwcx. instructions. */
sewardj92124542008-12-18 01:20:11 +00003672 case Imbe_SnoopedStoreBegin:
sewardj484fe802008-12-22 18:17:24 +00003673 tl_assert(isSnoopedStore == False);
3674 isSnoopedStore = True;
3675 break;
sewardj92124542008-12-18 01:20:11 +00003676 case Imbe_SnoopedStoreEnd:
sewardj484fe802008-12-22 18:17:24 +00003677 tl_assert(isSnoopedStore == True);
3678 isSnoopedStore = False;
sewardj92124542008-12-18 01:20:11 +00003679 break;
sewardjf98e1c02008-10-25 16:22:41 +00003680 default:
3681 goto unhandled;
3682 }
sewardjb4112022007-11-09 22:49:28 +00003683 break;
3684
3685 case Ist_Store:
sewardj484fe802008-12-22 18:17:24 +00003686 if (!x86busLocked && !isSnoopedStore)
sewardjf98e1c02008-10-25 16:22:41 +00003687 instrument_mem_access(
3688 bbOut,
3689 st->Ist.Store.addr,
3690 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
3691 True/*isStore*/,
3692 sizeofIRType(hWordTy)
3693 );
3694 break;
sewardjb4112022007-11-09 22:49:28 +00003695
3696 case Ist_WrTmp: {
3697 IRExpr* data = st->Ist.WrTmp.data;
3698 if (data->tag == Iex_Load) {
3699 instrument_mem_access(
3700 bbOut,
3701 data->Iex.Load.addr,
3702 sizeofIRType(data->Iex.Load.ty),
3703 False/*!isStore*/,
3704 sizeofIRType(hWordTy)
3705 );
3706 }
3707 break;
3708 }
3709
3710 case Ist_Dirty: {
3711 Int dataSize;
3712 IRDirty* d = st->Ist.Dirty.details;
3713 if (d->mFx != Ifx_None) {
3714 /* This dirty helper accesses memory. Collect the
3715 details. */
3716 tl_assert(d->mAddr != NULL);
3717 tl_assert(d->mSize != 0);
3718 dataSize = d->mSize;
3719 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
3720 instrument_mem_access(
3721 bbOut, d->mAddr, dataSize, False/*!isStore*/,
3722 sizeofIRType(hWordTy)
3723 );
3724 }
sewardj484fe802008-12-22 18:17:24 +00003725 /* This isn't really correct. Really the
3726 instrumentation should only be added when
3727 (!x86busLocked && !isSnoopedStore), just like with
3728 Ist_Store. Still, I don't think this is
3729 particularly important. */
sewardjb4112022007-11-09 22:49:28 +00003730 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
3731 instrument_mem_access(
3732 bbOut, d->mAddr, dataSize, True/*isStore*/,
3733 sizeofIRType(hWordTy)
3734 );
3735 }
3736 } else {
3737 tl_assert(d->mAddr == NULL);
3738 tl_assert(d->mSize == 0);
3739 }
3740 break;
3741 }
3742
3743 default:
sewardjf98e1c02008-10-25 16:22:41 +00003744 unhandled:
3745 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00003746 tl_assert(0);
3747
3748 } /* switch (st->tag) */
3749
3750 addStmtToIRSB( bbOut, st );
3751 } /* iterate over bbIn->stmts */
3752
3753 return bbOut;
3754}
3755
3756
3757/*----------------------------------------------------------------*/
3758/*--- Client requests ---*/
3759/*----------------------------------------------------------------*/
3760
3761/* Sheesh. Yet another goddam finite map. */
3762static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
3763
3764static void map_pthread_t_to_Thread_INIT ( void ) {
3765 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00003766 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
3767 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00003768 tl_assert(map_pthread_t_to_Thread != NULL);
3769 }
3770}
3771
3772
3773static
3774Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
3775{
3776 if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
3777 return False;
3778
3779 /* Anything that gets past the above check is one of ours, so we
3780 should be able to handle it. */
3781
3782 /* default, meaningless return value, unless otherwise set */
3783 *ret = 0;
3784
3785 switch (args[0]) {
3786
3787 /* --- --- User-visible client requests --- --- */
3788
3789 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00003790 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00003791 args[1], args[2]);
3792 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00003793 are any held locks etc in the area. Calling evh__die_mem
3794 and then evh__new_mem is a bit inefficient; probably just
3795 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00003796 if (args[2] > 0) { /* length */
3797 evh__die_mem(args[1], args[2]);
3798 /* and then set it to New */
3799 evh__new_mem(args[1], args[2]);
3800 }
3801 break;
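      /* Client-side usage sketch (illustrative; buf/buf_len are
         hypothetical): with helgrind.h included, a program that
         recycles a buffer between threads outside Helgrind's
         knowledge can issue

            VALGRIND_HG_CLEAN_MEMORY(buf, buf_len);

         which arrives here with args[1] == (UWord)buf and
         args[2] == buf_len, and makes the range look like freshly
         allocated memory again. */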
3802
3803 /* --- --- Client requests for Helgrind's use only --- --- */
3804
3805 /* Some thread is telling us its pthread_t value. Record the
3806 binding between that and the associated Thread*, so we can
3807 later find the Thread* again when notified of a join by the
3808 thread. */
3809 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
3810 Thread* my_thr = NULL;
3811 if (0)
3812 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
3813 (void*)args[1]);
3814 map_pthread_t_to_Thread_INIT();
3815 my_thr = map_threads_maybe_lookup( tid );
3816 /* This assertion should hold because the map_threads (tid to
3817 Thread*) binding should have been made at the point of
3818 low-level creation of this thread, which should have
3819 happened prior to us getting this client request for it.
3820 That's because this client request is sent from
3821 client-world from the 'thread_wrapper' function, which
3822 only runs once the thread has been low-level created. */
3823 tl_assert(my_thr != NULL);
3824 /* So now we know that (pthread_t)args[1] is associated with
3825 (Thread*)my_thr. Note that down. */
3826 if (0)
3827 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
3828 (void*)args[1], (void*)my_thr );
sewardj896f6f92008-08-19 08:38:52 +00003829 VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
sewardjb4112022007-11-09 22:49:28 +00003830 break;
3831 }
3832
3833 case _VG_USERREQ__HG_PTH_API_ERROR: {
3834 Thread* my_thr = NULL;
3835 map_pthread_t_to_Thread_INIT();
3836 my_thr = map_threads_maybe_lookup( tid );
3837 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00003838 HG_(record_error_PthAPIerror)(
3839 my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00003840 break;
3841 }
3842
3843 /* This thread (tid) has completed a join with the quitting
3844 thread whose pthread_t is in args[1]. */
3845 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
3846 Thread* thr_q = NULL; /* quitter Thread* */
3847 Bool found = False;
3848 if (0)
3849 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
3850 (void*)args[1]);
3851 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00003852 found = VG_(lookupFM)( map_pthread_t_to_Thread,
sewardjb5f29642007-11-16 12:02:43 +00003853 NULL, (Word*)&thr_q, (Word)args[1] );
sewardjb4112022007-11-09 22:49:28 +00003854 /* Can this fail? It would mean that our pthread_join
3855 wrapper observed a successful join on args[1] yet that
3856 thread never existed (or at least, it never lodged an
3857 entry in the mapping (via SET_MY_PTHREAD_T)). Which
3858 sounds like a bug in the threads library. */
3859 // FIXME: get rid of this assertion; handle properly
3860 tl_assert(found);
3861 if (found) {
3862 if (0)
3863 VG_(printf)(".................... quitter Thread* = %p\n",
3864 thr_q);
3865 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
3866 }
3867 break;
3868 }
3869
3870 /* EXPOSITION only: by intercepting lock init events we can show
3871 the user where the lock was initialised, rather than only
3872 being able to show where it was first locked. Intercepting
3873 lock initialisations is not necessary for the basic operation
3874 of the race checker. */
3875 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
3876 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
3877 break;
3878
3879 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
3880 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
3881 break;
3882
3883 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
3884 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
3885 break;
3886
3887 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
3888 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
3889 break;
3890
3891 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
3892 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
3893 break;
3894
3895 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
3896 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
3897 break;
3898
3899 /* This thread is about to do pthread_cond_signal on the
3900 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
3901 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
3902 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
3903 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
3904 break;
3905
3906 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
3907 Returns a flag indicating whether or not the mutex is believed to be
3908 valid for this operation. */
3909 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
3910 Bool mutex_is_valid
3911 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
3912 (void*)args[2] );
3913 *ret = mutex_is_valid ? 1 : 0;
3914 break;
3915 }
3916
sewardjf98e1c02008-10-25 16:22:41 +00003917 /* cond=arg[1] */
3918 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
3919 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
3920 break;
3921
sewardjb4112022007-11-09 22:49:28 +00003922 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
3923 mutex=arg[2] */
3924 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
3925 evh__HG_PTHREAD_COND_WAIT_POST( tid,
3926 (void*)args[1], (void*)args[2] );
3927 break;
3928
3929 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
3930 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
3931 break;
3932
3933 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
3934 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
3935 break;
3936
sewardj789c3c52008-02-25 12:10:07 +00003937 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00003938 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00003939 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
3940 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00003941 break;
3942
3943 /* rwlock=arg[1], isW=arg[2] */
3944 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
3945 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
3946 break;
3947
3948 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
3949 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
3950 break;
3951
3952 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
3953 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
3954 break;
3955
sewardj11e352f2007-11-30 11:11:02 +00003956 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
3957 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00003958 break;
3959
sewardj11e352f2007-11-30 11:11:02 +00003960 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
3961 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00003962 break;
3963
sewardj11e352f2007-11-30 11:11:02 +00003964 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
3965 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
3966 break;
3967
3968 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
3969 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00003970 break;
3971
sewardj9f569b72008-11-13 13:33:09 +00003972 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
3973 /* pth_bar_t*, ulong */
3974 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1], args[2] );
3975 break;
3976
3977 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
3978 /* pth_bar_t* */
3979 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
3980 break;
3981
3982 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
3983 /* pth_bar_t* */
3984 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
3985 break;
sewardjb4112022007-11-09 22:49:28 +00003986
3987 default:
3988 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00003989 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
3990 args[0]);
sewardjb4112022007-11-09 22:49:28 +00003991 }
3992
3993 return True;
3994}
3995
3996
3997/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00003998/*--- Setup ---*/
3999/*----------------------------------------------------------------*/
4000
4001static Bool hg_process_cmd_line_option ( Char* arg )
4002{
sewardjf98e1c02008-10-25 16:22:41 +00004003 if (VG_CLO_STREQ(arg, "--track-lockorders=no"))
4004 HG_(clo_track_lockorders) = False;
4005 else if (VG_CLO_STREQ(arg, "--track-lockorders=yes"))
4006 HG_(clo_track_lockorders) = True;
sewardjb4112022007-11-09 22:49:28 +00004007
4008 else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=no"))
sewardjf98e1c02008-10-25 16:22:41 +00004009 HG_(clo_cmp_race_err_addrs) = False;
sewardjb4112022007-11-09 22:49:28 +00004010 else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=yes"))
sewardjf98e1c02008-10-25 16:22:41 +00004011 HG_(clo_cmp_race_err_addrs) = True;
sewardjb4112022007-11-09 22:49:28 +00004012
sewardj849b0ed2008-12-21 10:43:10 +00004013 else if (VG_CLO_STREQ(arg, "--show-conflicts=no"))
4014 HG_(clo_show_conflicts) = False;
4015 else if (VG_CLO_STREQ(arg, "--show-conflicts=yes"))
4016 HG_(clo_show_conflicts) = True;
4017
4018 /* If you change the 10k/10mill limits, remember to also change
4019 them in assertions at the top of event_map_maybe_GC. */
4020 else VG_BNUM_CLO(arg, "--conflict-cache-size",
4021 HG_(clo_conflict_cache_size), 10*1000, 10*1000*1000)
sewardjb4112022007-11-09 22:49:28 +00004022
sewardj11e352f2007-11-30 11:11:02 +00004023 /* "stuvwx" --> stuvwx (binary) */
4024 else if (VG_CLO_STREQN(18, arg, "--hg-sanity-flags=")) {
sewardjb4112022007-11-09 22:49:28 +00004025 Int j;
sewardjb5f29642007-11-16 12:02:43 +00004026 Char* opt = & arg[18];
sewardjb4112022007-11-09 22:49:28 +00004027
sewardj11e352f2007-11-30 11:11:02 +00004028 if (6 != VG_(strlen)(opt)) {
sewardjb4112022007-11-09 22:49:28 +00004029 VG_(message)(Vg_UserMsg,
sewardj11e352f2007-11-30 11:11:02 +00004030 "--hg-sanity-flags argument must have 6 digits");
sewardjb4112022007-11-09 22:49:28 +00004031 return False;
4032 }
sewardj11e352f2007-11-30 11:11:02 +00004033 for (j = 0; j < 6; j++) {
sewardjb4112022007-11-09 22:49:28 +00004034 if ('0' == opt[j]) { /* do nothing */ }
sewardjf98e1c02008-10-25 16:22:41 +00004035 else if ('1' == opt[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00004036 else {
sewardj11e352f2007-11-30 11:11:02 +00004037 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardjb4112022007-11-09 22:49:28 +00004038 "only contain 0s and 1s");
4039 return False;
4040 }
4041 }
sewardjf98e1c02008-10-25 16:22:41 +00004042 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00004043 }
4044
4045 else
4046 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4047
4048 return True;
4049}
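// Example invocation (illustrative only) exercising the options
// parsed above:
//
//    valgrind --tool=helgrind --track-lockorders=no \
//             --conflict-cache-size=5000000 \
//             --hg-sanity-flags=010000 ./a.out
//
// --conflict-cache-size is range-checked to [10000, 10000000] by
// VG_BNUM_CLO, and the sanity-flags string must be exactly six 0/1
// digits.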
4050
4051static void hg_print_usage ( void )
4052{
4053 VG_(printf)(
sewardj849b0ed2008-12-21 10:43:10 +00004054" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
4055" --show-conflicts=no|yes show both stack traces in a race? [yes]\n"
4056" --conflict-cache-size=N size of conflict history cache [1000000]\n"
sewardjb4112022007-11-09 22:49:28 +00004057 );
4058 VG_(replacement_malloc_print_usage)();
4059}
4060
4061static void hg_print_debug_usage ( void )
4062{
4063 VG_(replacement_malloc_print_debug_usage)();
sewardjb4112022007-11-09 22:49:28 +00004064 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
4065 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00004066 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00004067 " at events (X = 0|1) [000000]\n");
4068 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00004069 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00004070 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00004071 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
4072 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00004073 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00004074 VG_(printf)(" 000010 at lock/unlock events\n");
4075 VG_(printf)(" 000001 at thread create/join events\n");
sewardjb4112022007-11-09 22:49:28 +00004076}
4077
4078static void hg_post_clo_init ( void )
4079{
4080}
4081
4082static void hg_fini ( Int exitcode )
4083{
4084 if (SHOW_DATA_STRUCTURES)
4085 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00004086 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00004087 all__sanity_check("SK_(fini)");
4088
sewardjb4112022007-11-09 22:49:28 +00004089 if (VG_(clo_verbosity) >= 2) {
4090
4091 if (1) {
4092 VG_(printf)("\n");
4093 HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
4094 VG_(printf)("\n");
4095 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
4096 VG_(printf)("\n");
4097 HG_(ppWSUstats)( univ_laog, "univ_laog" );
4098 }
4099
sewardjf98e1c02008-10-25 16:22:41 +00004100 //zz VG_(printf)("\n");
4101 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
4102 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
4103 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
4104 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
4105 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
4106 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
4107 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
4108 //zz stats__hbefore_stk_hwm);
4109 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
4110 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
sewardjb4112022007-11-09 22:49:28 +00004111
4112 VG_(printf)("\n");
barta0b6b2c2008-07-07 06:49:24 +00004113 VG_(printf)(" locksets: %'8d unique lock sets\n",
sewardjb4112022007-11-09 22:49:28 +00004114 (Int)HG_(cardinalityWSU)( univ_lsets ));
barta0b6b2c2008-07-07 06:49:24 +00004115 VG_(printf)(" threadsets: %'8d unique thread sets\n",
sewardjb4112022007-11-09 22:49:28 +00004116 (Int)HG_(cardinalityWSU)( univ_tsets ));
barta0b6b2c2008-07-07 06:49:24 +00004117 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
sewardjb4112022007-11-09 22:49:28 +00004118 (Int)HG_(cardinalityWSU)( univ_laog ));
4119
sewardjd52392d2008-11-08 20:36:26 +00004120 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
4121 // stats__ga_LL_adds,
4122 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
sewardjb4112022007-11-09 22:49:28 +00004123
sewardjf98e1c02008-10-25 16:22:41 +00004124 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
4125 HG_(stats__LockN_to_P_queries),
4126 HG_(stats__LockN_to_P_get_map_size)() );
sewardjb4112022007-11-09 22:49:28 +00004127
sewardjf98e1c02008-10-25 16:22:41 +00004128 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
4129 HG_(stats__string_table_queries),
4130 HG_(stats__string_table_get_map_size)() );
barta0b6b2c2008-07-07 06:49:24 +00004131 VG_(printf)(" LAOG: %'8d map size\n",
sewardj896f6f92008-08-19 08:38:52 +00004132 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
barta0b6b2c2008-07-07 06:49:24 +00004133 VG_(printf)(" LAOG exposition: %'8d map size\n",
sewardj896f6f92008-08-19 08:38:52 +00004134 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
barta0b6b2c2008-07-07 06:49:24 +00004135 VG_(printf)(" locks: %'8lu acquires, "
4136 "%'lu releases\n",
sewardjb4112022007-11-09 22:49:28 +00004137 stats__lockN_acquires,
4138 stats__lockN_releases
4139 );
barta0b6b2c2008-07-07 06:49:24 +00004140 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
sewardjb4112022007-11-09 22:49:28 +00004141
4142 VG_(printf)("\n");
sewardjf98e1c02008-10-25 16:22:41 +00004143 libhb_shutdown(True);
sewardjb4112022007-11-09 22:49:28 +00004144 }
4145}
4146
sewardjf98e1c02008-10-25 16:22:41 +00004147/* FIXME: move these somewhere sane */
4148
4149static
4150void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4151{
4152 Thread* thr;
4153 ThreadId tid;
4154 UWord nActual;
4155 tl_assert(hbt);
4156 thr = libhb_get_Thr_opaque( hbt );
4157 tl_assert(thr);
4158 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4159 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4160 NULL, NULL, 0 );
4161 tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
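   /* ExeContexts are interned and stored permanently by the core, so
      the pointer handed back to libhb remains valid indefinitely. */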
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2008, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(pp_Error),
                                   False, /*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(print_extra_suppression_info));

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);
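   /* Client requests are how the macros in helgrind.h communicate
      with the tool from inside the client program. */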

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes Helgrind to start
      more slowly and use significantly more memory, without very
      often providing useful results.  The user can request to load
      this information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

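   /* Tell the core which memory events to report.  Newly-visible
      memory goes to the evh__new_mem* handlers and dying memory to
      evh__die_mem, which update Helgrind's shadow state. */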
   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( shadow_mem_copy_range );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

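   /* These fire when the core itself reads or writes client memory
      on the client's behalf (during syscalls, for instance);
      accesses which the normal instrumentation never sees. */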
   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

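   /* libhb_init returns the Thr* for the root (initial) thread; bind
      it into our own Thread bookkeeping straight away. */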
   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
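   /* A minimal sketch of the layout those asserts guard (field names
      here are illustrative, not the actual MallocMeta definition):
      VG_(HT_*) treats the start of every node as a VgHashNode,
      i.e. { chain-pointer, UWord key }, so any struct stored in the
      table must begin with compatible fields:

         typedef struct {
            void* next;     // hash-chain link; must be first
            Addr  payload;  // lookup key; must be UWord-sized
            // ... tool-specific fields follow ...
         } ExampleMeta;

      Hence the checks that a struct pointer fits in a void* and that
      Addr has the same size as UWord. */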
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

}

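/* Exports hg_pre_clo_init as the tool's entry point, together with
   the core/tool interface version it was built against, so that the
   core can reject a mismatched tool at startup. */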
VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/