
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2009 OpenWorks LLP
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_debuginfo.h"  /* VG_(get_data_description) */
#include "pub_tool_wordfm.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid, which looks wrong.

// FIXME: when the client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
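
/* (Illustrative aside, not in the original sources: the pattern that
   trips strict-aliasing analysis is calls such as

      Lock* lk = NULL;
      VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );

   where a Lock* object is read and written through a Word* lvalue.
   At -O2, type-based alias analysis may assume the two accesses
   cannot alias, hence the need for -fno-strict-aliasing.) */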

// FIXME catch sync signals (SEGV, basically) and unlock BHL,
// if held.  Otherwise a LOCK-prefixed insn which segfaults
// gets Helgrind into a total muddle as the BHL will not be
// released after the insn.

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// this is:
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
#define SHOW_EXPENSIVE_STUFF 0

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.

sewardjb4112022007-11-09 22:49:28 +0000121
122/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000123/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000124/*----------------------------------------------------------------*/
125
sewardjb4112022007-11-09 22:49:28 +0000126/* Admin linked list of Threads */
127static Thread* admin_threads = NULL;
128
129/* Admin linked list of Locks */
130static Lock* admin_locks = NULL;
131
sewardjb4112022007-11-09 22:49:28 +0000132/* Mapping table for core ThreadIds to Thread* */
133static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
134
sewardjb4112022007-11-09 22:49:28 +0000135/* Mapping table for lock guest addresses to Lock* */
136static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
137
138/* The word-set universes for thread sets and lock sets. */
139static WordSetU* univ_tsets = NULL; /* sets of Thread* */
140static WordSetU* univ_lsets = NULL; /* sets of Lock* */
141static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
142
143/* never changed; we only care about its address. Is treated as if it
144 was a standard userspace lock. Also we have a Lock* describing it
145 so it can participate in lock sets in the usual way. */
146static Int __bus_lock = 0;
147static Lock* __bus_lock_Lock = NULL;
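
/* (Editorial note: __bus_lock models the "bus hardware lock" (BHL)
   referred to in the FIXMEs above -- the lock implicitly taken by
   LOCK-prefixed instructions.  initialise_data_structures() below
   enters it into map_locks, so it behaves like any client mutex.) */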


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

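/* (Editorial header comment:) Make a new Thread shadowing the given
   libhb thread.  It starts with empty locksets, no core ThreadId yet
   bound (coretid == VG_INVALID_THREADID), and is prepended to the
   admin_threads list. */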
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin = admin_locks;
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks = lock;
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

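/* (Editorial note: lk->heldBy is a multiset -- a VG_(Bag) -- so a
   thread that w-holds a recursive (LK_mbRec) lock N times appears in
   the bag with count N; lockN_release below removes one occurrence
   per call.) */
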
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                                      (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
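
/* (Illustrative note, not in the original: the lock-acquire handlers
   further down discover client locks lazily through this function,
   along the lines of

      lk = map_locks_lookup_or_create(
              lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );

   so Helgrind needs no explicit registration of client mutexes.) */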

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/
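
/* (Editorial note: the Segment and SecMap clauses above date from the
   pre-libhb design; in this version those structures live inside
   libhb, and the checkers below -- threads__, locks__ and
   laog__sanity_check -- cover only the Thread, Lock and
   lock-order-graph parts.) */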


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}

///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a good
//   chance that it is indeed the lock that the programmer intended to
//   use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This is the case where there are many
//   transitions ShR -> ShR, all with an empty lockset, and only later
//   is there a transition to ShM.  So what we want to do is note the
//   lossage lock at the point where a ShR -> ShR transition empties out
//   the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates,
//   in ga_to_lastlock, the guest address with the lossage lock.  In fact
//   we do not record the Lock* directly as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
//
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
//
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
static void shadow_mem_copy_range ( Addr src, Addr dst, SizeT len )
{
   libhb_copy_shadow_state( src, dst, len );
}

static void shadow_mem_read_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_READ_N(hbthr, a, len);
}

static void shadow_mem_write_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_WRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_range_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_range_noaccess( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP   = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1276
1277
1278/* The lock at 'lock_ga' is just about to be unlocked. Make all
1279 necessary updates, and also do all possible error checks. */
1280static
1281void evhH__pre_thread_releases_lock ( Thread* thr,
1282 Addr lock_ga, Bool isRDWR )
1283{
1284 Lock* lock;
1285 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001286 Bool was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if we know it
      should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument" );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument" );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* We no longer hold the lock. */
      tl_assert(!lock->heldBy);
      tl_assert(lock->heldW == False);
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
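
/* Illustrative client-side sketch (not part of Helgrind itself):
   assuming normal POSIX semantics, each bad unlock below trips one
   of the checks above.

      #include <pthread.h>

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;

      void* worker ( void* arg ) {
         // UnlockForeign: 'mx' is held, but by the main thread,
         // not by this one.
         pthread_mutex_unlock(&mx);
         return NULL;
      }

      int main ( void ) {
         pthread_t t;
         pthread_mutex_lock(&mx);
         pthread_create(&t, NULL, worker, NULL);
         pthread_join(t, NULL);
         pthread_mutex_unlock(&mx);  // valid release
         // UnlockUnlocked: 'mx' is no longer held by anybody.
         pthread_mutex_unlock(&mx);
         return 0;
      }
*/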


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread* current_Thread = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
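   /* We are now outside client code, so this is presumably a
      convenient point for libhb to reclaim unused storage. */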
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* About the only thing we do need to do is clear the map_threads
      entry, in order that the Valgrind core can re-use it. */
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
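
/* Illustrative client-side sketch (not part of Helgrind itself):
   the send/recv pair above is what makes the following standard
   pattern race-free, assuming normal POSIX semantics.

      #include <pthread.h>

      static int result;

      void* worker ( void* arg ) {
         result = 42;           // written by the quitter ...
         return NULL;
      }

      int main ( void ) {
         pthread_t t;
         pthread_create(&t, NULL, worker, NULL);
         pthread_join(t, NULL);
         return result;         // ... read by the stayer: no race,
                                // since the join creates the edge
      }
*/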

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_read_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // FIXME: think of a less ugly hack
   len = VG_(strlen)( (Char*) a );
   shadow_mem_read_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_write_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid -- inited and uninited heap memory
   // are (for now) treated identically, so 'is_inited' is ignored.
   shadow_mem_make_New(get_current_Thread(), a, len);
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

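/* These helpers are invoked, one call per memory access, from the
   instrumented client code -- hence VG_REGPARM and the use of the
   fast cached lookup get_current_Thread_in_C_C rather than
   get_current_Thread. */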
static VG_REGPARM(1)
void evh__mem_help_read_1(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_read_2(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_read_4(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_read_8(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_read_N(Addr a, SizeT size) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_READ_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_write_1(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_write_2(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_write_4(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_write_8(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_write_N(Addr a, SizeT size) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_WRITE_N(hbthr, a, size);
}

//static void evh__bus_lock(void) {
//   Thread* thr;
//   if (0) VG_(printf)("evh__bus_lock()\n");
//   thr = get_current_Thread();
//   tl_assert(thr); /* cannot fail - Thread* must already exist */
//   evhH__post_thread_w_acquires_lock( thr, LK_nonRec, (Addr)&__bus_lock );
//}
//static void evh__bus_unlock(void) {
//   Thread* thr;
//   if (0) VG_(printf)("evh__bus_unlock()\n");
//   thr = get_current_Thread();
//   tl_assert(thr); /* cannot fail - Thread* must already exist */
//   evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
//}

/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument" );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HG_(record_error_Misc)( thr, "Attempt to re-lock a "
                                   "non-recursive lock I already hold" );
   }
}
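
/* Illustrative client-side sketch (not part of Helgrind itself):
   the re-lock check above catches the classic self-deadlock on a
   default (non-recursive) mutex:

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      ...
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // complained about, then deadlock
*/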

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to the SO associated with it.  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

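/* Illustrative client-side sketch (not part of Helgrind itself),
   assuming normal POSIX semantics: the signalling thread 'send's on
   the CV's SO, the waking waiter 'recv's, so the waiter acquires a
   happens-before dependency on everything the signaller did first.

      #include <pthread.h>

      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
      static int ready, data;

      void* signaller ( void* arg ) {
         pthread_mutex_lock(&mx);
         data  = 42;                 // this write ...
         ready = 1;
         pthread_cond_signal(&cv);   // ... is 'sent' here
         pthread_mutex_unlock(&mx);
         return NULL;
      }

      void* waiter ( void* arg ) {
         pthread_mutex_lock(&mx);
         while (!ready)
            pthread_cond_wait(&cv, &mx);   // 'recv': h-b edge back
         pthread_mutex_unlock(&mx);
         return (void*)(long)data;   // no race on 'data'
      }
*/
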
/* pthread_cond_t* -> SO* */
static WordFM* map_cond_to_SO = NULL;

static void map_cond_to_SO_INIT ( void ) {
   if (UNLIKELY(map_cond_to_SO == NULL)) {
      map_cond_to_SO = VG_(newFM)( HG_(zalloc),
                                   "hg.mctSI.1", HG_(free), NULL );
      tl_assert(map_cond_to_SO != NULL);
   }
}

static SO* map_cond_to_SO_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_SO_INIT();
   if (VG_(lookupFM)( map_cond_to_SO, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_cond_to_SO, (UWord)cond, (UWord)so );
      return so;
   }
}

static void map_cond_to_SO_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_SO_INIT();
   if (VG_(delFromFM)( map_cond_to_SO, &keyW, &valW, (UWord)cond )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == (UWord)cond);
      libhb_so_dealloc(so);
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: mutex is bogus
   // error-if: mutex is not locked

   so = map_cond_to_SO_lookup_or_alloc( cond );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex

   return lk_valid;
}

static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   so = map_cond_to_SO_lookup_or_alloc( cond );
   tl_assert(so);

   if (!libhb_so_everSent(so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded without prior "
                                   "pthread_cond_signal");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}

static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_SO_delete( cond );
}


/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument" );
   }
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about.  We should.
*/

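/* Illustrative client-side sketch (not part of Helgrind itself),
   assuming normal POSIX semantics: each sem_wait pops one poster's
   SO, so the waiter acquires a dependency on that poster.

      #include <semaphore.h>

      static sem_t sem;   // sem_init(&sem, 0, 0) done elsewhere
      static int   data;

      void* producer ( void* arg ) {
         data = 42;         // write, then post
         sem_post(&sem);    // strong send on a fresh SO, pushed
         return NULL;
      }

      void* consumer ( void* arg ) {
         sem_wait(&sem);    // pops that SO, strong recv: h-b edge
         return (void*)(long)data;   // no race on 'data'
      }
*/
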
sewardjf98e1c02008-10-25 16:22:41 +00002320/* sem_t* -> XArray* SO* */
2321static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002322
sewardjf98e1c02008-10-25 16:22:41 +00002323static void map_sem_to_SO_stack_INIT ( void ) {
2324 if (map_sem_to_SO_stack == NULL) {
2325 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2326 HG_(free), NULL );
2327 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002328 }
2329}
2330
sewardjf98e1c02008-10-25 16:22:41 +00002331static void push_SO_for_sem ( void* sem, SO* so ) {
2332 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002333 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002334 tl_assert(so);
2335 map_sem_to_SO_stack_INIT();
2336 if (VG_(lookupFM)( map_sem_to_SO_stack,
2337 &keyW, (UWord*)&xa, (UWord)sem )) {
2338 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002339 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002340 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002341 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002342 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2343 VG_(addToXA)( xa, &so );
2344 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002345 }
2346}
2347
sewardjf98e1c02008-10-25 16:22:41 +00002348static SO* mb_pop_SO_for_sem ( void* sem ) {
2349 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002350 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002351 SO* so;
2352 map_sem_to_SO_stack_INIT();
2353 if (VG_(lookupFM)( map_sem_to_SO_stack,
2354 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002355 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002356 Word sz;
2357 tl_assert(keyW == (UWord)sem);
2358 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002359 tl_assert(sz >= 0);
2360 if (sz == 0)
2361 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002362 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2363 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002364 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002365 return so;
sewardjb4112022007-11-09 22:49:28 +00002366 } else {
2367 /* hmm, that's odd. No stack for this semaphore. */
2368 return NULL;
2369 }
2370}
2371
sewardj11e352f2007-11-30 11:11:02 +00002372static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002373{
sewardjf98e1c02008-10-25 16:22:41 +00002374 UWord keyW, valW;
2375 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002376
sewardjb4112022007-11-09 22:49:28 +00002377 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002378 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002379 (Int)tid, (void*)sem );
2380
sewardjf98e1c02008-10-25 16:22:41 +00002381 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002382
sewardjf98e1c02008-10-25 16:22:41 +00002383 /* Empty out the semaphore's SO stack. This way of doing it is
2384 stupid, but at least it's easy. */
2385 while (1) {
2386 so = mb_pop_SO_for_sem( sem );
2387 if (!so) break;
2388 libhb_so_dealloc(so);
2389 }
2390
2391 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2392 XArray* xa = (XArray*)valW;
2393 tl_assert(keyW == (UWord)sem);
2394 tl_assert(xa);
2395 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2396 VG_(deleteXA)(xa);
2397 }
sewardjb4112022007-11-09 22:49:28 +00002398}
2399
sewardj11e352f2007-11-30 11:11:02 +00002400static
2401void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2402{
sewardjf98e1c02008-10-25 16:22:41 +00002403 SO* so;
2404 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002405
2406 if (SHOW_EVENTS >= 1)
2407 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2408 (Int)tid, (void*)sem, value );
2409
sewardjf98e1c02008-10-25 16:22:41 +00002410 thr = map_threads_maybe_lookup( tid );
2411 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002412
sewardjf98e1c02008-10-25 16:22:41 +00002413 /* Empty out the semaphore's SO stack. This way of doing it is
2414 stupid, but at least it's easy. */
2415 while (1) {
2416 so = mb_pop_SO_for_sem( sem );
2417 if (!so) break;
2418 libhb_so_dealloc(so);
2419 }
sewardj11e352f2007-11-30 11:11:02 +00002420
sewardjf98e1c02008-10-25 16:22:41 +00002421 /* If we don't do this check, the following while loop runs us out
2422 of memory for stupid initial values of 'value'. */
2423 if (value > 10000) {
2424 HG_(record_error_Misc)(
2425 thr, "sem_init: initial value exceeds 10000; using 10000" );
2426 value = 10000;
2427 }
sewardj11e352f2007-11-30 11:11:02 +00002428
sewardjf98e1c02008-10-25 16:22:41 +00002429 /* Now create 'valid' new SOs for the thread, do a strong send to
2430 each of them, and push them all on the stack. */
2431 for (; value > 0; value--) {
2432 Thr* hbthr = thr->hbthr;
2433 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002434
sewardjf98e1c02008-10-25 16:22:41 +00002435 so = libhb_so_alloc();
2436 libhb_so_send( hbthr, so, True/*strong send*/ );
2437 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002438 }
2439}
2440
2441static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002442{
sewardjf98e1c02008-10-25 16:22:41 +00002443 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2444 it (iow, write our VC into it, then tick ours), and push the SO
2445 on on a stack of SOs associated with 'sem'. This is later used
2446 by other thread(s) which successfully exit from a sem_wait on
2447 the same sem; by doing a strong recv from SOs popped of the
2448 stack, they acquire dependencies on the posting thread
2449 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002450
sewardjf98e1c02008-10-25 16:22:41 +00002451 Thread* thr;
2452 SO* so;
2453 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002454
2455 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002456 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002457 (Int)tid, (void*)sem );
2458
2459 thr = map_threads_maybe_lookup( tid );
2460 tl_assert(thr); /* cannot fail - Thread* must already exist */
2461
2462 // error-if: sem is bogus
2463
sewardjf98e1c02008-10-25 16:22:41 +00002464 hbthr = thr->hbthr;
2465 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002466
sewardjf98e1c02008-10-25 16:22:41 +00002467 so = libhb_so_alloc();
2468 libhb_so_send( hbthr, so, True/*strong send*/ );
2469 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002470}
2471
sewardj11e352f2007-11-30 11:11:02 +00002472static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002473{
sewardjf98e1c02008-10-25 16:22:41 +00002474 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2475 the 'sem' from this semaphore's SO-stack, and do a strong recv
2476 from it. This creates a dependency back to one of the post-ers
2477 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002478
sewardjf98e1c02008-10-25 16:22:41 +00002479 Thread* thr;
2480 SO* so;
2481 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002482
2483 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002484 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002485 (Int)tid, (void*)sem );
2486
2487 thr = map_threads_maybe_lookup( tid );
2488 tl_assert(thr); /* cannot fail - Thread* must already exist */
2489
2490 // error-if: sem is bogus
2491
sewardjf98e1c02008-10-25 16:22:41 +00002492 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002493
sewardjf98e1c02008-10-25 16:22:41 +00002494 if (so) {
2495 hbthr = thr->hbthr;
2496 tl_assert(hbthr);
2497
2498 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2499 libhb_so_dealloc(so);
2500 } else {
2501 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2502 If this happened it would surely be a bug in the threads
2503 library. */
2504 HG_(record_error_Misc)(
2505 thr, "Bug in libpthread: sem_wait succeeded on"
2506 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002507 }
2508}
2509
2510
sewardj9f569b72008-11-13 13:33:09 +00002511/* -------------------------------------------------------- */
2512/* -------------- events to do with barriers -------------- */
2513/* -------------------------------------------------------- */
2514
2515typedef
2516 struct {
2517 Bool initted; /* has it yet been initted by guest? */
2518 UWord size; /* declared size */
2519 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2520 }
2521 Bar;
2522
2523static Bar* new_Bar ( void ) {
2524 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2525 tl_assert(bar);
2526 /* all fields are zero */
2527 tl_assert(bar->initted == False);
2528 return bar;
2529}
2530
2531static void delete_Bar ( Bar* bar ) {
2532 tl_assert(bar);
2533 if (bar->waiting)
2534 VG_(deleteXA)(bar->waiting);
2535 HG_(free)(bar);
2536}
2537
2538/* A mapping which stores auxiliary data for barriers. */
2539
2540/* pthread_barrier_t* -> Bar* */
2541static WordFM* map_barrier_to_Bar = NULL;
2542
2543static void map_barrier_to_Bar_INIT ( void ) {
2544 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2545 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2546 "hg.mbtBI.1", HG_(free), NULL );
2547 tl_assert(map_barrier_to_Bar != NULL);
2548 }
2549}
2550
2551static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2552 UWord key, val;
2553 map_barrier_to_Bar_INIT();
2554 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2555 tl_assert(key == (UWord)barrier);
2556 return (Bar*)val;
2557 } else {
2558 Bar* bar = new_Bar();
2559 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2560 return bar;
2561 }
2562}
2563
2564static void map_barrier_to_Bar_delete ( void* barrier ) {
2565 UWord keyW, valW;
2566 map_barrier_to_Bar_INIT();
2567 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2568 Bar* bar = (Bar*)valW;
2569 tl_assert(keyW == (UWord)barrier);
2570 delete_Bar(bar);
2571 }
2572}
2573
2574
2575static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2576 void* barrier,
2577 UWord count )
2578{
2579 Thread* thr;
2580 Bar* bar;
2581
2582 if (SHOW_EVENTS >= 1)
2583 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
2584 "(tid=%d, barrier=%p, count=%lu)\n",
2585 (Int)tid, (void*)barrier, count );
2586
2587 thr = map_threads_maybe_lookup( tid );
2588 tl_assert(thr); /* cannot fail - Thread* must already exist */
2589
2590 if (count == 0) {
2591 HG_(record_error_Misc)(
2592 thr, "pthread_barrier_init: 'count' argument is zero"
2593 );
2594 }
2595
2596 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2597 tl_assert(bar);
2598
2599 if (bar->initted) {
2600 HG_(record_error_Misc)(
2601 thr, "pthread_barrier_init: barrier is already initialised"
2602 );
2603 }
2604
2605 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2606 tl_assert(bar->initted);
2607 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002608 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002609 );
2610 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2611 }
2612 if (!bar->waiting) {
2613 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2614 sizeof(Thread*) );
2615 }
2616
2617 tl_assert(bar->waiting);
2618 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
2619 bar->initted = True;
2620 bar->size = count;
2621}
2622
2623
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}


static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
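   /* A worked example of the above scheme, for a barrier of size 3.
      Suppose the three participants arrive with vector clocks

         T1 = [4,1,0]   T2 = [0,7,2]   T3 = [1,0,9]

      Weak-sending from each thread into a fresh SO accumulates the
      pointwise join in the SO:

         so = join(T1,T2,T3) = [4,7,9]

      Strong-receiving from the SO back into each thread then makes

         T1 = T2 = T3 = [4,7,9]

      so every pre-barrier access in any thread happens-before every
      post-barrier access in every thread, which is exactly the
      semantics pthread_barrier_wait requires. */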
   Thread* thr;
   Bar*    bar;
   SO*     so;
   UWord   present, i;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   /* All the threads have arrived.  Now do the Interesting Bit.  Get
      a new synchronisation object and do a weak send to it from all
      the participating threads.  This makes its vector clocks be the
      join of all the individual threads' vector clocks.  Then do a
      strong receive from it back to all threads, so that their VCs
      are a copy of it (hence are all equal to the join of their
      original VCs.) */
   so = libhb_so_alloc();

   /* XXX check ->waiting has no duplicates */

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}

/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
   laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/
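
/* Illustrative sketch only, not used by the tool: one possible shape
   for cache (1) above.  All names here are hypothetical. */
#if 0
static struct {
   Bool      valid;  /* False => no previous query cached */
   Lock*     lk;     /* lock being acquired at the last query */
   WordSetID lset;   /* lockset held then (in univ_lsets) */
   Lock*     result; /* what laog__do_dfs_from_to returned */
} laog_dfs_cache = { False, NULL, 0, NULL };

/* Per rule (1): call this whenever edges are added to or deleted
   from laog. */
static void laog_dfs_cache__invalidate ( void ) {
   laog_dfs_cache.valid = False;
}
#endif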

typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */


__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}

static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}

__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}

__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}

/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst',
   return NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      i, ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   Word      succs_size;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
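
/* Tiny worked example: if laog holds the edges A --> B and B --> C,
   then laog__do_dfs_from_to(A, {C,D}) pops A, pushes its successor B,
   then reaches C via B's out-edges and returns C.  With dsts = {D}
   the stack empties without a hit and NULL comes back. */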


/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
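/* Concrete example of what the check below catches.  Suppose some
   thread earlier did

      lock(L1); lock(L2);      // establishes edge L1 --> L2 in laog

   and a thread currently holding L2 now calls lock(L1).  The DFS
   from L1 reaches L2, which is in the caller's lockset, so a
   LockOrder error is reported, along with the two stacks saved in
   laog_exposition when the L1 --> L2 edge was first created. */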
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word   ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking it.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}


/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}
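
/* For example, with edges A --> B and B --> C, deleting lock B first
   removes A --> B and B --> C, and the preds x succs loop above then
   adds A --> C directly.  Ordering information that routed through
   the deleted lock is thus preserved rather than lost. */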

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//   if (UNLIKELY(!laog || !laog_exposition))
//      laog__init();
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}


/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;


static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}


/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}


/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}


static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT      i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller.  Take a copy of the old size first,
         since the size of the dying range must be computed from it,
         not from the updated md->szB. */
      SizeT old_szB = md->szB;
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      evh__die_mem_heap( md->payload + new_size, old_szB - new_size );
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      shadow_mem_copy_range( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the
   // asked-for area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}


/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

static void instrument_mem_access ( IRSB*   bbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_write_1";
            hAddr = &evh__mem_help_write_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_write_2";
            hAddr = &evh__mem_help_write_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_write_4";
            hAddr = &evh__mem_help_write_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_write_8";
            hAddr = &evh__mem_help_write_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_write_N";
            hAddr = &evh__mem_help_write_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_read_1";
            hAddr = &evh__mem_help_read_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_read_2";
            hAddr = &evh__mem_help_read_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_read_4";
            hAddr = &evh__mem_help_read_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_read_8";
            hAddr = &evh__mem_help_read_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_read_N";
            hAddr = &evh__mem_help_read_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
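
/* For example, a 4-byte load in flattened IR,

      t3 = LDle:I32(t2)

   selects the szB == 4 read case above, so the output block gains a
   preceding helper call, conceptually

      DIRTY ::: evh__mem_help_read_4(t2)
      t3 = LDle:I32(t2)

   (a sketch; the exact pretty-printed form of the dirty call varies).
   The helper therefore observes the address just before the access
   is simulated. */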

//static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
//{
//   switch (event) {
//      case Imbe_SnoopedStoreBegin:
//      case Imbe_SnoopedStoreEnd:
//         /* These arise from ppc stwcx. insns.  They should perhaps be
//            handled better. */
//         break;
//      case Imbe_Fence:
//         break; /* not interesting */
//      case Imbe_BusLock:
//      case Imbe_BusUnlock:
//         addStmtToIRSB(
//            bbOut,
//            IRStmt_Dirty(
//               unsafeIRDirty_0_N(
//                  0/*regparms*/,
//                  event == Imbe_BusLock ? "evh__bus_lock"
//                                        : "evh__bus_unlock",
//                  VG_(fnptr_to_fnentry)(
//                     event == Imbe_BusLock ? &evh__bus_lock
//                                           : &evh__bus_unlock
//                  ),
//                  mkIRExprVec_0()
//               )
//            )
//         );
//         break;
//      default:
//         tl_assert(0);
//   }
//}


static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int   i;
   IRSB* bbOut;
   Bool  x86busLocked   = False;
   Bool  isSnoopedStore = False;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      IRStmt* st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_IMark:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_MBE:
            //instrument_memory_bus_event( bbOut, st->Ist.MBE.event );
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               /* Imbe_Bus{Lock,Unlock} arise from x86/amd64 LOCK
                  prefixed instructions. */
               case Imbe_BusLock:
                  tl_assert(x86busLocked == False);
                  x86busLocked = True;
                  break;
               case Imbe_BusUnlock:
                  tl_assert(x86busLocked == True);
                  x86busLocked = False;
                  break;
               /* Imbe_SnoopedStore{Begin,End} arise from ppc
                  stwcx. instructions. */
               case Imbe_SnoopedStoreBegin:
                  tl_assert(isSnoopedStore == False);
                  isSnoopedStore = True;
                  break;
               case Imbe_SnoopedStoreEnd:
                  tl_assert(isSnoopedStore == True);
                  isSnoopedStore = False;
                  break;
               default:
                  goto unhandled;
            }
            break;

         case Ist_Store:
            if (!x86busLocked && !isSnoopedStore)
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            break;

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               instrument_mem_access(
                  bbOut,
                  data->Iex.Load.addr,
                  sizeofIRType(data->Iex.Load.ty),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
               /* This isn't really correct.  Really the
                  instrumentation should be only added when
                  (!x86busLocked && !isSnoopedStore), just like with
                  Ist_Store.  Still, I don't think this is
                  particularly important. */
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, True/*isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
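
      /* Client-side usage sketch (not Helgrind code): helgrind.h
         wraps this request in a macro, so a program that recycles
         memory through, say, a custom allocator can do

            VALGRIND_HG_CLEAN_MEMORY(ptr, len);

         to make Helgrind forget the area's history and treat it as
         freshly allocated. */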

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}
   else if VG_BOOL_CLO(arg, "--show-conflicts",
                            HG_(clo_show_conflicts)) {}

   /* If you change the 10k/10mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 10*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
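
/* Worked example of the decoding above: "--hg-sanity-flags=010000"
   sets bit (1 << 4), ie 0x10, which corresponds to the SCE_LAOG
   check consulted in laog__pre_thread_acquires_lock; "000010" gives
   0x2, the lock/unlock-event check.  See the table printed by
   hg_print_debug_usage below. */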

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --show-conflicts=no|yes    show both stack traces in a race? [yes]\n"
"    --conflict-cache-size=N    size of conflict history cache [1000000]\n"
   );
   VG_(replacement_malloc_print_usage)();
}

static void hg_print_debug_usage ( void )
{
   VG_(replacement_malloc_print_debug_usage)();
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_post_clo_init ( void )
{
}

static void hg_fini ( Int exitcode )
{
   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_verbosity) >= 2) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}
4146
sewardjf98e1c02008-10-25 16:22:41 +00004147/* FIXME: move these somewhere sane */
4148
static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   /* The core may return fewer than nRequest frames; zero-fill the
      remainder so the caller always sees exactly nRequest entries. */
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* Snapshot the current stack of 'tid' as a permanent ExeContext. */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2009, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(print_extra_suppression_info));

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes Helgrind to start
      more slowly and use significantly more memory, without very
      often providing useful results.  The user can request to load
      this information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */
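   /* For reference, that looks like (hypothetical program name):

         valgrind --tool=helgrind --read-var-info=yes ./myprog

      Variable/type info makes the data addresses in race reports
      more descriptive, at the cost of slower startup and a larger
      memory footprint. */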

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( shadow_mem_copy_range );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

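   /* hbthr_root is libhb's handle for the root (initial) thread;
      the call below builds our own per-thread and lock state around
      it. */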
   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
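   /* As an illustrative sketch (the authoritative definitions live in
      pub_tool_hashtable.h and in MallocMeta earlier in this file, not
      here): the generic chain node is

         typedef struct _VgHashNode {
            struct _VgHashNode* next;
            UWord               key;
         } VgHashNode;

      and any payload struct stored in a VgHashTable must begin with
      layout-compatible fields, so the table can treat a MallocMeta*
      as a VgHashNode*.  The asserts above check the pointer- and
      word-size assumptions this punning relies on. */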
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/