blob: da2c9b8d1f7aa2ac7fa840f8c50277da1b6b84fc [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj4d474d02008-02-11 11:34:59 +000011 Copyright (C) 2007-2008 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30
31 Neither the names of the U.S. Department of Energy nor the
32 University of California nor the names of its contributors may be
33 used to endorse or promote products derived from this software
34 without prior written permission.
35*/
36
37#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000038#include "pub_tool_libcassert.h"
39#include "pub_tool_libcbase.h"
40#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000041#include "pub_tool_threadstate.h"
42#include "pub_tool_tooliface.h"
43#include "pub_tool_hashtable.h"
44#include "pub_tool_replacemalloc.h"
45#include "pub_tool_machine.h"
46#include "pub_tool_options.h"
47#include "pub_tool_xarray.h"
48#include "pub_tool_stacktrace.h"
sewardjb8b79ad2008-03-03 01:35:41 +000049#include "pub_tool_debuginfo.h" /* VG_(get_data_description) */
sewardj896f6f92008-08-19 08:38:52 +000050#include "pub_tool_wordfm.h"
sewardjb4112022007-11-09 22:49:28 +000051
sewardjf98e1c02008-10-25 16:22:41 +000052#include "hg_basics.h"
53#include "hg_wordset.h"
54#include "hg_lock_n_thread.h"
55#include "hg_errors.h"
56
57#include "libhb.h"
58
sewardjb4112022007-11-09 22:49:28 +000059#include "helgrind.h"
60
sewardjf98e1c02008-10-25 16:22:41 +000061
// FIXME: new_mem_w_tid currently ignores the supplied tid; determine
// whether the tid is actually needed and either use it or drop it.
63
64// FIXME: when client destroys a lock or a CV, remove these
65// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000066
67/*----------------------------------------------------------------*/
68/*--- ---*/
69/*----------------------------------------------------------------*/
70
sewardj11e352f2007-11-30 11:11:02 +000071/* Note this needs to be compiled with -fno-strict-aliasing, since it
72 contains a whole bunch of calls to lookupFM etc which cast between
73 Word and pointer types. gcc rightly complains this breaks ANSI C
74 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
75 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000076*/
sewardjb4112022007-11-09 22:49:28 +000077
sewardjefd3b4d2007-12-02 02:05:23 +000078// FIXME catch sync signals (SEGV, basically) and unlock BHL,
79// if held. Otherwise a LOCK-prefixed insn which segfaults
80// gets Helgrind into a total muddle as the BHL will not be
81// released after the insn.
82
sewardjb4112022007-11-09 22:49:28 +000083// FIXME what is supposed to happen to locks in memory which
84// is relocated as a result of client realloc?
85
sewardjb4112022007-11-09 22:49:28 +000086// FIXME put referencing ThreadId into Thread and get
87// rid of the slow reverse mapping function.
88
89// FIXME accesses to NoAccess areas: change state to Excl?
90
91// FIXME report errors for accesses of NoAccess memory?
92
93// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
94// the thread still holds the lock.
95
96/* ------------ Debug/trace options ------------ */
97
98// this is:
99// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
100// happens_before_wrk: 1000
101// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
102#define SHOW_EXPENSIVE_STUFF 0
103
104// 0 for silent, 1 for some stuff, 2 for lots of stuff
105#define SHOW_EVENTS 0
106
sewardjb4112022007-11-09 22:49:28 +0000107
108static void all__sanity_check ( Char* who ); /* fwds */
109
110#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
111
112// 0 for none, 1 for dump at end of run
113#define SHOW_DATA_STRUCTURES 0
114
115
sewardjb4112022007-11-09 22:49:28 +0000116/* ------------ Misc comments ------------ */
117
118// FIXME: don't hardwire initial entries for root thread.
119// Instead, let the pre_thread_ll_create handler do this.
120
sewardjb4112022007-11-09 22:49:28 +0000121
122/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000123/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000124/*----------------------------------------------------------------*/
125
sewardjb4112022007-11-09 22:49:28 +0000126/* Admin linked list of Threads */
127static Thread* admin_threads = NULL;
128
129/* Admin linked list of Locks */
130static Lock* admin_locks = NULL;
131
sewardjb4112022007-11-09 22:49:28 +0000132/* Mapping table for core ThreadIds to Thread* */
133static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
134
sewardjb4112022007-11-09 22:49:28 +0000135/* Mapping table for lock guest addresses to Lock* */
136static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
137
138/* The word-set universes for thread sets and lock sets. */
139static WordSetU* univ_tsets = NULL; /* sets of Thread* */
140static WordSetU* univ_lsets = NULL; /* sets of Lock* */
141static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
142
143/* never changed; we only care about its address. Is treated as if it
144 was a standard userspace lock. Also we have a Lock* describing it
145 so it can participate in lock sets in the usual way. */
146static Int __bus_lock = 0;
147static Lock* __bus_lock_Lock = NULL;
148
149
150/*----------------------------------------------------------------*/
151/*--- Simple helpers for the data structures ---*/
152/*----------------------------------------------------------------*/
153
154static UWord stats__lockN_acquires = 0;
155static UWord stats__lockN_releases = 0;
156
sewardjf98e1c02008-10-25 16:22:41 +0000157static
158ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000159
160/* --------- Constructors --------- */
161
/* Allocate and initialise a new Thread shell around the libhb thread
   'hbthr'.  The Thread starts with empty lock sets and no core
   ThreadId binding (coretid == VG_INVALID_THREADID; the caller binds
   it), and is prepended to the global 'admin_threads' list.
   'errmsg_index' gives each Thread a small unique number for use in
   error messages. */
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;   /* monotonically increasing error-message index */
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );  /* all locks held */
   thread->locksetW = HG_(emptyWS)( univ_lsets );  /* locks held in W mode */
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;  /* bound later by caller */
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   /* prepend onto the global admin list of all Threads */
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}
sewardjf98e1c02008-10-25 16:22:41 +0000177
// Make a new lock which is unlocked (hence ownerless)
/* Allocate a Lock of the given 'kind' guarding guest address
   'guestaddr'.  The lock is created unheld (heldBy == NULL, heldW ==
   False), given a fresh unique number and an associated libhb SO, and
   is prepended to the global 'admin_locks' list. */
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;   /* source of per-lock unique numbers */
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin = admin_locks;
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();   /* happens-before sync object */
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;   /* NULL == currently unheld */
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks = lock;
   return lock;
}
sewardjb4112022007-11-09 22:49:28 +0000196
/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  The associated libhb SO is deallocated too. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* scrub the struct before freeing, so any use-after-free of this
      Lock is more likely to be noticed */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
209
210/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
211 it. This is done strictly: only combinations resulting from
212 correct program and libpthread behaviour are allowed. */
213static void lockN_acquire_writer ( Lock* lk, Thread* thr )
214{
sewardjf98e1c02008-10-25 16:22:41 +0000215 tl_assert(HG_(is_sane_LockN)(lk));
216 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000217
218 stats__lockN_acquires++;
219
220 /* EXPOSITION only */
221 /* We need to keep recording snapshots of where the lock was
222 acquired, so as to produce better lock-order error messages. */
223 if (lk->acquired_at == NULL) {
224 ThreadId tid;
225 tl_assert(lk->heldBy == NULL);
226 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
227 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000228 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000229 } else {
230 tl_assert(lk->heldBy != NULL);
231 }
232 /* end EXPOSITION only */
233
234 switch (lk->kind) {
235 case LK_nonRec:
236 case_LK_nonRec:
237 tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
238 tl_assert(!lk->heldW);
239 lk->heldW = True;
sewardjf98e1c02008-10-25 16:22:41 +0000240 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
sewardj896f6f92008-08-19 08:38:52 +0000241 VG_(addToBag)( lk->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +0000242 break;
243 case LK_mbRec:
244 if (lk->heldBy == NULL)
245 goto case_LK_nonRec;
246 /* 2nd and subsequent locking of a lock by its owner */
247 tl_assert(lk->heldW);
248 /* assert: lk is only held by one thread .. */
sewardj896f6f92008-08-19 08:38:52 +0000249 tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
sewardjb4112022007-11-09 22:49:28 +0000250 /* assert: .. and that thread is 'thr'. */
sewardj896f6f92008-08-19 08:38:52 +0000251 tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
252 == VG_(sizeTotalBag)(lk->heldBy));
253 VG_(addToBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000254 break;
255 case LK_rdwr:
256 tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
257 goto case_LK_nonRec;
258 default:
259 tl_assert(0);
260 }
sewardjf98e1c02008-10-25 16:22:41 +0000261 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000262}
263
/* Update 'lk' to reflect that 'thr' now has a read-acquisition of it.
   Only LK_rdwr locks may be read-acquired, and the lock must be
   either free or already held in read mode. */
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      /* already r-held: just add another reader entry */
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      /* first reader: create the holder bag */
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
300
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed.  When the last holder releases,
   the holder bag is destroyed and the lock returns to the unheld
   state (heldBy == NULL, heldW == False, acquired_at == NULL). */
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      /* last holder gone: revert to the canonical unheld state */
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy = NULL;
      lk->heldW = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
327
/* Remove 'lk' from the locksetA (and, if write-held, locksetW) of
   every Thread currently holding it.  No-op if the lock is unheld.
   Note: this does not modify lk->heldBy itself. */
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      /* the holder's lockset must contain the lock being removed */
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
353
sewardjb4112022007-11-09 22:49:28 +0000354
355/*----------------------------------------------------------------*/
356/*--- Print out the primary data structures ---*/
357/*----------------------------------------------------------------*/
358
sewardjd52392d2008-11-08 20:36:26 +0000359//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000360
361#define PP_THREADS (1<<1)
362#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000363#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000364
365
366static const Int sHOW_ADMIN = 0;
367
368static void space ( Int n )
369{
370 Int i;
371 Char spaces[128+1];
372 tl_assert(n >= 0 && n < 128);
373 if (n == 0)
374 return;
375 for (i = 0; i < n; i++)
376 spaces[i] = ' ';
377 spaces[i] = 0;
378 tl_assert(i < 128+1);
379 VG_(printf)("%s", spaces);
380}
381
/* Print one Thread record, indented by 'd' columns.  Admin-list and
   magic fields are shown only when sHOW_ADMIN is enabled. */
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n", t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   /* locksets are printed as their WordSetID numbers */
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
393
/* Print the entire admin_threads list, indented by 'd' columns. */
static void pp_admin_threads ( Int d )
{
   Int i, n;
   Thread* t;
   /* first pass: count the records so the total can be shown up front */
   for (n = 0, t = admin_threads; t; n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads; t; i++, t = t->admin) {
      if (0) {
         /* NOTE(review): disabled debug output.  'space(n)' indents by
            the record count rather than by 'd' -- looks like a typo,
            but the branch is unreachable. */
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
411
412static void pp_map_threads ( Int d )
413{
414 Int i, n;
415 n = 0;
416 space(d); VG_(printf)("map_threads ");
417 n = 0;
418 for (i = 0; i < VG_N_THREADS; i++) {
419 if (map_threads[i] != NULL)
420 n++;
421 }
422 VG_(printf)("(%d entries) {\n", n);
423 for (i = 0; i < VG_N_THREADS; i++) {
424 if (map_threads[i] == NULL)
425 continue;
426 space(d+3);
427 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
428 }
429 space(d); VG_(printf)("}\n");
430}
431
432static const HChar* show_LockKind ( LockKind lkk ) {
433 switch (lkk) {
434 case LK_mbRec: return "mbRec";
435 case LK_nonRec: return "nonRec";
436 case LK_rdwr: return "rdwr";
437 default: tl_assert(0);
438 }
439}
440
/* Print one Lock record, indented by 'd' columns.  If the lock is
   held, the holder bag is shown as { count:Thread* ... } pairs. */
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n", lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word count;
      VG_(printf)(" { ");
      /* iterate holders; 'count' is the per-thread multiplicity in
         the bag (recursive acquisitions) */
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
465
/* Print the entire admin_locks list, indented by 'd' columns. */
static void pp_admin_locks ( Int d )
{
   Int i, n;
   Lock* lk;
   /* first pass: count the records so the total can be shown up front */
   for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin) {
      if (0) {
         /* NOTE(review): disabled debug output; 'space(n)' indents by
            the record count rather than by 'd' -- unreachable. */
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}
483
/* Print the map_locks mapping (guest lock address -> Lock*), indented
   by 'd' columns. */
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
499
sewardjb4112022007-11-09 22:49:28 +0000500static void pp_everything ( Int flags, Char* caller )
501{
502 Int d = 0;
503 VG_(printf)("\n");
504 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
505 if (flags & PP_THREADS) {
506 VG_(printf)("\n");
507 pp_admin_threads(d+3);
508 VG_(printf)("\n");
509 pp_map_threads(d+3);
510 }
511 if (flags & PP_LOCKS) {
512 VG_(printf)("\n");
513 pp_admin_locks(d+3);
514 VG_(printf)("\n");
515 pp_map_locks(d+3);
516 }
sewardjb4112022007-11-09 22:49:28 +0000517
518 VG_(printf)("\n");
519 VG_(printf)("}\n");
520 VG_(printf)("\n");
521}
522
523#undef SHOW_ADMIN
524
525
526/*----------------------------------------------------------------*/
527/*--- Initialise the primary data structures ---*/
528/*----------------------------------------------------------------*/
529
/* One-time setup of all primary data structures: the thread map, the
   lock map, the three word-set universes, the synthetic bus lock, and
   the Thread record for the root thread (wrapping 'hbthr_root').
   Must be called exactly once, before any events are processed; the
   asserts on the NULL initial states enforce this. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   /* addresses are stored in Word-typed FM/WS slots below */
   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   /* the synthetic bus-hardware lock, entered into map_locks like any
      ordinary client lock */
   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   /* slot 0 must remain free for the invalid-threadid check */
   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}
593
594
595/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000596/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000597/*----------------------------------------------------------------*/
598
599/* Doesn't assert if the relevant map_threads entry is NULL. */
600static Thread* map_threads_maybe_lookup ( ThreadId coretid )
601{
602 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000603 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000604 thr = map_threads[coretid];
605 return thr;
606}
607
608/* Asserts if the relevant map_threads entry is NULL. */
609static inline Thread* map_threads_lookup ( ThreadId coretid )
610{
611 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000612 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000613 thr = map_threads[coretid];
614 tl_assert(thr);
615 return thr;
616}
617
/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   /* The reverse mapping is cached in the Thread itself, so despite
      the "_SLOW" name no table scan happens here.  NOTE(review):
      presumably thr->coretid is kept in sync with map_threads by the
      thread-lifecycle handlers -- confirm against those. */
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}
631
/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   /* the forward mapping must agree with the reverse one */
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}
642
643static void map_threads_delete ( ThreadId coretid )
644{
645 Thread* thr;
646 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000647 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000648 thr = map_threads[coretid];
649 tl_assert(thr);
650 map_threads[coretid] = NULL;
651}
652
653
654/*----------------------------------------------------------------*/
655/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
656/*----------------------------------------------------------------*/
657
/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock.  'tid'
   is used only to record where a newly-seen lock first appeared. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      /* first sighting of this lock: create and register it */
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
683
684static Lock* map_locks_maybe_lookup ( Addr ga )
685{
686 Bool found;
687 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000688 found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000689 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000690 return lk;
691}
692
/* Remove the (ga, Lock*) entry for guest address 'ga' from map_locks.
   Asserts that the entry existed.  The Lock itself is not freed. */
static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
705
706
sewardjb4112022007-11-09 22:49:28 +0000707
708/*----------------------------------------------------------------*/
709/*--- Sanity checking the data structures ---*/
710/*----------------------------------------------------------------*/
711
712static UWord stats__sanity_checks = 0;
713
sewardjb4112022007-11-09 22:49:28 +0000714static void laog__sanity_check ( Char* who ); /* fwds */
715
716/* REQUIRED INVARIANTS:
717
718 Thread vs Segment/Lock/SecMaps
719
720 for each t in Threads {
721
722 // Thread.lockset: each element is really a valid Lock
723
724 // Thread.lockset: each Lock in set is actually held by that thread
725 for lk in Thread.lockset
726 lk == LockedBy(t)
727
728 // Thread.csegid is a valid SegmentID
729 // and the associated Segment has .thr == t
730
731 }
732
733 all thread Locksets are pairwise empty under intersection
734 (that is, no lock is claimed to be held by more than one thread)
735 -- this is guaranteed if all locks in locksets point back to their
736 owner threads
737
738 Lock vs Thread/Segment/SecMaps
739
740 for each entry (gla, la) in map_locks
741 gla == la->guest_addr
742
743 for each lk in Locks {
744
745 lk->tag is valid
746 lk->guest_addr does not have shadow state NoAccess
747 if lk == LockedBy(t), then t->lockset contains lk
748 if lk == UnlockedBy(segid) then segid is valid SegmentID
749 and can be mapped to a valid Segment(seg)
750 and seg->thr->lockset does not contain lk
751 if lk == UnlockedNew then (no lockset contains lk)
752
753 secmaps for lk has .mbHasLocks == True
754
755 }
756
757 Segment vs Thread/Lock/SecMaps
758
759 the Segment graph is a dag (no cycles)
760 all of the Segment graph must be reachable from the segids
761 mentioned in the Threads
762
763 for seg in Segments {
764
765 seg->thr is a sane Thread
766
767 }
768
769 SecMaps vs Segment/Thread/Lock
770
771 for sm in SecMaps {
772
773 sm properly aligned
774 if any shadow word is ShR or ShM then .mbHasShared == True
775
776 for each Excl(segid) state
777 map_segments_lookup maps to a sane Segment(seg)
778 for each ShM/ShR(tsetid,lsetid) state
779 each lk in lset is a valid Lock
780 each thr in tset is a valid thread, which is non-dead
781
782 }
783*/
784
785
786/* Return True iff 'thr' holds 'lk' in some mode. */
787static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
788{
789 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000790 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000791 else
792 return False;
793}
794
/* Sanity check Threads, as far as possible.  'who' identifies the
   caller in the failure message.  Checks, for every Thread on the
   admin list: the Thread is sane; its W-lockset is a subset of its
   full lockset; and every Lock in the full lockset is a sane Lock
   actually held by that Thread.  Aborts via tl_assert on failure. */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
828
829
/* Sanity check Locks, as far as possible.  Verifies the
   Lock-vs-Thread invariants from the big comment earlier in this
   file: that admin_locks and map_locks agree, that each Lock maps
   back to itself via its guest address, and that every holder thread
   records the lock in its locksets.  Prints 'who' plus a short
   failure code and asserts on the first violation. */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
/* Stash a failure code and jump to the reporting code at the end. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            /* holder must list lk in its any-mode lockset ... */
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
894
895
sewardjb4112022007-11-09 22:49:28 +0000896static void all_except_Locks__sanity_check ( Char* who ) {
897 stats__sanity_checks++;
898 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
899 threads__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000900 laog__sanity_check(who);
901}
902static void all__sanity_check ( Char* who ) {
903 all_except_Locks__sanity_check(who);
904 locks__sanity_check(who);
905}
906
907
908/*----------------------------------------------------------------*/
909/*--- the core memory state machine (msm__* functions) ---*/
910/*----------------------------------------------------------------*/
911
sewardjd52392d2008-11-08 20:36:26 +0000912//static WordSetID add_BHL ( WordSetID lockset ) {
913// return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
914//}
915//static WordSetID del_BHL ( WordSetID lockset ) {
916// return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
917//}
sewardjb4112022007-11-09 22:49:28 +0000918
919
sewardjd52392d2008-11-08 20:36:26 +0000920///* Last-lock-lossage records. This mechanism exists to help explain
921// to programmers why we are complaining about a race. The idea is to
922// monitor all lockset transitions. When a previously nonempty
923// lockset becomes empty, the lock(s) that just disappeared (the
924// "lossage") are the locks that have consistently protected the
925// location (ga_of_access) in question for the longest time. Most of
926// the time the lossage-set is a single lock. Because the
// lossage-lock is the one that has survived longest, there
// is a good chance that it is indeed the lock that the programmer
929// intended to use to protect the location.
930//
931// Note that we cannot in general just look at the lossage set when we
932// see a transition to ShM(...,empty-set), because a transition to an
933// empty lockset can happen arbitrarily far before the point where we
934// want to report an error. This is in the case where there are many
935// transitions ShR -> ShR, all with an empty lockset, and only later
936// is there a transition to ShM. So what we want to do is note the
937// lossage lock at the point where a ShR -> ShR transition empties out
938// the lockset, so we can present it later if there should be a
939// transition to ShM.
940//
941// So this function finds such transitions. For each, it associates
942// in ga_to_lastlock, the guest address and the lossage lock. In fact
943// we do not record the Lock* directly as that may disappear later,
944// but instead the ExeContext inside the Lock which says where it was
945// initialised or first locked. ExeContexts are permanent so keeping
946// them indefinitely is safe.
947//
948// A boring detail: the hardware bus lock is not interesting in this
949// respect, so we first remove that from the pre/post locksets.
950//*/
951//
952//static UWord stats__ga_LL_adds = 0;
953//
954//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
955//
956//static
957//void record_last_lock_lossage ( Addr ga_of_access,
958// WordSetID lset_old, WordSetID lset_new )
959//{
960// Lock* lk;
961// Int card_old, card_new;
962//
963// tl_assert(lset_old != lset_new);
964//
965// if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
966// (Int)lset_old,
967// HG_(cardinalityWS)(univ_lsets,lset_old),
968// (Int)lset_new,
969// HG_(cardinalityWS)(univ_lsets,lset_new),
970// ga_of_access );
971//
972// /* This is slow, but at least it's simple. The bus hardware lock
973// just confuses the logic, so remove it from the locksets we're
974// considering before doing anything else. */
975// lset_new = del_BHL( lset_new );
976//
977// if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
978// /* The post-transition lock set is not empty. So we are not
979// interested. We're only interested in spotting transitions
980// that make locksets become empty. */
981// return;
982// }
983//
984// /* lset_new is now empty */
985// card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
986// tl_assert(card_new == 0);
987//
988// lset_old = del_BHL( lset_old );
989// card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
990//
991// if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
992// (Int)lset_old, card_old, (Int)lset_new, card_new );
993//
994// if (card_old == 0) {
995// /* The old lockset was also empty. Not interesting. */
996// return;
997// }
998//
999// tl_assert(card_old > 0);
1000// tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
1001//
1002// /* Now we know we've got a transition from a nonempty lockset to an
1003// empty one. So lset_old must be the set of locks lost. Record
1004// some details. If there is more than one element in the lossage
1005// set, just choose one arbitrarily -- not the best, but at least
1006// it's simple. */
1007//
1008// lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
1009// if (0) VG_(printf)("lossage %ld %p\n",
1010// HG_(cardinalityWS)( univ_lsets, lset_old), lk );
1011// if (lk->appeared_at) {
1012// if (ga_to_lastlock == NULL)
1013// ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
1014// VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
1015// stats__ga_LL_adds++;
1016// }
1017//}
1018//
1019///* This queries the table (ga_to_lastlock) made by
1020// record_last_lock_lossage, when constructing error messages. It
1021// attempts to find the ExeContext of the allocation or initialisation
1022// point for the lossage lock associated with 'ga'. */
1023//
1024//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
1025//{
1026// ExeContext* ec_hint = NULL;
1027// if (ga_to_lastlock != NULL
1028// && VG_(lookupFM)(ga_to_lastlock,
1029// NULL, (Word*)&ec_hint, ga)) {
1030// tl_assert(ec_hint != NULL);
1031// return ec_hint;
1032// } else {
1033// return NULL;
1034// }
1035//}
sewardjb4112022007-11-09 22:49:28 +00001036
1037
sewardjb4112022007-11-09 22:49:28 +00001038/*----------------------------------------------------------------*/
1039/*--- Shadow value and address range handlers ---*/
1040/*----------------------------------------------------------------*/
1041
1042static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
1043static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
1044static inline Thread* get_current_Thread ( void ); /* fwds */
1045
sewardjb4112022007-11-09 22:49:28 +00001046
1047/* Block-copy states (needed for implementing realloc()). */
1048static void shadow_mem_copy_range ( Addr src, Addr dst, SizeT len )
1049{
sewardjf98e1c02008-10-25 16:22:41 +00001050 libhb_copy_shadow_state( src, dst, len );
sewardjb4112022007-11-09 22:49:28 +00001051}
1052
sewardjf98e1c02008-10-25 16:22:41 +00001053static void shadow_mem_read_range ( Thread* thr, Addr a, SizeT len )
1054{
1055 Thr* hbthr = thr->hbthr;
1056 tl_assert(hbthr);
1057 LIBHB_READ_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001058}
1059
1060static void shadow_mem_write_range ( Thread* thr, Addr a, SizeT len ) {
sewardjf98e1c02008-10-25 16:22:41 +00001061 Thr* hbthr = thr->hbthr;
1062 tl_assert(hbthr);
1063 LIBHB_WRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001064}
1065
1066static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1067{
sewardjf98e1c02008-10-25 16:22:41 +00001068 libhb_range_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001069}
1070
sewardjb4112022007-11-09 22:49:28 +00001071static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
1072{
sewardjb4112022007-11-09 22:49:28 +00001073 if (0 && len > 500)
barta0b6b2c2008-07-07 06:49:24 +00001074 VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
sewardjf98e1c02008-10-25 16:22:41 +00001075 libhb_range_noaccess( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001076}
1077
1078
1079/*----------------------------------------------------------------*/
1080/*--- Event handlers (evh__* functions) ---*/
1081/*--- plus helpers (evhH__* functions) ---*/
1082/*----------------------------------------------------------------*/
1083
1084/*--------- Event handler helpers (evhH__* functions) ---------*/
1085
1086/* Create a new segment for 'thr', making it depend (.prev) on its
1087 existing segment, bind together the SegmentID and Segment, and
1088 return both of them. Also update 'thr' so it references the new
1089 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001090//zz static
1091//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1092//zz /*OUT*/Segment** new_segP,
1093//zz Thread* thr )
1094//zz {
1095//zz Segment* cur_seg;
1096//zz tl_assert(new_segP);
1097//zz tl_assert(new_segidP);
1098//zz tl_assert(HG_(is_sane_Thread)(thr));
1099//zz cur_seg = map_segments_lookup( thr->csegid );
1100//zz tl_assert(cur_seg);
1101//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1102//zz at their owner thread. */
1103//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1104//zz *new_segidP = alloc_SegmentID();
1105//zz map_segments_add( *new_segidP, *new_segP );
1106//zz thr->csegid = *new_segidP;
1107//zz }
sewardjb4112022007-11-09 22:49:28 +00001108
1109
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks.
   'thr' is the acquiring thread; 'lkk' is the kind to give the lock
   if no Lock has been seen at 'lock_ga' before.  Errors are reported
   via HG_(record_error_Misc) and otherwise ignored. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* Successful-acquisition exit path. */
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   /* Shared exit: the sanity of 'lk' is a post-condition either way. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1203
1204
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks.
   'thr' is the acquiring thread; 'lkk' must be LK_rdwr since only
   reader-writer locks can be read-locked.  Errors are reported via
   HG_(record_error_Misc) and otherwise ignored. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* Successful-acquisition exit path. */
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   /* Shared exit: the sanity of 'lk' is a post-condition either way. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1276
1277
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks.
   Unlike the acquire handlers above, this runs BEFORE libpthread has
   validated the unlock, so bogus unlocks (unknown lock, unheld lock,
   lock held by someone else) must be detected here and reported as
   client errors rather than asserted on. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* Complain (but continue) if the unlock function used does not
      match the kind of lock we have recorded at this address. */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   /* remember the held-mode now; lockN_release below may clear it */
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* We no longer hold the lock. */
      tl_assert(!lock->heldBy);
      tl_assert(lock->heldW == False);
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1396
1397
1398/*--------- Event handlers proper (evh__* functions) ---------*/
1399
1400/* What is the Thread* for the currently running thread? This is
1401 absolutely performance critical. We receive notifications from the
1402 core for client code starts/stops, and cache the looked-up result
1403 in 'current_Thread'. Hence, for the vast majority of requests,
1404 finding the current thread reduces to a read of a global variable,
1405 provided get_current_Thread_in_C_C is inlined.
1406
1407 Outside of client code, current_Thread is NULL, and presumably
1408 any uses of it will cause a segfault. Hence:
1409
1410 - for uses definitely within client code, use
1411 get_current_Thread_in_C_C.
1412
1413 - for all other uses, use get_current_Thread.
1414*/
1415
1416static Thread* current_Thread = NULL;
1417
1418static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1419 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1420 tl_assert(current_Thread == NULL);
1421 current_Thread = map_threads_lookup( tid );
1422 tl_assert(current_Thread != NULL);
1423}
1424static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1425 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1426 tl_assert(current_Thread != NULL);
1427 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001428 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001429}
1430static inline Thread* get_current_Thread_in_C_C ( void ) {
1431 return current_Thread;
1432}
1433static inline Thread* get_current_Thread ( void ) {
1434 ThreadId coretid;
1435 Thread* thr;
1436 thr = get_current_Thread_in_C_C();
1437 if (LIKELY(thr))
1438 return thr;
1439 /* evidently not in client code. Do it the slow way. */
1440 coretid = VG_(get_running_tid)();
1441 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001442 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001443 of initial memory layout) and VG_(get_running_tid)() returns
1444 VG_INVALID_THREADID at that point. */
1445 if (coretid == VG_INVALID_THREADID)
1446 coretid = 1; /* KLUDGE */
1447 thr = map_threads_lookup( coretid );
1448 return thr;
1449}
1450
1451static
1452void evh__new_mem ( Addr a, SizeT len ) {
1453 if (SHOW_EVENTS >= 2)
1454 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1455 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001456 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001457 all__sanity_check("evh__new_mem-post");
1458}
1459
1460static
sewardj7cf4e6b2008-05-01 20:24:26 +00001461void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1462 if (SHOW_EVENTS >= 2)
1463 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1464 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001465 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001466 all__sanity_check("evh__new_mem_w_tid-post");
1467}
1468
1469static
sewardjb4112022007-11-09 22:49:28 +00001470void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001471 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001472 if (SHOW_EVENTS >= 1)
1473 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1474 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1475 if (rr || ww || xx)
1476 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001477 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001478 all__sanity_check("evh__new_mem_w_perms-post");
1479}
1480
1481static
1482void evh__set_perms ( Addr a, SizeT len,
1483 Bool rr, Bool ww, Bool xx ) {
1484 if (SHOW_EVENTS >= 1)
1485 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1486 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1487 /* Hmm. What should we do here, that actually makes any sense?
1488 Let's say: if neither readable nor writable, then declare it
1489 NoAccess, else leave it alone. */
1490 if (!(rr || ww))
1491 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001492 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001493 all__sanity_check("evh__set_perms-post");
1494}
1495
1496static
1497void evh__die_mem ( Addr a, SizeT len ) {
1498 if (SHOW_EVENTS >= 2)
1499 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1500 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001501 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001502 all__sanity_check("evh__die_mem-post");
1503}
1504
/* Notification: low-level thread creation.  Sets up the Helgrind
   Thread record and the libhb Thr for the child, links them, binds
   the child into map_threads, and snapshots the parent's stack for
   later error messages.  The parent==VG_INVALID_THREADID case (the
   boot thread) is deliberately a no-op here. */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      /* parent must already be known; child must not be */
      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      /* the child's libhb thread inherits from (depends on) the
         parent's */
      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      /* link Thr and Thread both ways */
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1567
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   /* Low-level (core) notification that 'quit_tid' is exiting.
      Complain if it still holds locks, then unbind its map_threads
      slot so the core can recycle the ThreadId.  The Thread record
      itself is deliberately kept alive (see comment below). */
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* Message fits comfortably in 80 bytes: "%d" of an Int plus
         fixed text is far below the bound. */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* About the only thing we do need to do is clear the map_threads
      entry, in order that the Valgrind core can re-use it. */
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1614
sewardjf98e1c02008-10-25 16:22:41 +00001615
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   /* A successful pthread_join: 'stay_tid' has joined with the
      (already exited) thread described by 'quit_thr'.  Establish the
      quitter->stayer happens-before edge via a throwaway SO. */
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   /* Thr and Thread must be cross-linked consistently. */
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1668
1669static
1670void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1671 Addr a, SizeT size) {
1672 if (SHOW_EVENTS >= 2
1673 || (SHOW_EVENTS >= 1 && size != 1))
1674 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1675 (Int)tid, s, (void*)a, size );
1676 shadow_mem_read_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001677 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001678 all__sanity_check("evh__pre_mem_read-post");
1679}
1680
1681static
1682void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1683 Char* s, Addr a ) {
1684 Int len;
1685 if (SHOW_EVENTS >= 1)
1686 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1687 (Int)tid, s, (void*)a );
1688 // FIXME: think of a less ugly hack
1689 len = VG_(strlen)( (Char*) a );
1690 shadow_mem_read_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001691 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001692 all__sanity_check("evh__pre_mem_read_asciiz-post");
1693}
1694
1695static
1696void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1697 Addr a, SizeT size ) {
1698 if (SHOW_EVENTS >= 1)
1699 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1700 (Int)tid, s, (void*)a, size );
1701 shadow_mem_write_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001702 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001703 all__sanity_check("evh__pre_mem_write-post");
1704}
1705
1706static
1707void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1708 if (SHOW_EVENTS >= 1)
1709 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1710 (void*)a, len, (Int)is_inited );
1711 // FIXME: this is kinda stupid
1712 if (is_inited) {
1713 shadow_mem_make_New(get_current_Thread(), a, len);
1714 } else {
1715 shadow_mem_make_New(get_current_Thread(), a, len);
1716 }
sewardjf98e1c02008-10-25 16:22:41 +00001717 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001718 all__sanity_check("evh__pre_mem_read-post");
1719}
1720
1721static
1722void evh__die_mem_heap ( Addr a, SizeT len ) {
1723 if (SHOW_EVENTS >= 1)
1724 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1725 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001726 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001727 all__sanity_check("evh__pre_mem_read-post");
1728}
1729
sewardjb4112022007-11-09 22:49:28 +00001730static VG_REGPARM(1)
1731void evh__mem_help_read_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001732 Thread* thr = get_current_Thread_in_C_C();
1733 Thr* hbthr = thr->hbthr;
1734 LIBHB_READ_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001735}
sewardjf98e1c02008-10-25 16:22:41 +00001736
sewardjb4112022007-11-09 22:49:28 +00001737static VG_REGPARM(1)
1738void evh__mem_help_read_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001739 Thread* thr = get_current_Thread_in_C_C();
1740 Thr* hbthr = thr->hbthr;
1741 LIBHB_READ_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001742}
sewardjf98e1c02008-10-25 16:22:41 +00001743
sewardjb4112022007-11-09 22:49:28 +00001744static VG_REGPARM(1)
1745void evh__mem_help_read_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001746 Thread* thr = get_current_Thread_in_C_C();
1747 Thr* hbthr = thr->hbthr;
1748 LIBHB_READ_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001749}
sewardjf98e1c02008-10-25 16:22:41 +00001750
sewardjb4112022007-11-09 22:49:28 +00001751static VG_REGPARM(1)
1752void evh__mem_help_read_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001753 Thread* thr = get_current_Thread_in_C_C();
1754 Thr* hbthr = thr->hbthr;
1755 LIBHB_READ_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001756}
sewardjf98e1c02008-10-25 16:22:41 +00001757
sewardjb4112022007-11-09 22:49:28 +00001758static VG_REGPARM(2)
1759void evh__mem_help_read_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001760 Thread* thr = get_current_Thread_in_C_C();
1761 Thr* hbthr = thr->hbthr;
1762 LIBHB_READ_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001763}
1764
1765static VG_REGPARM(1)
1766void evh__mem_help_write_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001767 Thread* thr = get_current_Thread_in_C_C();
1768 Thr* hbthr = thr->hbthr;
1769 LIBHB_WRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001770}
sewardjf98e1c02008-10-25 16:22:41 +00001771
sewardjb4112022007-11-09 22:49:28 +00001772static VG_REGPARM(1)
1773void evh__mem_help_write_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001774 Thread* thr = get_current_Thread_in_C_C();
1775 Thr* hbthr = thr->hbthr;
1776 LIBHB_WRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001777}
sewardjf98e1c02008-10-25 16:22:41 +00001778
sewardjb4112022007-11-09 22:49:28 +00001779static VG_REGPARM(1)
1780void evh__mem_help_write_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001781 Thread* thr = get_current_Thread_in_C_C();
1782 Thr* hbthr = thr->hbthr;
1783 LIBHB_WRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001784}
sewardjf98e1c02008-10-25 16:22:41 +00001785
sewardjb4112022007-11-09 22:49:28 +00001786static VG_REGPARM(1)
1787void evh__mem_help_write_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001788 Thread* thr = get_current_Thread_in_C_C();
1789 Thr* hbthr = thr->hbthr;
1790 LIBHB_WRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001791}
sewardjf98e1c02008-10-25 16:22:41 +00001792
sewardjb4112022007-11-09 22:49:28 +00001793static VG_REGPARM(2)
1794void evh__mem_help_write_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001795 Thread* thr = get_current_Thread_in_C_C();
1796 Thr* hbthr = thr->hbthr;
1797 LIBHB_WRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001798}
1799
sewardjd52392d2008-11-08 20:36:26 +00001800//static void evh__bus_lock(void) {
1801// Thread* thr;
1802// if (0) VG_(printf)("evh__bus_lock()\n");
1803// thr = get_current_Thread();
1804// tl_assert(thr); /* cannot fail - Thread* must already exist */
1805// evhH__post_thread_w_acquires_lock( thr, LK_nonRec, (Addr)&__bus_lock );
1806//}
1807//static void evh__bus_unlock(void) {
1808// Thread* thr;
1809// if (0) VG_(printf)("evh__bus_unlock()\n");
1810// thr = get_current_Thread();
1811// tl_assert(thr); /* cannot fail - Thread* must already exist */
1812// evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock, False/*!isRDWR*/ );
1813//}
sewardjb4112022007-11-09 22:49:28 +00001814
1815
1816/* -------------- events to do with mutexes -------------- */
1817
1818/* EXPOSITION only: by intercepting lock init events we can show the
1819 user where the lock was initialised, rather than only being able to
1820 show where it was first locked. Intercepting lock initialisations
1821 is not necessary for the basic operation of the race checker. */
1822static
1823void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1824 void* mutex, Word mbRec )
1825{
1826 if (SHOW_EVENTS >= 1)
1827 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1828 (Int)tid, mbRec, (void*)mutex );
1829 tl_assert(mbRec == 0 || mbRec == 1);
1830 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1831 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001832 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001833 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1834}
1835
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   /* Client is about to call pthread_mutex_destroy on 'mutex'.
      Complain about bogus or still-locked arguments, then forget
      everything we know about the lock so its address can be
      reused. */
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Unknown address, or known but not a mutex kind: complain. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         /* Drop the ownership bag and reset held-state fields. */
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Finally remove the lock from the address map and free it. */
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1880
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Complaint 1: locking an rwlock via the mutex API. */
   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   /* Complaint 2: a real (non-try) lock of a non-recursive lock the
      thread already write-holds - guaranteed self-deadlock.  A
      trylock would merely fail, so it is exempt. */
   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HG_(record_error_Misc)( thr, "Attempt to re-lock a "
                                   "non-recursive lock I already hold" );
   }
}
1917
1918static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1919{
1920 // only called if the real library call succeeded - so mutex is sane
1921 Thread* thr;
1922 if (SHOW_EVENTS >= 1)
1923 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1924 (Int)tid, (void*)mutex );
1925
1926 thr = map_threads_maybe_lookup( tid );
1927 tl_assert(thr); /* cannot fail - Thread* must already exist */
1928
1929 evhH__post_thread_w_acquires_lock(
1930 thr,
1931 LK_mbRec, /* if not known, create new lock with this LockKind */
1932 (Addr)mutex
1933 );
1934}
1935
1936static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1937{
1938 // 'mutex' may be invalid - not checked by wrapper
1939 Thread* thr;
1940 if (SHOW_EVENTS >= 1)
1941 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1942 (Int)tid, (void*)mutex );
1943
1944 thr = map_threads_maybe_lookup( tid );
1945 tl_assert(thr); /* cannot fail - Thread* must already exist */
1946
1947 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1948}
1949
1950static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1951{
1952 // only called if the real library call succeeded - so mutex is sane
1953 Thread* thr;
1954 if (SHOW_EVENTS >= 1)
1955 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1956 (Int)tid, (void*)mutex );
1957 thr = map_threads_maybe_lookup( tid );
1958 tl_assert(thr); /* cannot fail - Thread* must already exist */
1959
1960 // anything we should do here?
1961}
1962
1963
1964/* --------------- events to do with CVs --------------- */
1965
sewardjf98e1c02008-10-25 16:22:41 +00001966/* A mapping from CV to the SO associated with it. When the CV is
1967 signalled/broadcasted upon, we do a 'send' into the SO, and when a
1968 wait on it completes, we do a 'recv' from the SO. This is believed
1969 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00001970 signallings/broadcasts.
1971*/
1972
/* pthread_cond_t* -> SO* */
1974static WordFM* map_cond_to_SO = NULL;
sewardjb4112022007-11-09 22:49:28 +00001975
sewardjf98e1c02008-10-25 16:22:41 +00001976static void map_cond_to_SO_INIT ( void ) {
1977 if (UNLIKELY(map_cond_to_SO == NULL)) {
1978 map_cond_to_SO = VG_(newFM)( HG_(zalloc), "hg.mctSI.1", HG_(free), NULL );
1979 tl_assert(map_cond_to_SO != NULL);
1980 }
1981}
1982
1983static SO* map_cond_to_SO_lookup_or_alloc ( void* cond ) {
1984 UWord key, val;
1985 map_cond_to_SO_INIT();
1986 if (VG_(lookupFM)( map_cond_to_SO, &key, &val, (UWord)cond )) {
1987 tl_assert(key == (UWord)cond);
1988 return (SO*)val;
1989 } else {
1990 SO* so = libhb_so_alloc();
1991 VG_(addToFM)( map_cond_to_SO, (UWord)cond, (UWord)so );
1992 return so;
1993 }
1994}
1995
1996static void map_cond_to_SO_delete ( void* cond ) {
1997 UWord keyW, valW;
1998 map_cond_to_SO_INIT();
1999 if (VG_(delFromFM)( map_cond_to_SO, &keyW, &valW, (UWord)cond )) {
2000 SO* so = (SO*)valW;
2001 tl_assert(keyW == (UWord)cond);
2002 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00002003 }
2004}
2005
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread*  thr;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: mutex is bogus
   // error-if: mutex is not locked

   so = map_cond_to_SO_lookup_or_alloc( cond );
   tl_assert(so);

   /* Strong send: install this thread's full vector clock in the SO. */
   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}
2032
2033/* returns True if it reckons 'mutex' is valid and held by this
2034 thread, else False */
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   /* Sanity-check the mutex passed to pthread_cond_{timed}wait.  Only
      the first problem found is reported; the cascade below is
      ordered from "not a lock at all" to "held by someone else". */
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         /* An rwlock cannot legally be used with a CV. */
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
         if (lk->heldBy == NULL) {
         /* POSIX requires the mutex to be held by the caller. */
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         /* Held, but not by this thread. */
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex

   return lk_valid;
}
2086
2087static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2088 void* cond, void* mutex )
2089{
sewardjf98e1c02008-10-25 16:22:41 +00002090 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2091 the SO for this cond, and 'recv' from it so as to acquire a
2092 dependency edge back to the signaller/broadcaster. */
2093 Thread* thr;
2094 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002095
2096 if (SHOW_EVENTS >= 1)
2097 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2098 "(ctid=%d, cond=%p, mutex=%p)\n",
2099 (Int)tid, (void*)cond, (void*)mutex );
2100
sewardjb4112022007-11-09 22:49:28 +00002101 thr = map_threads_maybe_lookup( tid );
2102 tl_assert(thr); /* cannot fail - Thread* must already exist */
2103
2104 // error-if: cond is also associated with a different mutex
2105
sewardjf98e1c02008-10-25 16:22:41 +00002106 so = map_cond_to_SO_lookup_or_alloc( cond );
2107 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002108
sewardjf98e1c02008-10-25 16:22:41 +00002109 if (!libhb_so_everSent(so)) {
2110 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2111 it? If this happened it would surely be a bug in the threads
2112 library. Or one of those fabled "spurious wakeups". */
2113 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2114 "succeeded on"
2115 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002116 }
sewardjf98e1c02008-10-25 16:22:41 +00002117
2118 /* anyway, acquire a dependency on it. */
2119 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
2120}
2121
static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   /* Drop and deallocate the SO bound to this CV, if any.  No
      happens-before significance. */
   map_cond_to_SO_delete( cond );
}
2135
2136
2137/* -------------- events to do with rwlocks -------------- */
2138
2139/* EXPOSITION only */
/* EXPOSITION only: record where the rwlock was initialised, so error
   reports can show the init site rather than merely the first lock
   site. */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
2150
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   /* Client is about to call pthread_rwlock_destroy on 'rwl'.
      Complain about bogus or still-locked arguments, then forget
      everything we know about the lock so its address can be
      reused.  Mirrors evh__HG_PTHREAD_MUTEX_DESTROY_PRE. */
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         /* NOTE(review): message says "locked mutex" although this is
            the rwlock path - possibly intentional copy from the mutex
            handler; confirm before changing user-visible text. */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Remove from the address map and free the record. */
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2195
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   /* isW: 1 for a write-lock attempt, 0 for a read-lock attempt.
      isTryLock: 1 for try-variants, which may legitimately fail. */
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
2223
2224static
2225void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2226{
2227 // only called if the real library call succeeded - so mutex is sane
2228 Thread* thr;
2229 if (SHOW_EVENTS >= 1)
2230 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2231 (Int)tid, (Int)isW, (void*)rwl );
2232
2233 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2234 thr = map_threads_maybe_lookup( tid );
2235 tl_assert(thr); /* cannot fail - Thread* must already exist */
2236
2237 (isW ? evhH__post_thread_w_acquires_lock
2238 : evhH__post_thread_r_acquires_lock)(
2239 thr,
2240 LK_rdwr, /* if not known, create new lock with this LockKind */
2241 (Addr)rwl
2242 );
2243}
2244
2245static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2246{
2247 // 'rwl' may be invalid - not checked by wrapper
2248 Thread* thr;
2249 if (SHOW_EVENTS >= 1)
2250 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2251 (Int)tid, (void*)rwl );
2252
2253 thr = map_threads_maybe_lookup( tid );
2254 tl_assert(thr); /* cannot fail - Thread* must already exist */
2255
2256 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2257}
2258
2259static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2260{
2261 // only called if the real library call succeeded - so mutex is sane
2262 Thread* thr;
2263 if (SHOW_EVENTS >= 1)
2264 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2265 (Int)tid, (void*)rwl );
2266 thr = map_threads_maybe_lookup( tid );
2267 tl_assert(thr); /* cannot fail - Thread* must already exist */
2268
2269 // anything we should do here?
2270}
2271
2272
2273/* --------------- events to do with semaphores --------------- */
2274
sewardj11e352f2007-11-30 11:11:02 +00002275/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002276 variables. */
2277
sewardjf98e1c02008-10-25 16:22:41 +00002278/* For each semaphore, we maintain a stack of SOs. When a 'post'
2279 operation is done on a semaphore (unlocking, essentially), a new SO
2280 is created for the posting thread, the posting thread does a strong
2281 send to it (which merely installs the posting thread's VC in the
2282 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002283
2284 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002285 semaphore, we pop a SO off the semaphore's stack (which should be
2286 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002287 dependencies between posters and waiters of the semaphore.
2288
sewardjf98e1c02008-10-25 16:22:41 +00002289 It may not be necessary to use a stack - perhaps a bag of SOs would
2290 do. But we do need to keep track of how many unused-up posts have
2291 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002292
sewardjf98e1c02008-10-25 16:22:41 +00002293 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002294 twice on S. T3 cannot complete its waits without both T1 and T2
2295 posting. The above mechanism will ensure that T3 acquires
2296 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002297
sewardjf98e1c02008-10-25 16:22:41 +00002298 When a semaphore is initialised with value N, we do as if we'd
2299 posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
2301 semaphore to acquire a dependency on the initialisation point,
2302 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002303
2304 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2305 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002306*/
2307
/* sem_t* -> XArray* SO*  -- for each semaphore, the stack of SOs
   representing as-yet-unconsumed posts.  Lazily created by
   map_sem_to_SO_stack_INIT. */
static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002310
sewardjf98e1c02008-10-25 16:22:41 +00002311static void map_sem_to_SO_stack_INIT ( void ) {
2312 if (map_sem_to_SO_stack == NULL) {
2313 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2314 HG_(free), NULL );
2315 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002316 }
2317}
2318
sewardjf98e1c02008-10-25 16:22:41 +00002319static void push_SO_for_sem ( void* sem, SO* so ) {
2320 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002321 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002322 tl_assert(so);
2323 map_sem_to_SO_stack_INIT();
2324 if (VG_(lookupFM)( map_sem_to_SO_stack,
2325 &keyW, (UWord*)&xa, (UWord)sem )) {
2326 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002327 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002328 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002329 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002330 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2331 VG_(addToXA)( xa, &so );
2332 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002333 }
2334}
2335
sewardjf98e1c02008-10-25 16:22:41 +00002336static SO* mb_pop_SO_for_sem ( void* sem ) {
2337 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002338 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002339 SO* so;
2340 map_sem_to_SO_stack_INIT();
2341 if (VG_(lookupFM)( map_sem_to_SO_stack,
2342 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002343 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002344 Word sz;
2345 tl_assert(keyW == (UWord)sem);
2346 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002347 tl_assert(sz >= 0);
2348 if (sz == 0)
2349 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002350 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2351 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002352 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002353 return so;
sewardjb4112022007-11-09 22:49:28 +00002354 } else {
2355 /* hmm, that's odd. No stack for this semaphore. */
2356 return NULL;
2357 }
2358}
2359
/* Handle sem_destroy(sem): deallocate every SO stacked for this
   semaphore, then remove and delete the (now empty) stack itself. */
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* Now remove the stack itself from the map and free it.  If the
      semaphore was never used, there is no entry, and that's fine. */
   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
2387
/* Handle sem_init(sem, ..., value): behave as if 'value' posts had
   already happened on the semaphore, so that up to 'value' subsequent
   waits acquire a dependency on the initialisation point.  Any stale
   SOs from a previous life of this semaphore are discarded first. */
static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO* so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
2428
2429static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002430{
sewardjf98e1c02008-10-25 16:22:41 +00002431 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2432 it (iow, write our VC into it, then tick ours), and push the SO
2433 on on a stack of SOs associated with 'sem'. This is later used
2434 by other thread(s) which successfully exit from a sem_wait on
2435 the same sem; by doing a strong recv from SOs popped of the
2436 stack, they acquire dependencies on the posting thread
2437 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002438
sewardjf98e1c02008-10-25 16:22:41 +00002439 Thread* thr;
2440 SO* so;
2441 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002442
2443 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002444 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002445 (Int)tid, (void*)sem );
2446
2447 thr = map_threads_maybe_lookup( tid );
2448 tl_assert(thr); /* cannot fail - Thread* must already exist */
2449
2450 // error-if: sem is bogus
2451
sewardjf98e1c02008-10-25 16:22:41 +00002452 hbthr = thr->hbthr;
2453 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002454
sewardjf98e1c02008-10-25 16:22:41 +00002455 so = libhb_so_alloc();
2456 libhb_so_send( hbthr, so, True/*strong send*/ );
2457 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002458}
2459
sewardj11e352f2007-11-30 11:11:02 +00002460static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002461{
sewardjf98e1c02008-10-25 16:22:41 +00002462 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2463 the 'sem' from this semaphore's SO-stack, and do a strong recv
2464 from it. This creates a dependency back to one of the post-ers
2465 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002466
sewardjf98e1c02008-10-25 16:22:41 +00002467 Thread* thr;
2468 SO* so;
2469 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002470
2471 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002472 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002473 (Int)tid, (void*)sem );
2474
2475 thr = map_threads_maybe_lookup( tid );
2476 tl_assert(thr); /* cannot fail - Thread* must already exist */
2477
2478 // error-if: sem is bogus
2479
sewardjf98e1c02008-10-25 16:22:41 +00002480 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002481
sewardjf98e1c02008-10-25 16:22:41 +00002482 if (so) {
2483 hbthr = thr->hbthr;
2484 tl_assert(hbthr);
2485
2486 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2487 libhb_so_dealloc(so);
2488 } else {
2489 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2490 If this happened it would surely be a bug in the threads
2491 library. */
2492 HG_(record_error_Misc)(
2493 thr, "Bug in libpthread: sem_wait succeeded on"
2494 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002495 }
2496}
2497
2498
2499/*--------------------------------------------------------------*/
2500/*--- Lock acquisition order monitoring ---*/
2501/*--------------------------------------------------------------*/
2502
2503/* FIXME: here are some optimisations still to do in
2504 laog__pre_thread_acquires_lock.
2505
2506 The graph is structured so that if L1 --*--> L2 then L1 must be
2507 acquired before L2.
2508
2509 The common case is that some thread T holds (eg) L1 L2 and L3 and
2510 is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:
2512
2513 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
2514 produces the answer No (because there is no error).
2515
2516 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
2517 (because they already got added the first time T acquired Ln).
2518
2519 Hence cache these two events:
2520
2521 (1) Cache result of the query from last time. Invalidate the cache
2522 any time any edges are added to or deleted from laog.
2523
2524 (2) Cache these add-edge requests and ignore them if said edges
2525 have already been added to laog. Invalidate the cache any time
2526 any edges are deleted from laog.
2527*/
2528
/* Per-lock adjacency record in the lock-order acquisition graph:
   the sets of locks ordered before (inns) and after (outs) it. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
2538
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr src_ga; /* Lock guest addresses for */
      Addr dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
2550
sewardj250ec2e2008-02-15 22:02:30 +00002551static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00002552 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
2553 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
2554 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
2555 if (llx1->src_ga < llx2->src_ga) return -1;
2556 if (llx1->src_ga > llx2->src_ga) return 1;
2557 if (llx1->dst_ga < llx2->dst_ga) return -1;
2558 if (llx1->dst_ga > llx2->dst_ga) return 1;
2559 return 0;
2560}
2561
/* Set of LAOGLinkExposition*s, keyed by (src_ga,dst_ga) via
   cmp_LAOGLinkExposition; values are unused (always NULL). */
static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
2564
2565
/* Debug aid: dump the entire lock-order acquisition graph, one node
   per lock, listing its in- and out-edge sets.  'who' identifies the
   caller in the output. */
static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   /* NB: iterator writes a Lock* key and LAOGLinks* value through
      Word*-typed out-params, hence the casts. */
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)(" node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)(" inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)(" out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
2592
/* Add the edge src->dst to laog, maintaining both the forward (outs)
   and reverse (inns) adjacency sets.  If the edge is genuinely new
   and both locks carry acquisition contexts, also record in
   laog_exposition where this ordering was first established, for
   later error reporting. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      /* addToWS returns the same set ID iff dst was already a member */
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src not in graph yet: create its node with a single out-edge */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   /* forward and reverse views of the graph must agree */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      /* lookup keys only on (src_ga,dst_ga) -- see cmp_LAOGLinkExposition */
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}
2672
2673__attribute__((noinline))
2674static void laog__del_edge ( Lock* src, Lock* dst ) {
2675 Word keyW;
2676 LAOGLinks* links;
2677 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
2678 /* Update the out edges for src */
2679 keyW = 0;
2680 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002681 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00002682 tl_assert(links);
2683 tl_assert(keyW == (Word)src);
2684 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
2685 }
2686 /* Update the in edges for dst */
2687 keyW = 0;
2688 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002689 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00002690 tl_assert(links);
2691 tl_assert(keyW == (Word)dst);
2692 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
2693 }
2694}
2695
2696__attribute__((noinline))
2697static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
2698 Word keyW;
2699 LAOGLinks* links;
2700 keyW = 0;
2701 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002702 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00002703 tl_assert(links);
2704 tl_assert(keyW == (Word)lk);
2705 return links->outs;
2706 } else {
2707 return HG_(emptyWS)( univ_laog );
2708 }
2709}
2710
2711__attribute__((noinline))
2712static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
2713 Word keyW;
2714 LAOGLinks* links;
2715 keyW = 0;
2716 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002717 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00002718 tl_assert(links);
2719 tl_assert(keyW == (Word)lk);
2720 return links->inns;
2721 } else {
2722 return HG_(emptyWS)( univ_laog );
2723 }
2724}
2725
/* Consistency check: for every node, each in-edge must be mirrored by
   the corresponding node's out-edge and vice versa.  Dumps the graph
   and asserts on failure. */
__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   if ( !laog )
      return; /* nothing much we can do */
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* every predecessor must list 'me' among its successors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      /* every successor must list 'me' among its predecessors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
2767
2768/* If there is a path in laog from 'src' to any of the elements in
2769 'dst', return an arbitrarily chosen element of 'dst' reachable from
2770 'src'. If no path exist from 'src' to any element in 'dst', return
2771 NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   /* Iterative depth-first search over laog, starting at 'src',
      stopping as soon as any member of 'dsts' is reached. */
   Lock* ret;
   Word i, ssz;
   XArray* stack; /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock* here;
   WordSetID succs;
   Word succs_size;
   UWord* succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret = NULL;
   stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* stack exhausted: no path exists */
      if (ssz == 0) { ret = NULL; break; }

      /* pop the top of stack */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* reached a destination? */
      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      /* skip nodes already expanded */
      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      /* push all successors of 'here' */
      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
2823
2824
2825/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
2826 between 'lk' and the locks already held by 'thr' and issue a
2827 complaint if so. Also, update the ordering graph appropriately.
2828*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock* lk
            )
{
   UWord* ls_words;
   Word ls_size, i;
   Lock* other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* Lazily create the graph and the exposition map on first use. */
   if (!laog)
      laog = VG_(newFM)( HG_(zalloc), "hg.lptal.1",
                         HG_(free), NULL/*unboxedcmp*/ );
   if (!laog_exposition)
      laog_exposition = VG_(newFM)( HG_(zalloc), "hg.lptal.2", HG_(free),
                                    cmp_LAOGLinkExposition );

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk)  |  old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
2912
2913
2914/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
2915
/* Remove lock 'lk' from laog.  To preserve the orderings it
   participated in, every (pred, succ) pair through lk is re-linked
   directly: pred --> succ. */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   /* detach lk from all its predecessors and successors */
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* bridge the gap: pred --> succ for every pair (skipping
      self-edges) */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}
2945
2946__attribute__((noinline))
2947static void laog__handle_lock_deletions (
2948 WordSetID /* in univ_laog */ locksToDelete
2949 )
2950{
sewardj250ec2e2008-02-15 22:02:30 +00002951 Word i, ws_size;
2952 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00002953
2954 if (!laog)
sewardjf98e1c02008-10-25 16:22:41 +00002955 laog = VG_(newFM)( HG_(zalloc), "hg.lhld.1", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00002956 if (!laog_exposition)
sewardjf98e1c02008-10-25 16:22:41 +00002957 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.lhld.2", HG_(free),
sewardjb4112022007-11-09 22:49:28 +00002958 cmp_LAOGLinkExposition );
2959
2960 HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
2961 for (i = 0; i < ws_size; i++)
2962 laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
2963
sewardjf98e1c02008-10-25 16:22:41 +00002964 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00002965 all__sanity_check("laog__handle_lock_deletions-post");
2966}
2967
2968
2969/*--------------------------------------------------------------*/
2970/*--- Malloc/free replacements ---*/
2971/*--------------------------------------------------------------*/
2972
/* Metadata kept for every live client heap block, keyed on the
   payload address. */
typedef
   struct {
      void* next; /* required by m_hashtable */
      Addr payload; /* ptr to actual block */
      SizeT szB; /* size requested */
      ExeContext* where; /* where it was allocated */
      Thread* thr; /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;
2986
2987
/* Allocate a zero-initialised MallocMeta record. */
static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
/* Release a MallocMeta previously obtained from new_MallocMeta. */
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}
2996
2997
2998/* Allocate a client block and set up the metadata for it. */
2999
/* Allocate a client block of 'szB' bytes aligned to 'alignB',
   optionally zero-filled, record a MallocMeta for it, and inform the
   lower-level memory machinery.  Returns NULL if the underlying
   allocator fails. */
static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB = szB;
   md->where = VG_(record_ExeContext)( tid, 0 );
   md->thr = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
3031
/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */

/* Client-request entry point for malloc. */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client-request entry point for C++ operator new. */
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client-request entry point for C++ operator new[]. */
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client-request entry point for memalign: caller supplies alignment. */
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
3057static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3058 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3059 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3060 /*is_zeroed*/True );
3061}
3062
3063
3064/* Free a client block, including getting rid of the relevant
3065 metadata. */
3066
3067static void handle_free ( ThreadId tid, void* p )
3068{
3069 MallocMeta *md, *old_md;
3070 SizeT szB;
3071
3072 /* First see if we can find the metadata for 'p'. */
3073 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3074 if (!md)
3075 return; /* apparently freeing a bogus address. Oh well. */
3076
3077 tl_assert(md->payload == (Addr)p);
3078 szB = md->szB;
3079
3080 /* Nuke the metadata block */
3081 old_md = (MallocMeta*)
3082 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3083 tl_assert(old_md); /* it must be present - we just found it */
3084 tl_assert(old_md == md);
3085 tl_assert(old_md->payload == (Addr)p);
3086
3087 VG_(cli_free)((void*)old_md->payload);
3088 delete_MallocMeta(old_md);
3089
3090 /* Tell the lower level memory wranglers. */
3091 evh__die_mem_heap( (Addr)p, szB );
3092}
3093
/* Client free(): forward to the common deallocation path. */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete: forward to the common deallocation path. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete[]: forward to the common deallocation path. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
3103
3104
3105static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3106{
3107 MallocMeta *md, *md_new, *md_tmp;
3108 SizeT i;
3109
3110 Addr payload = (Addr)payloadV;
3111
3112 if (((SSizeT)new_size) < 0) return NULL;
3113
3114 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3115 if (!md)
3116 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3117
3118 tl_assert(md->payload == payload);
3119
3120 if (md->szB == new_size) {
3121 /* size unchanged */
3122 md->where = VG_(record_ExeContext)(tid, 0);
3123 return payloadV;
3124 }
3125
3126 if (md->szB > new_size) {
3127 /* new size is smaller */
3128 md->szB = new_size;
3129 md->where = VG_(record_ExeContext)(tid, 0);
3130 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3131 return payloadV;
3132 }
3133
3134 /* else */ {
3135 /* new size is bigger */
3136 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3137
3138 /* First half kept and copied, second half new */
3139 // FIXME: shouldn't we use a copier which implements the
3140 // memory state machine?
3141 shadow_mem_copy_range( payload, p_new, md->szB );
3142 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003143 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003144 /* FIXME: can anything funny happen here? specifically, if the
3145 old range contained a lock, then die_mem_heap will complain.
3146 Is that the correct behaviour? Not sure. */
3147 evh__die_mem_heap( payload, md->szB );
3148
3149 /* Copy from old to new */
3150 for (i = 0; i < md->szB; i++)
3151 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3152
3153 /* Because the metadata hash table is index by payload address,
3154 we have to get rid of the old hash table entry and make a new
3155 one. We can't just modify the existing metadata in place,
3156 because then it would (almost certainly) be in the wrong hash
3157 chain. */
3158 md_new = new_MallocMeta();
3159 *md_new = *md;
3160
3161 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3162 tl_assert(md_tmp);
3163 tl_assert(md_tmp == md);
3164
3165 VG_(cli_free)((void*)md->payload);
3166 delete_MallocMeta(md);
3167
3168 /* Update fields */
3169 md_new->where = VG_(record_ExeContext)( tid, 0 );
3170 md_new->szB = new_size;
3171 md_new->payload = p_new;
3172 md_new->thr = map_threads_lookup( tid );
3173
3174 /* and add */
3175 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3176
3177 return (void*)p_new;
3178 }
3179}
3180
3181
3182/*--------------------------------------------------------------*/
3183/*--- Instrumentation ---*/
3184/*--------------------------------------------------------------*/
3185
/* Append to 'bbOut' a dirty-helper call which reports to Helgrind's
   event handlers a client memory access of 'szB' bytes at 'addr'.
   'isStore' selects the evh__mem_help_write_* vs _read_* helper
   family.  Sizes 1/2/4/8 get dedicated one-argument helpers; any
   other size (asserted to be in 9..512) goes through the _N variant,
   which also receives the size as a second argument.  'hWordTy_szB'
   is the host word size in bytes and is only sanity-checked here. */
static void instrument_mem_access ( IRSB*   bbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_write_1";
            hAddr = &evh__mem_help_write_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_write_2";
            hAddr = &evh__mem_help_write_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_write_4";
            hAddr = &evh__mem_help_write_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_write_8";
            hAddr = &evh__mem_help_write_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            /* Odd-sized access: pass the size explicitly. */
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_write_N";
            hAddr = &evh__mem_help_write_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_read_1";
            hAddr = &evh__mem_help_read_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_read_2";
            hAddr = &evh__mem_help_read_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_read_4";
            hAddr = &evh__mem_help_read_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_read_8";
            hAddr = &evh__mem_help_read_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            /* Odd-sized access: pass the size explicitly. */
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_read_N";
            hAddr = &evh__mem_help_read_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
3278
3279
sewardjd52392d2008-11-08 20:36:26 +00003280//static void instrument_memory_bus_event ( IRSB* bbOut, IRMBusEvent event )
3281//{
3282// switch (event) {
3283// case Imbe_SnoopedStoreBegin:
3284// case Imbe_SnoopedStoreEnd:
3285// /* These arise from ppc stwcx. insns. They should perhaps be
3286// handled better. */
3287// break;
3288// case Imbe_Fence:
3289// break; /* not interesting */
3290// case Imbe_BusLock:
3291// case Imbe_BusUnlock:
3292// addStmtToIRSB(
3293// bbOut,
3294// IRStmt_Dirty(
3295// unsafeIRDirty_0_N(
3296// 0/*regparms*/,
3297// event == Imbe_BusLock ? "evh__bus_lock"
3298// : "evh__bus_unlock",
3299// VG_(fnptr_to_fnentry)(
3300// event == Imbe_BusLock ? &evh__bus_lock
3301// : &evh__bus_unlock
3302// ),
3303// mkIRExprVec_0()
3304// )
3305// )
3306// );
3307// break;
3308// default:
3309// tl_assert(0);
3310// }
3311//}
sewardjb4112022007-11-09 22:49:28 +00003312
3313
/* Main instrumentation callback: copy the block 'bbIn' into a new
   IRSB, inserting before every memory-referencing statement a call
   (via instrument_mem_access) to the appropriate Helgrind event
   handler.  Stores occurring between Imbe_BusLock/Imbe_BusUnlock
   markers are deliberately not instrumented.
   NOTE(review): loads (Ist_WrTmp/Iex_Load) and dirty-helper accesses
   ARE still instrumented while the bus is locked -- confirm this
   asymmetry is intended. */
static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int   i;
   IRSB* bbOut;
   Bool  x86busLocked = False;   /* inside a BusLock..BusUnlock region? */

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      IRStmt* st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_IMark:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_MBE:
            /* Track bus-lock state; other MBE events are unhandled. */
            //instrument_memory_bus_event( bbOut, st->Ist.MBE.event );
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               case Imbe_BusLock:
                  tl_assert(x86busLocked == False);
                  x86busLocked = True;
                  break;
               case Imbe_BusUnlock:
                  tl_assert(x86busLocked == True);
                  x86busLocked = False;
                  break;
               default:
                  goto unhandled;
            }
            break;

         case Ist_Store:
            /* Skip stores made while the bus is locked. */
            if (!x86busLocked)
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            break;

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               instrument_mem_access(
                  bbOut,
                  data->Iex.Load.addr,
                  sizeofIRType(data->Iex.Load.ty),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize, True/*isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
3440
3441
3442/*----------------------------------------------------------------*/
3443/*--- Client requests ---*/
3444/*----------------------------------------------------------------*/
3445
/* Finite map from client pthread_t values to our Thread* records.
   Needed because join notifications identify the quitting thread only
   by its pthread_t.  Created lazily on first use. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

/* Ensure map_pthread_t_to_Thread exists; idempotent. */
static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}
3456
3457
/* Dispatch a Helgrind client request.  args[0] is the request code,
   args[1..] its arguments.  Returns False for requests not carrying
   the 'H','G' tool prefix; otherwise handles the request (asserting
   on unknown codes) and returns True.  *ret carries a request-
   specific result, defaulting to 0. */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         /* Client-side pthreads wrapper detected an API misuse;
            args = (error string, error code, called-fn name). */
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
          /* Can this fail?  It would mean that our pthread_join
             wrapper observed a successful join on args[1] yet that
             thread never existed (or at least, it never lodged an
             entry in the mapping (via SET_MY_PTHREAD_T)).  Which
             sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

//zz       case _VG_USERREQ__HG_GET_MY_SEGMENT: { // -> Segment*
//zz          Thread*   thr;
//zz          SegmentID segid;
//zz          Segment*  seg;
//zz          thr = map_threads_maybe_lookup( tid );
//zz          tl_assert(thr); /* cannot fail */
//zz          segid = thr->csegid;
//zz          tl_assert(is_sane_SegmentID(segid));
//zz          seg = map_segments_lookup( segid );
//zz          tl_assert(seg);
//zz          *ret = (UWord)seg;
//zz          break;
//zz       }

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
3679
3680
3681/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00003682/*--- Setup ---*/
3683/*----------------------------------------------------------------*/
3684
/* Parse one Helgrind command-line option.  Returns True if the option
   was recognised (including by the malloc-replacement machinery),
   False on a malformed argument. */
static Bool hg_process_cmd_line_option ( Char* arg )
{
   if (VG_CLO_STREQ(arg, "--track-lockorders=no"))
      HG_(clo_track_lockorders) = False;
   else if (VG_CLO_STREQ(arg, "--track-lockorders=yes"))
      HG_(clo_track_lockorders) = True;

   else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=no"))
      HG_(clo_cmp_race_err_addrs) = False;
   else if (VG_CLO_STREQ(arg, "--cmp-race-err-addrs=yes"))
      HG_(clo_cmp_race_err_addrs) = True;

   else if (VG_CLO_STREQN(13, arg, "--trace-addr=")) {
      /* atoll16: parse the address as base-16. */
      HG_(clo_trace_addr) = VG_(atoll16)(&arg[13]);
      /* Tracing an address implies at least trace level 1. */
      if (HG_(clo_trace_level) == 0)
         HG_(clo_trace_level) = 1;
   }
   /* NB: VG_BNUM_CLO is a macro which expands to an 'else if' arm. */
   else VG_BNUM_CLO(arg, "--trace-level", HG_(clo_trace_level), 0, 2)

   /* "stuvwx" --> stuvwx (binary) */
   else if (VG_CLO_STREQN(18, arg, "--hg-sanity-flags=")) {
      Int j;
      Char* opt = & arg[18];

      if (6 != VG_(strlen)(opt)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if ('0' == opt[j]) { /* do nothing */ }
         /* bit (6-1-j): leftmost digit is the most significant flag */
         else if ('1' == opt[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
3731
/* Print the user-visible command-line options (--help output). */
static void hg_print_usage ( void )
{
   VG_(printf)(
" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
" --trace-addr=0xXXYYZZ show all state changes for address 0xXXYYZZ\n"
" --trace-level=0|1|2 verbosity level of --trace-addr [1]\n"
   );
   VG_(replacement_malloc_print_usage)();
}
3741
/* Print the debugging-only command-line options (--help-debug output). */
static void hg_print_debug_usage ( void )
{
   VG_(replacement_malloc_print_debug_usage)();
   VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
               " at events (X = 0|1) [000000]\n");
   VG_(printf)(" --hg-sanity-flags values:\n");
   VG_(printf)(" 100000 crosscheck happens-before-graph searches\n");
   VG_(printf)(" 010000 after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
   VG_(printf)(" 000100 at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)(" 000010 at lock/unlock events\n");
   VG_(printf)(" 000001 at thread create/join events\n");
}
3759
/* Post-command-line-processing initialisation hook.  Currently
   nothing to do; registered with VG_(basic_tool_funcs). */
static void hg_post_clo_init ( void )
{
}
3763
/* Shutdown hook: optionally dump data structures, run final sanity
   checks, and at verbosity >= 2 print accumulated statistics before
   shutting down libhb.  'exitcode' is unused here. */
static void hg_fini ( Int exitcode )
{
   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_verbosity) >= 2) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog, "univ_laog" );
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)(" locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)(" threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)(" LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)(" locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}
3828
sewardjf98e1c02008-10-25 16:22:41 +00003829/* FIXME: move these somewhere sane */
3830
3831static
3832void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
3833{
3834 Thread* thr;
3835 ThreadId tid;
3836 UWord nActual;
3837 tl_assert(hbt);
3838 thr = libhb_get_Thr_opaque( hbt );
3839 tl_assert(thr);
3840 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
3841 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
3842 NULL, NULL, 0 );
3843 tl_assert(nActual <= nRequest);
3844 for (; nActual < nRequest; nActual++)
3845 frames[nActual] = 0;
3846}
3847
3848static
sewardjd52392d2008-11-08 20:36:26 +00003849ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00003850{
3851 Thread* thr;
3852 ThreadId tid;
3853 ExeContext* ec;
3854 tl_assert(hbt);
3855 thr = libhb_get_Thr_opaque( hbt );
3856 tl_assert(thr);
3857 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
3858 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00003859 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00003860}
3861
3862
/* Tool registration: describe Helgrind to the Valgrind core, hook up
   error handling, command-line, client-request, malloc-replacement
   and memory-event callbacks, initialise libhb and the tool's own
   data structures.  Runs before command-line processing. */
static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2008, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(print_extra_suppression_info));

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   VG_(needs_var_info)(); /* optional */

   /* Memory-event callbacks: creation... */
   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( shadow_mem_copy_range );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   /* ...and destruction. */
   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

}
3959
3960VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
3961
3962/*--------------------------------------------------------------------*/
3963/*--- end hg_main.c ---*/
3964/*--------------------------------------------------------------------*/