/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
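
/* As a concrete illustration of the casts in question, the map_locks
   helpers further below do lookups of this shape:

      Lock* lk = NULL;
      VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );

   Reading/writing a Lock* through a Word* is exactly the kind of
   type-punning that gcc's strict-aliasing assumptions may miscompile
   at -O2, hence the need for -fno-strict-aliasing. */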

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
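/* Allow other parts of the tool (presumably libhb) to get at the
   admin_threads list, in the same spirit as HG_(get_admin_locks)
   below. */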
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin doubly-linked list of Locks */
/* We need a doubly-linked list in order to handle del_LockN
   properly and efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

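/* Make a new Thread with empty locksets, bind it to the libhb thread
   state 'hbthr', and chain it onto the admin_threads list. */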
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx      = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   admin_threads        = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert it into the admin_locks doubly-linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to doubly-linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next  = admin_locks;
   lock->admin_prev  = NULL;
   admin_locks       = lock;
   /* end: add */
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes it from the admin_locks doubly-linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from doubly-linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

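/* Update 'lk' to reflect that 'thr' now has a read-acquisition of
   it.  Checked just as strictly as lockN_acquire_writer above. */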
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n %p\n",  lk->admin_next);
      space(d+3); VG_(printf)("admin_p %p\n",  lk->admin_prev);
      space(d+3); VG_(printf)("magic 0x%x\n",  (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

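/* Clear the map_threads entry for 'coretid'.  The entry must
   currently be set, and 'coretid' may not be the invalid-threadid
   slot. */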
static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}


/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

sewardjb4112022007-11-09 22:49:28 +0000964{
sewardjb4112022007-11-09 22:49:28 +0000965 if (0 && len > 500)
sewardjfd35d492011-03-17 19:39:55 +0000966 VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
967 // has no effect (NoFX)
968 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
969}
970
971static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
972{
973 if (0 && len > 500)
974 VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
975 // Actually Has An Effect (AHAE)
976 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +0000977}
978
sewardj406bac82010-03-03 23:03:40 +0000979static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
980{
981 if (0 && len > 500)
982 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
983 libhb_srange_untrack( thr->hbthr, aIN, len );
984}
985
sewardjb4112022007-11-09 22:49:28 +0000986
/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
1297 } else {
1298 /* Normal case. It's either not a rwlock, or it's a rwlock
1299 that we used to hold in w-mode (which is pretty much the
1300 same thing as a non-rwlock.) Since this transaction is
1301 atomic (V does not allow multiple threads to run
1302 simultaneously), it must mean the lock is now not held by
1303 anybody. Hence assert for it. */
1304 /* The lock is now not held by anybody: */
1305 tl_assert(!lock->heldBy);
1306 tl_assert(lock->heldW == False);
1307 }
sewardjf98e1c02008-10-25 16:22:41 +00001308 //if (lock->heldBy) {
1309 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1310 //}
sewardjb4112022007-11-09 22:49:28 +00001311 /* update this thread's lockset accordingly. */
1312 thr->locksetA
1313 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1314 thr->locksetW
1315 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001316 /* push our VC into the lock */
1317 tl_assert(thr->hbthr);
1318 tl_assert(lock->hbso);
1319 /* If the lock was previously W-held, then we want to do a
1320 strong send, and if previously R-held, then a weak send. */
1321 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001322 }
1323 /* fall through */
1324
1325 error:
sewardjf98e1c02008-10-25 16:22:41 +00001326 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001327}
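/* To make the release-side 'send' concrete, here is a minimal sketch
   of hypothetical client code (not part of Helgrind; names are
   illustrative only): T1's unlock pushes T1's vector clock into the
   lock's SO (a strong send, since the lock was W-held), and T2's
   subsequent lock does the matching recv, ordering the write to
   'shared' before T2's read. */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static int shared;

static void* t1_fn ( void* v ) {
   pthread_mutex_lock(&mx);
   shared = 42;               /* write ... */
   pthread_mutex_unlock(&mx); /* ... 'sent' into the lock's SO here */
   return NULL;
}

static void* t2_fn ( void* v ) {
   pthread_mutex_lock(&mx);   /* 'recv': acquires T1's history */
   printf("%d\n", shared);    /* ordered wrt T1's write: no race */
   pthread_mutex_unlock(&mx);
   return NULL;
}

int main ( void ) {
   pthread_t t1, t2;
   pthread_create(&t1, NULL, t1_fn, NULL);
   pthread_create(&t2, NULL, t2_fn, NULL);
   pthread_join(t1, NULL);
   pthread_join(t2, NULL);
   return 0;
}
#endif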
1328
1329
sewardj9f569b72008-11-13 13:33:09 +00001330/* ---------------------------------------------------------- */
1331/* -------- Event handlers proper (evh__* functions) -------- */
1332/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001333
1334/* What is the Thread* for the currently running thread? This is
1335 absolutely performance critical. We receive notifications from the
1336 core for client code starts/stops, and cache the looked-up result
1337 in 'current_Thread'. Hence, for the vast majority of requests,
1338 finding the current thread reduces to a read of a global variable,
1339 provided get_current_Thread_in_C_C is inlined.
1340
1341 Outside of client code, current_Thread is NULL, and presumably
1342 any uses of it will cause a segfault. Hence:
1343
1344 - for uses definitely within client code, use
1345 get_current_Thread_in_C_C.
1346
1347 - for all other uses, use get_current_Thread.
1348*/
1349
sewardj23f12002009-07-24 08:45:08 +00001350static Thread *current_Thread = NULL,
1351 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001352
1353static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1354 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1355 tl_assert(current_Thread == NULL);
1356 current_Thread = map_threads_lookup( tid );
1357 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001358 if (current_Thread != current_Thread_prev) {
1359 libhb_Thr_resumes( current_Thread->hbthr );
1360 current_Thread_prev = current_Thread;
1361 }
sewardjb4112022007-11-09 22:49:28 +00001362}
1363static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1364 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1365 tl_assert(current_Thread != NULL);
1366 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001367 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001368}
1369static inline Thread* get_current_Thread_in_C_C ( void ) {
1370 return current_Thread;
1371}
1372static inline Thread* get_current_Thread ( void ) {
1373 ThreadId coretid;
1374 Thread* thr;
1375 thr = get_current_Thread_in_C_C();
1376 if (LIKELY(thr))
1377 return thr;
1378 /* evidently not in client code. Do it the slow way. */
1379 coretid = VG_(get_running_tid)();
1380 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001381 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001382 of initial memory layout) and VG_(get_running_tid)() returns
1383 VG_INVALID_THREADID at that point. */
1384 if (coretid == VG_INVALID_THREADID)
1385 coretid = 1; /* KLUDGE */
1386 thr = map_threads_lookup( coretid );
1387 return thr;
1388}
1389
1390static
1391void evh__new_mem ( Addr a, SizeT len ) {
1392 if (SHOW_EVENTS >= 2)
1393 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1394 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001395 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001396 all__sanity_check("evh__new_mem-post");
1397}
1398
1399static
sewardj1f77fec2010-04-12 19:51:04 +00001400void evh__new_mem_stack ( Addr a, SizeT len ) {
1401 if (SHOW_EVENTS >= 2)
1402 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1403 shadow_mem_make_New( get_current_Thread(),
1404 -VG_STACK_REDZONE_SZB + a, len );
1405 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1406 all__sanity_check("evh__new_mem_stack-post");
1407}
1408
1409static
sewardj7cf4e6b2008-05-01 20:24:26 +00001410void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1411 if (SHOW_EVENTS >= 2)
1412 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1413 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001414 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001415 all__sanity_check("evh__new_mem_w_tid-post");
1416}
1417
1418static
sewardjb4112022007-11-09 22:49:28 +00001419void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001420 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001421 if (SHOW_EVENTS >= 1)
1422 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1423 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1424 if (rr || ww || xx)
1425 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001426 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001427 all__sanity_check("evh__new_mem_w_perms-post");
1428}
1429
1430static
1431void evh__set_perms ( Addr a, SizeT len,
1432 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001433 // This handles mprotect requests. If the memory is being put
1434 // into no-R no-W state, paint it as NoAccess, for the reasons
1435 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001436 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001437 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001438 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1439 /* Hmm. What should we do here, that actually makes any sense?
1440 Let's say: if neither readable nor writable, then declare it
1441 NoAccess, else leave it alone. */
1442 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001443 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001444 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001445 all__sanity_check("evh__set_perms-post");
1446}
1447
1448static
1449void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001450 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001451 if (SHOW_EVENTS >= 2)
1452 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001453 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001454 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001455 all__sanity_check("evh__die_mem-post");
1456}
1457
1458static
sewardjfd35d492011-03-17 19:39:55 +00001459void evh__die_mem_munmap ( Addr a, SizeT len ) {
1460 // It's important that libhb doesn't ignore this. If, as is likely,
1461 // the client is subject to address space layout randomization,
1462 // then unmapped areas may never get remapped over, even in long
1463 // runs. If we just ignore them we wind up with large resource
1464 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1465 // VTS references in the affected area are dropped. Marking memory
1466 // as NoAccess is expensive, but we assume that munmap is sufficiently
1467 // rare that the space gains of doing this are worth the costs.
1468 if (SHOW_EVENTS >= 2)
1469 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1470 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1471}
1472
1473static
sewardj406bac82010-03-03 23:03:40 +00001474void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001475 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001476 if (SHOW_EVENTS >= 2)
1477 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1478 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1479 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1480 all__sanity_check("evh__untrack_mem-post");
1481}
1482
1483static
sewardj23f12002009-07-24 08:45:08 +00001484void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1485 if (SHOW_EVENTS >= 2)
1486 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1487 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1488 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1489 all__sanity_check("evh__copy_mem-post");
1490}
1491
1492static
sewardjb4112022007-11-09 22:49:28 +00001493void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1494{
1495 if (SHOW_EVENTS >= 1)
1496 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1497 (Int)parent, (Int)child );
1498
1499 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001500 Thread* thr_p;
1501 Thread* thr_c;
1502 Thr* hbthr_p;
1503 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001504
sewardjf98e1c02008-10-25 16:22:41 +00001505 tl_assert(HG_(is_sane_ThreadId)(parent));
1506 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001507 tl_assert(parent != child);
1508
1509 thr_p = map_threads_maybe_lookup( parent );
1510 thr_c = map_threads_maybe_lookup( child );
1511
1512 tl_assert(thr_p != NULL);
1513 tl_assert(thr_c == NULL);
1514
sewardjf98e1c02008-10-25 16:22:41 +00001515 hbthr_p = thr_p->hbthr;
1516 tl_assert(hbthr_p != NULL);
sewardj60626642011-03-10 15:14:37 +00001517 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001518
sewardjf98e1c02008-10-25 16:22:41 +00001519 hbthr_c = libhb_create ( hbthr_p );
1520
1521 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001522 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001523 thr_c = mk_Thread( hbthr_c );
sewardj60626642011-03-10 15:14:37 +00001524 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1525 libhb_set_Thr_hgthread(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001526
1527 /* and bind it in the thread-map table */
1528 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001529 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1530 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001531
1532 /* Record where the parent is so we can later refer to this in
1533 error messages.
1534
1535 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1536 The stack snapshot is taken immediately after the parent has
1537 returned from its sys_clone call. Unfortunately there is no
1538 unwind info for the insn following "syscall" - reading the
1539 glibc sources confirms this. So we ask for a snapshot to be
1540 taken as if RIP was 3 bytes earlier, in a place where there
1541 is unwind info. Sigh.
1542 */
1543 { Word first_ip_delta = 0;
1544# if defined(VGP_amd64_linux)
1545 first_ip_delta = -3;
1546# endif
1547 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1548 }
sewardjb4112022007-11-09 22:49:28 +00001549 }
1550
sewardjf98e1c02008-10-25 16:22:41 +00001551 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001552 all__sanity_check("evh__pre_thread_create-post");
1553}
1554
1555static
1556void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1557{
1558 Int nHeld;
1559 Thread* thr_q;
1560 if (SHOW_EVENTS >= 1)
1561 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1562 (Int)quit_tid );
1563
1564 /* quit_tid has disappeared without joining to any other thread.
1565 Therefore there is no synchronisation event associated with its
1566 exit and so we have to pretty much treat it as if it was still
1567 alive but mysteriously making no progress. That is because, if
1568 we don't know when it really exited, then we can never say there
1569 is a point in time when we're sure the thread really has
1570 finished, and so we need to consider the possibility that it
1571 lingers indefinitely and continues to interact with other
1572 threads. */
1573 /* However, it might have rendezvous'd with a thread that called
1574 pthread_join with this one as arg, prior to this point (that's
1575 how NPTL works). In which case there has already been a prior
1576 sync event. So in any case, just let the thread exit. On NPTL,
1577 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001578 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001579 thr_q = map_threads_maybe_lookup( quit_tid );
1580 tl_assert(thr_q != NULL);
1581
1582 /* Complain if this thread holds any locks. */
1583 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1584 tl_assert(nHeld >= 0);
1585 if (nHeld > 0) {
1586 HChar buf[80];
1587 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1588 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001589 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001590 }
1591
sewardj23f12002009-07-24 08:45:08 +00001592 /* Not much to do here:
1593 - tell libhb the thread is gone
1594 - clear the map_threads entry, in order that the Valgrind core
1595 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001596 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1597 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001598 tl_assert(thr_q->hbthr);
1599 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001600 tl_assert(thr_q->coretid == quit_tid);
1601 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001602 map_threads_delete( quit_tid );
1603
sewardjf98e1c02008-10-25 16:22:41 +00001604 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001605 all__sanity_check("evh__pre_thread_ll_exit-post");
1606}
1607
sewardj61bc2c52011-02-09 10:34:00 +00001608/* This is called immediately after fork, for the child only. 'tid'
1609 is the only surviving thread (as per POSIX rules on fork() in
1610 threaded programs), so we have to clean up map_threads to remove
1611 entries for any other threads. */
1612static
1613void evh__atfork_child ( ThreadId tid )
1614{
1615 UInt i;
1616 Thread* thr;
1617 /* Slot 0 should never be used. */
1618 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1619 tl_assert(!thr);
1620 /* Clean up all other slots except 'tid'. */
1621 for (i = 1; i < VG_N_THREADS; i++) {
1622 if (i == tid)
1623 continue;
1624 thr = map_threads_maybe_lookup(i);
1625 if (!thr)
1626 continue;
1627 /* Cleanup actions (next 5 lines) copied from end of
1628 evh__pre_thread_ll_exit; keep in sync. */
1629 tl_assert(thr->hbthr);
1630 libhb_async_exit(thr->hbthr);
1631 tl_assert(thr->coretid == i);
1632 thr->coretid = VG_INVALID_THREADID;
1633 map_threads_delete(i);
1634 }
1635}
1636
sewardjf98e1c02008-10-25 16:22:41 +00001637
sewardjb4112022007-11-09 22:49:28 +00001638static
1639void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1640{
sewardjb4112022007-11-09 22:49:28 +00001641 Thread* thr_s;
1642 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001643 Thr* hbthr_s;
1644 Thr* hbthr_q;
1645 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001646
1647 if (SHOW_EVENTS >= 1)
1648 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1649 (Int)stay_tid, quit_thr );
1650
sewardjf98e1c02008-10-25 16:22:41 +00001651 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001652
1653 thr_s = map_threads_maybe_lookup( stay_tid );
1654 thr_q = quit_thr;
1655 tl_assert(thr_s != NULL);
1656 tl_assert(thr_q != NULL);
1657 tl_assert(thr_s != thr_q);
1658
sewardjf98e1c02008-10-25 16:22:41 +00001659 hbthr_s = thr_s->hbthr;
1660 hbthr_q = thr_q->hbthr;
1661 tl_assert(hbthr_s != hbthr_q);
sewardj60626642011-03-10 15:14:37 +00001662 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1663 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001664
sewardjf98e1c02008-10-25 16:22:41 +00001665 /* Allocate a temporary synchronisation object and use it to send
1666 an imaginary message from the quitter to the stayer, the purpose
1667 being to generate a dependence from the quitter to the
1668 stayer. */
1669 so = libhb_so_alloc();
1670 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001671 /* Arguably the last arg of _so_send should be False, since the
 1672 sending thread doesn't actually exist any more, so we don't
 1673 want _so_send to try taking stack snapshots of it. */
sewardjffce8152011-06-24 10:09:41 +00001674 libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
sewardjf98e1c02008-10-25 16:22:41 +00001675 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1676 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001677
sewardjffce8152011-06-24 10:09:41 +00001678 /* Tell libhb that the quitter has been reaped. Note that we might
1679 have to be cleverer about this, to exclude 2nd and subsequent
1680 notifications for the same hbthr_q, in the case where the app is
1681 buggy (calls pthread_join twice or more on the same thread) AND
1682 where libpthread is also buggy and doesn't return ESRCH on
1683 subsequent calls. (If libpthread isn't thusly buggy, then the
1684 wrapper for pthread_join in hg_intercepts.c will stop us getting
1685 notified here multiple times for the same joinee.) See also
1686 comments in helgrind/tests/jointwice.c. */
1687 libhb_joinedwith_done(hbthr_q);
1688
sewardjf98e1c02008-10-25 16:22:41 +00001689 /* evh__pre_thread_ll_exit issues an error message if the exiting
1690 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001691
1692 /* This holds because, at least when using NPTL as the thread
1693 library, we should be notified the low level thread exit before
1694 we hear of any join event on it. The low level exit
1695 notification feeds through into evh__pre_thread_ll_exit,
1696 which should clear the map_threads entry for it. Hence we
1697 expect there to be no map_threads entry at this point. */
1698 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1699 == VG_INVALID_THREADID);
1700
sewardjf98e1c02008-10-25 16:22:41 +00001701 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001702 all__sanity_check("evh__post_thread_join-post");
1703}
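/* The client-side pattern modelled above, as a hypothetical sketch
   (not part of Helgrind): the temporary SO gives the stayer a
   dependency on everything the quitter did before exiting. */
#if 0
#include <pthread.h>
#include <assert.h>

static int result;

static void* worker ( void* v ) {
   result = 99;             /* the quitter's write */
   return NULL;
}

int main ( void ) {
   pthread_t th;
   pthread_create(&th, NULL, worker, NULL);
   pthread_join(th, NULL);  /* strong recv from the quitter's SO */
   assert(result == 99);    /* ordered after the write: no race */
   return 0;
}
#endif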
1704
1705static
1706void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1707 Addr a, SizeT size) {
1708 if (SHOW_EVENTS >= 2
1709 || (SHOW_EVENTS >= 1 && size != 1))
1710 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1711 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001712 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001713 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001714 all__sanity_check("evh__pre_mem_read-post");
1715}
1716
1717static
1718void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1719 Char* s, Addr a ) {
1720 Int len;
1721 if (SHOW_EVENTS >= 1)
1722 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1723 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001724 // Don't segfault if the string starts in an obviously stupid
1725 // place. Actually we should check the whole string, not just
1726 // the start address, but that's too much trouble. At least
1727 // checking the first byte is better than nothing. See #255009.
1728 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1729 return;
sewardjb4112022007-11-09 22:49:28 +00001730 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001731 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001732 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001733 all__sanity_check("evh__pre_mem_read_asciiz-post");
1734}
1735
1736static
1737void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1738 Addr a, SizeT size ) {
1739 if (SHOW_EVENTS >= 1)
1740 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1741 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001742 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001743 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001744 all__sanity_check("evh__pre_mem_write-post");
1745}
1746
1747static
1748void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1749 if (SHOW_EVENTS >= 1)
1750 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1751 (void*)a, len, (Int)is_inited );
 1752 // FIXME: this is kinda stupid: both arms below are identical, so 'is_inited' is ignored
1753 if (is_inited) {
1754 shadow_mem_make_New(get_current_Thread(), a, len);
1755 } else {
1756 shadow_mem_make_New(get_current_Thread(), a, len);
1757 }
sewardjf98e1c02008-10-25 16:22:41 +00001758 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001759 all__sanity_check("evh__new_mem_heap-post");
1760}
1761
1762static
1763void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001764 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001765 if (SHOW_EVENTS >= 1)
1766 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001767 thr = get_current_Thread();
1768 tl_assert(thr);
1769 if (HG_(clo_free_is_write)) {
1770 /* Treat frees as if the memory was written immediately prior to
1771 the free. This shakes out more races, specifically, cases
1772 where memory is referenced by one thread, and freed by
1773 another, and there's no observable synchronisation event to
1774 guarantee that the reference happens before the free. */
1775 shadow_mem_cwrite_range(thr, a, len);
1776 }
sewardjfd35d492011-03-17 19:39:55 +00001777 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001778 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001779 all__sanity_check("evh__die_mem_heap-post");
1780}
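/* The sort of bug --free-is-write is meant to shake out, as a
   hypothetical client sketch (not part of Helgrind): the read and the
   free are unordered, and treating the free as a write makes the race
   visible. */
#if 0
#include <pthread.h>
#include <stdlib.h>

static int* p;

static void* reader ( void* v ) {
   int x = *p;              /* races with the free below */
   return (void*)(long)x;
}

int main ( void ) {
   pthread_t th;
   p = malloc(sizeof(int));
   *p = 1;
   pthread_create(&th, NULL, reader, NULL);
   free(p);                 /* treated as a write just before the free */
   pthread_join(th, NULL);
   return 0;
}
#endif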
1781
sewardj23f12002009-07-24 08:45:08 +00001782/* --- Event handlers called from generated code --- */
1783
sewardjb4112022007-11-09 22:49:28 +00001784static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001785void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001786 Thread* thr = get_current_Thread_in_C_C();
1787 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001788 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001789}
sewardjf98e1c02008-10-25 16:22:41 +00001790
sewardjb4112022007-11-09 22:49:28 +00001791static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001792void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001793 Thread* thr = get_current_Thread_in_C_C();
1794 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001795 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001796}
sewardjf98e1c02008-10-25 16:22:41 +00001797
sewardjb4112022007-11-09 22:49:28 +00001798static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001799void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001800 Thread* thr = get_current_Thread_in_C_C();
1801 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001802 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001803}
sewardjf98e1c02008-10-25 16:22:41 +00001804
sewardjb4112022007-11-09 22:49:28 +00001805static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001806void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001807 Thread* thr = get_current_Thread_in_C_C();
1808 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001809 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001810}
sewardjf98e1c02008-10-25 16:22:41 +00001811
sewardjb4112022007-11-09 22:49:28 +00001812static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001813void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001814 Thread* thr = get_current_Thread_in_C_C();
1815 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001816 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001817}
1818
1819static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001820void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001821 Thread* thr = get_current_Thread_in_C_C();
1822 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001823 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001824}
sewardjf98e1c02008-10-25 16:22:41 +00001825
sewardjb4112022007-11-09 22:49:28 +00001826static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001827void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001828 Thread* thr = get_current_Thread_in_C_C();
1829 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001830 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001831}
sewardjf98e1c02008-10-25 16:22:41 +00001832
sewardjb4112022007-11-09 22:49:28 +00001833static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001834void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001835 Thread* thr = get_current_Thread_in_C_C();
1836 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001837 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001838}
sewardjf98e1c02008-10-25 16:22:41 +00001839
sewardjb4112022007-11-09 22:49:28 +00001840static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001841void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001842 Thread* thr = get_current_Thread_in_C_C();
1843 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001844 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001845}
sewardjf98e1c02008-10-25 16:22:41 +00001846
sewardjb4112022007-11-09 22:49:28 +00001847static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001848void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001849 Thread* thr = get_current_Thread_in_C_C();
1850 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001851 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001852}
1853
sewardjb4112022007-11-09 22:49:28 +00001854
sewardj9f569b72008-11-13 13:33:09 +00001855/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001856/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001857/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001858
1859/* EXPOSITION only: by intercepting lock init events we can show the
1860 user where the lock was initialised, rather than only being able to
1861 show where it was first locked. Intercepting lock initialisations
1862 is not necessary for the basic operation of the race checker. */
1863static
1864void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1865 void* mutex, Word mbRec )
1866{
1867 if (SHOW_EVENTS >= 1)
1868 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1869 (Int)tid, mbRec, (void*)mutex );
1870 tl_assert(mbRec == 0 || mbRec == 1);
1871 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1872 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001873 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001874 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1875}
1876
1877static
1878void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1879{
1880 Thread* thr;
1881 Lock* lk;
1882 if (SHOW_EVENTS >= 1)
1883 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1884 (Int)tid, (void*)mutex );
1885
1886 thr = map_threads_maybe_lookup( tid );
1887 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001888 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001889
1890 lk = map_locks_maybe_lookup( (Addr)mutex );
1891
1892 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001893 HG_(record_error_Misc)(
1894 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001895 }
1896
1897 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001898 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001899 tl_assert( lk->guestaddr == (Addr)mutex );
1900 if (lk->heldBy) {
1901 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001902 HG_(record_error_Misc)(
1903 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001904 /* remove lock from locksets of all owning threads */
1905 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001906 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001907 lk->heldBy = NULL;
1908 lk->heldW = False;
1909 lk->acquired_at = NULL;
1910 }
1911 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001912 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001913
1914 if (HG_(clo_track_lockorders))
1915 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001916 map_locks_delete( lk->guestaddr );
1917 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001918 }
1919
sewardjf98e1c02008-10-25 16:22:41 +00001920 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001921 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1922}
1923
1924static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1925 void* mutex, Word isTryLock )
1926{
1927 /* Just check the mutex is sane; nothing else to do. */
1928 // 'mutex' may be invalid - not checked by wrapper
1929 Thread* thr;
1930 Lock* lk;
1931 if (SHOW_EVENTS >= 1)
1932 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1933 (Int)tid, (void*)mutex );
1934
1935 tl_assert(isTryLock == 0 || isTryLock == 1);
1936 thr = map_threads_maybe_lookup( tid );
1937 tl_assert(thr); /* cannot fail - Thread* must already exist */
1938
1939 lk = map_locks_maybe_lookup( (Addr)mutex );
1940
1941 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001942 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1943 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001944 }
1945
1946 if ( lk
1947 && isTryLock == 0
1948 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1949 && lk->heldBy
1950 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001951 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001952 /* uh, it's a non-recursive lock and we already w-hold it, and
1953 this is a real lock operation (not a speculative "tryLock"
1954 kind of thing). Duh. Deadlock coming up; but at least
1955 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00001956 HChar* errstr = "Attempt to re-lock a "
1957 "non-recursive lock I already hold";
1958 HChar* auxstr = "Lock was previously acquired";
1959 if (lk->acquired_at) {
1960 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
1961 } else {
1962 HG_(record_error_Misc)( thr, errstr );
1963 }
sewardjb4112022007-11-09 22:49:28 +00001964 }
1965}
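/* The self-deadlock detected above, as a hypothetical sketch: a
   second lock of a non-recursive mutex already W-held by the same
   thread. Helgrind complains just before the program hangs. */
#if 0
#include <pthread.h>

int main ( void ) {
   pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER; /* non-recursive */
   pthread_mutex_lock(&mx);
   pthread_mutex_lock(&mx);  /* error reported here; then deadlock */
   return 0;
}
#endif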
1966
1967static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1968{
1969 // only called if the real library call succeeded - so mutex is sane
1970 Thread* thr;
1971 if (SHOW_EVENTS >= 1)
1972 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1973 (Int)tid, (void*)mutex );
1974
1975 thr = map_threads_maybe_lookup( tid );
1976 tl_assert(thr); /* cannot fail - Thread* must already exist */
1977
1978 evhH__post_thread_w_acquires_lock(
1979 thr,
1980 LK_mbRec, /* if not known, create new lock with this LockKind */
1981 (Addr)mutex
1982 );
1983}
1984
1985static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1986{
1987 // 'mutex' may be invalid - not checked by wrapper
1988 Thread* thr;
1989 if (SHOW_EVENTS >= 1)
1990 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1991 (Int)tid, (void*)mutex );
1992
1993 thr = map_threads_maybe_lookup( tid );
1994 tl_assert(thr); /* cannot fail - Thread* must already exist */
1995
1996 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1997}
1998
1999static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2000{
2001 // only called if the real library call succeeded - so mutex is sane
2002 Thread* thr;
2003 if (SHOW_EVENTS >= 1)
2004 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2005 (Int)tid, (void*)mutex );
2006 thr = map_threads_maybe_lookup( tid );
2007 tl_assert(thr); /* cannot fail - Thread* must already exist */
2008
2009 // anything we should do here?
2010}
2011
2012
sewardj5a644da2009-08-11 10:35:58 +00002013/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002014/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002015/* ------------------------------------------------------- */
2016
2017/* All a bit of a kludge. Pretend we're really dealing with ordinary
2018 pthread_mutex_t's instead, for the most part. */
2019
2020static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2021 void* slock )
2022{
2023 Thread* thr;
2024 Lock* lk;
2025 /* In glibc's kludgey world, we're either initialising or unlocking
2026 it. Since this is the pre-routine, if it is locked, unlock it
2027 and take a dependence edge. Otherwise, do nothing. */
2028
2029 if (SHOW_EVENTS >= 1)
2030 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2031 "(ctid=%d, slock=%p)\n",
2032 (Int)tid, (void*)slock );
2033
2034 thr = map_threads_maybe_lookup( tid );
 2035 /* cannot fail - Thread* must already exist */
2036 tl_assert( HG_(is_sane_Thread)(thr) );
2037
2038 lk = map_locks_maybe_lookup( (Addr)slock );
2039 if (lk && lk->heldBy) {
2040 /* it's held. So do the normal pre-unlock actions, as copied
2041 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2042 duplicates the map_locks_maybe_lookup. */
2043 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2044 False/*!isRDWR*/ );
2045 }
2046}
2047
2048static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2049 void* slock )
2050{
2051 Lock* lk;
2052 /* More kludgery. If the lock has never been seen before, do
2053 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2054 nothing. */
2055
2056 if (SHOW_EVENTS >= 1)
2057 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2058 "(ctid=%d, slock=%p)\n",
2059 (Int)tid, (void*)slock );
2060
2061 lk = map_locks_maybe_lookup( (Addr)slock );
2062 if (!lk) {
2063 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2064 }
2065}
2066
2067static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2068 void* slock, Word isTryLock )
2069{
2070 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2071}
2072
2073static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2074 void* slock )
2075{
2076 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2077}
2078
2079static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2080 void* slock )
2081{
2082 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2083}
2084
2085
sewardj9f569b72008-11-13 13:33:09 +00002086/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002087/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002088/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002089
sewardj02114542009-07-28 20:52:36 +00002090/* A mapping from CV to (the SO associated with it, plus some
2091 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002092 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2093 wait on it completes, we do a 'recv' from the SO. This is believed
2094 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002095 signallings/broadcasts.
2096*/
2097
sewardj02114542009-07-28 20:52:36 +00002098/* .so is the SO for this CV.
2099 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002100
sewardj02114542009-07-28 20:52:36 +00002101 POSIX says effectively that the first pthread_cond_{timed}wait call
2102 causes a dynamic binding between the CV and the mutex, and that
2103 lasts until such time as the waiter count falls to zero. Hence
2104 need to keep track of the number of waiters in order to do
2105 consistency tracking. */
2106typedef
2107 struct {
2108 SO* so; /* libhb-allocated SO */
2109 void* mx_ga; /* addr of associated mutex, if any */
2110 UWord nWaiters; /* # threads waiting on the CV */
2111 }
2112 CVInfo;
2113
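/* The client-side pattern this machinery models, as a hypothetical
   sketch (not part of Helgrind): the signaller 'sends' on the CV's
   SO, the awakened waiter 'recv's from it, so the producer's writes
   happen-before the consumer's reads. */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready, data;

static void* producer ( void* v ) {
   pthread_mutex_lock(&mx);
   data = 7;                       /* ordered before the consumer's read */
   ready = 1;
   pthread_cond_signal(&cv);       /* 'send' into the CV's SO */
   pthread_mutex_unlock(&mx);
   return NULL;
}

static void* consumer ( void* v ) {
   pthread_mutex_lock(&mx);
   while (!ready)
      pthread_cond_wait(&cv, &mx); /* 'recv' from the SO on wakeup */
   printf("%d\n", data);
   pthread_mutex_unlock(&mx);
   return NULL;
}

int main ( void ) {
   pthread_t p, c;
   pthread_create(&c, NULL, consumer, NULL);
   pthread_create(&p, NULL, producer, NULL);
   pthread_join(p, NULL);
   pthread_join(c, NULL);
   return 0;
}
#endif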
2114
2115/* pthread_cond_t* -> CVInfo* */
2116static WordFM* map_cond_to_CVInfo = NULL;
2117
2118static void map_cond_to_CVInfo_INIT ( void ) {
2119 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2120 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2121 "hg.mctCI.1", HG_(free), NULL );
2122 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002123 }
2124}
2125
sewardj02114542009-07-28 20:52:36 +00002126static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002127 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002128 map_cond_to_CVInfo_INIT();
2129 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002130 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002131 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002132 } else {
sewardj02114542009-07-28 20:52:36 +00002133 SO* so = libhb_so_alloc();
2134 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2135 cvi->so = so;
2136 cvi->mx_ga = 0;
2137 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2138 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002139 }
2140}
2141
sewardj02114542009-07-28 20:52:36 +00002142static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002143 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002144 map_cond_to_CVInfo_INIT();
2145 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2146 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002147 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002148 tl_assert(cvi);
2149 tl_assert(cvi->so);
2150 libhb_so_dealloc(cvi->so);
2151 cvi->mx_ga = 0;
2152 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002153 }
2154}
2155
2156static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2157{
sewardjf98e1c02008-10-25 16:22:41 +00002158 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2159 cond to a SO if it is not already so bound, and 'send' on the
2160 SO. This is later used by other thread(s) which successfully
2161 exit from a pthread_cond_wait on the same cv; then they 'recv'
2162 from the SO, thereby acquiring a dependency on this signalling
2163 event. */
sewardjb4112022007-11-09 22:49:28 +00002164 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002165 CVInfo* cvi;
2166 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002167
2168 if (SHOW_EVENTS >= 1)
2169 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2170 (Int)tid, (void*)cond );
2171
sewardjb4112022007-11-09 22:49:28 +00002172 thr = map_threads_maybe_lookup( tid );
2173 tl_assert(thr); /* cannot fail - Thread* must already exist */
2174
sewardj02114542009-07-28 20:52:36 +00002175 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2176 tl_assert(cvi);
2177 tl_assert(cvi->so);
2178
sewardjb4112022007-11-09 22:49:28 +00002179 // error-if: mutex is bogus
2180 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002181 // Hmm. POSIX doesn't actually say that it's an error to call
2182 // pthread_cond_signal with the associated mutex being unlocked.
2183 // Although it does say that it should be "if consistent scheduling
sewardjffce8152011-06-24 10:09:41 +00002184 // is desired." For that reason, print "dubious" if the lock isn't
2185 // held by any thread. Skip the "dubious" if it is held by some
2186 // other thread; that sounds straight-out wrong.
sewardj02114542009-07-28 20:52:36 +00002187 //
sewardjffce8152011-06-24 10:09:41 +00002188 // Anybody who writes code that signals on a CV without holding
2189 // the associated MX needs to be shipped off to a lunatic asylum
2190 // ASAP, even though POSIX doesn't actually declare such behaviour
2191 // illegal -- it makes code extremely difficult to understand/
2192 // reason about. In particular it puts the signalling thread in
2193 // a situation where it is racing against the released waiter
2194 // as soon as the signalling is done, and so there needs to be
2195 // some auxiliary synchronisation mechanism in the program that
2196 // makes this safe -- or the race(s) need to be harmless, or
2197 // probably nonexistent.
2198 //
2199 if (1) {
2200 Lock* lk = NULL;
2201 if (cvi->mx_ga != 0) {
2202 lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2203 }
2204 /* note: lk could be NULL. Be careful. */
2205 if (lk) {
2206 if (lk->kind == LK_rdwr) {
2207 HG_(record_error_Misc)(thr,
2208 "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2209 }
2210 if (lk->heldBy == NULL) {
2211 HG_(record_error_Misc)(thr,
2212 "pthread_cond_{signal,broadcast}: dubious: "
2213 "associated lock is not held by any thread");
2214 }
2215 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2216 HG_(record_error_Misc)(thr,
2217 "pthread_cond_{signal,broadcast}: "
2218 "associated lock is not held by calling thread");
2219 }
2220 } else {
2221 /* Couldn't even find the damn thing. */
2222 // But actually .. that's not necessarily an error. We don't
2223 // know the (CV,MX) binding until a pthread_cond_wait or bcast
 2224 // shows us what it is, and that may not have happened yet.
2225 // So just keep quiet in this circumstance.
2226 //HG_(record_error_Misc)( thr,
2227 // "pthread_cond_{signal,broadcast}: "
2228 // "no or invalid mutex associated with cond");
2229 }
2230 }
sewardjb4112022007-11-09 22:49:28 +00002231
sewardj02114542009-07-28 20:52:36 +00002232 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002233}
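/* The "dubious" shape complained about above, as a hypothetical and
   deliberately buggy sketch: signalling with the associated mutex
   unheld leaves the signaller racing against the released waiter on
   'ready' (and risks a lost wakeup). */
#if 0
#include <pthread.h>

static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready;

static void* bad_signaller ( void* v ) {
   ready = 1;                /* unprotected write: races with waiter */
   pthread_cond_signal(&cv); /* "dubious: lock not held" reported */
   return NULL;
}

static void* waiter ( void* v ) {
   pthread_mutex_lock(&mx);
   while (!ready)
      pthread_cond_wait(&cv, &mx);
   pthread_mutex_unlock(&mx);
   return NULL;
}

int main ( void ) {
   pthread_t s, w;
   pthread_create(&w, NULL, waiter, NULL);
   pthread_create(&s, NULL, bad_signaller, NULL);
   pthread_join(s, NULL);
   pthread_join(w, NULL);
   return 0;
}
#endif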
2234
2235/* returns True if it reckons 'mutex' is valid and held by this
2236 thread, else False */
2237static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2238 void* cond, void* mutex )
2239{
2240 Thread* thr;
2241 Lock* lk;
2242 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002243 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002244
2245 if (SHOW_EVENTS >= 1)
2246 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2247 "(ctid=%d, cond=%p, mutex=%p)\n",
2248 (Int)tid, (void*)cond, (void*)mutex );
2249
sewardjb4112022007-11-09 22:49:28 +00002250 thr = map_threads_maybe_lookup( tid );
2251 tl_assert(thr); /* cannot fail - Thread* must already exist */
2252
2253 lk = map_locks_maybe_lookup( (Addr)mutex );
2254
2255 /* Check for stupid mutex arguments. There are various ways to be
2256 a bozo. Only complain once, though, even if more than one thing
2257 is wrong. */
2258 if (lk == NULL) {
2259 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002260 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002261 thr,
2262 "pthread_cond_{timed}wait called with invalid mutex" );
2263 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002264 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002265 if (lk->kind == LK_rdwr) {
2266 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002267 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002268 thr, "pthread_cond_{timed}wait called with mutex "
2269 "of type pthread_rwlock_t*" );
2270 } else
2271 if (lk->heldBy == NULL) {
2272 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002273 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002274 thr, "pthread_cond_{timed}wait called with un-held mutex");
2275 } else
2276 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002277 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002278 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002279 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002280 thr, "pthread_cond_{timed}wait called with mutex "
2281 "held by a different thread" );
2282 }
2283 }
2284
2285 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002286 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2287 tl_assert(cvi);
2288 tl_assert(cvi->so);
2289 if (cvi->nWaiters == 0) {
2290 /* form initial (CV,MX) binding */
2291 cvi->mx_ga = mutex;
2292 }
2293 else /* check existing (CV,MX) binding */
2294 if (cvi->mx_ga != mutex) {
2295 HG_(record_error_Misc)(
2296 thr, "pthread_cond_{timed}wait: cond is associated "
2297 "with a different mutex");
2298 }
2299 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002300
2301 return lk_valid;
2302}
2303
2304static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2305 void* cond, void* mutex )
2306{
sewardjf98e1c02008-10-25 16:22:41 +00002307 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2308 the SO for this cond, and 'recv' from it so as to acquire a
2309 dependency edge back to the signaller/broadcaster. */
2310 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002311 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002312
2313 if (SHOW_EVENTS >= 1)
2314 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2315 "(ctid=%d, cond=%p, mutex=%p)\n",
2316 (Int)tid, (void*)cond, (void*)mutex );
2317
sewardjb4112022007-11-09 22:49:28 +00002318 thr = map_threads_maybe_lookup( tid );
2319 tl_assert(thr); /* cannot fail - Thread* must already exist */
2320
2321 // error-if: cond is also associated with a different mutex
2322
sewardj02114542009-07-28 20:52:36 +00002323 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2324 tl_assert(cvi);
2325 tl_assert(cvi->so);
2326 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002327
sewardj02114542009-07-28 20:52:36 +00002328 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002329 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2330 it? If this happened it would surely be a bug in the threads
2331 library. Or one of those fabled "spurious wakeups". */
2332 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002333 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002334 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002335 }
sewardjf98e1c02008-10-25 16:22:41 +00002336
2337 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002338 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2339
2340 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002341}
2342
2343static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2344 void* cond )
2345{
2346 /* Deal with destroy events. The only purpose is to free storage
2347 associated with the CV, so as to avoid any possible resource
2348 leaks. */
2349 if (SHOW_EVENTS >= 1)
2350 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2351 "(ctid=%d, cond=%p)\n",
2352 (Int)tid, (void*)cond );
2353
sewardj02114542009-07-28 20:52:36 +00002354 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002355}
2356
2357
sewardj9f569b72008-11-13 13:33:09 +00002358/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002359/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002360/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002361
2362/* EXPOSITION only */
2363static
2364void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2365{
2366 if (SHOW_EVENTS >= 1)
2367 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2368 (Int)tid, (void*)rwl );
2369 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002370 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002371 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2372}
2373
2374static
2375void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2376{
2377 Thread* thr;
2378 Lock* lk;
2379 if (SHOW_EVENTS >= 1)
2380 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2381 (Int)tid, (void*)rwl );
2382
2383 thr = map_threads_maybe_lookup( tid );
2384 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002385 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002386
2387 lk = map_locks_maybe_lookup( (Addr)rwl );
2388
2389 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002390 HG_(record_error_Misc)(
2391 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002392 }
2393
2394 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002395 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002396 tl_assert( lk->guestaddr == (Addr)rwl );
2397 if (lk->heldBy) {
2398 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002399 HG_(record_error_Misc)(
2400 thr, "pthread_rwlock_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00002401 /* remove lock from locksets of all owning threads */
2402 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002403 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002404 lk->heldBy = NULL;
2405 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002406 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002407 }
2408 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002409 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002410
2411 if (HG_(clo_track_lockorders))
2412 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002413 map_locks_delete( lk->guestaddr );
2414 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002415 }
2416
sewardjf98e1c02008-10-25 16:22:41 +00002417 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002418 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2419}
2420
2421static
sewardj789c3c52008-02-25 12:10:07 +00002422void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2423 void* rwl,
2424 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002425{
2426 /* Just check the rwl is sane; nothing else to do. */
2427 // 'rwl' may be invalid - not checked by wrapper
2428 Thread* thr;
2429 Lock* lk;
2430 if (SHOW_EVENTS >= 1)
2431 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2432 (Int)tid, (Int)isW, (void*)rwl );
2433
2434 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002435 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002436 thr = map_threads_maybe_lookup( tid );
2437 tl_assert(thr); /* cannot fail - Thread* must already exist */
2438
2439 lk = map_locks_maybe_lookup( (Addr)rwl );
2440 if ( lk
2441 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2442 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002443 HG_(record_error_Misc)(
2444 thr, "pthread_rwlock_{rd,rw}lock with a "
2445 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002446 }
2447}
2448
2449static
2450void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2451{
2452 // only called if the real library call succeeded - so mutex is sane
2453 Thread* thr;
2454 if (SHOW_EVENTS >= 1)
2455 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2456 (Int)tid, (Int)isW, (void*)rwl );
2457
2458 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2459 thr = map_threads_maybe_lookup( tid );
2460 tl_assert(thr); /* cannot fail - Thread* must already exist */
2461
2462 (isW ? evhH__post_thread_w_acquires_lock
2463 : evhH__post_thread_r_acquires_lock)(
2464 thr,
2465 LK_rdwr, /* if not known, create new lock with this LockKind */
2466 (Addr)rwl
2467 );
2468}
2469
2470static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2471{
2472 // 'rwl' may be invalid - not checked by wrapper
2473 Thread* thr;
2474 if (SHOW_EVENTS >= 1)
2475 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2476 (Int)tid, (void*)rwl );
2477
2478 thr = map_threads_maybe_lookup( tid );
2479 tl_assert(thr); /* cannot fail - Thread* must already exist */
2480
2481 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2482}
2483
2484static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2485{
2486 // only called if the real library call succeeded - so mutex is sane
2487 Thread* thr;
2488 if (SHOW_EVENTS >= 1)
2489 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2490 (Int)tid, (void*)rwl );
2491 thr = map_threads_maybe_lookup( tid );
2492 tl_assert(thr); /* cannot fail - Thread* must already exist */
2493
2494 // anything we should do here?
2495}
2496
2497
sewardj9f569b72008-11-13 13:33:09 +00002498/* ---------------------------------------------------------- */
2499/* -------------- events to do with semaphores -------------- */
2500/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002501
sewardj11e352f2007-11-30 11:11:02 +00002502/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002503 variables. */
2504
sewardjf98e1c02008-10-25 16:22:41 +00002505/* For each semaphore, we maintain a stack of SOs. When a 'post'
2506 operation is done on a semaphore (unlocking, essentially), a new SO
2507 is created for the posting thread, the posting thread does a strong
2508 send to it (which merely installs the posting thread's VC in the
2509 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002510
2511 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002512 semaphore, we pop a SO off the semaphore's stack (which should be
2513 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002514 dependencies between posters and waiters of the semaphore.
2515
sewardjf98e1c02008-10-25 16:22:41 +00002516 It may not be necessary to use a stack - perhaps a bag of SOs would
 2517 do. But we do need to keep track of how many not-yet-consumed posts
2518 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002519
sewardjf98e1c02008-10-25 16:22:41 +00002520 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002521 twice on S. T3 cannot complete its waits without both T1 and T2
2522 posting. The above mechanism will ensure that T3 acquires
2523 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002524
sewardjf98e1c02008-10-25 16:22:41 +00002525 /* When a semaphore is initialised with value N, we act as if we'd
 2526 posted N times on the semaphore: basically create N SOs and do a
 2527 strong send to all of them. This allows up to N waits on the
2528 semaphore to acquire a dependency on the initialisation point,
2529 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002530
2531 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2532 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002533*/
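/* The T1/T2/T3 scenario above, as a hypothetical client sketch (not
   part of Helgrind): each post pushes a freshly-sent SO; each wait
   pops one and recvs, so T3 ends up ordered after both writers. */
#if 0
#include <semaphore.h>
#include <pthread.h>

static sem_t s;
static int d1, d2;

static void* t1_fn ( void* v ) { d1 = 1; sem_post(&s); return NULL; }
static void* t2_fn ( void* v ) { d2 = 2; sem_post(&s); return NULL; }

static void* t3_fn ( void* v ) {
   sem_wait(&s);   /* pops one SO and recvs: edge from one poster */
   sem_wait(&s);   /* pops the other: edge from the other poster */
   return (void*)(long)(d1 + d2);  /* ordered after both writes */
}

int main ( void ) {
   pthread_t a, b, c;
   sem_init(&s, 0, 0);
   pthread_create(&a, NULL, t1_fn, NULL);
   pthread_create(&b, NULL, t2_fn, NULL);
   pthread_create(&c, NULL, t3_fn, NULL);
   pthread_join(a, NULL);
   pthread_join(b, NULL);
   pthread_join(c, NULL);
   sem_destroy(&s);
   return 0;
}
#endif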
2534
sewardjf98e1c02008-10-25 16:22:41 +00002535/* sem_t* -> XArray* SO* */
2536static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002537
sewardjf98e1c02008-10-25 16:22:41 +00002538static void map_sem_to_SO_stack_INIT ( void ) {
2539 if (map_sem_to_SO_stack == NULL) {
2540 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2541 HG_(free), NULL );
2542 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002543 }
2544}
2545
sewardjf98e1c02008-10-25 16:22:41 +00002546static void push_SO_for_sem ( void* sem, SO* so ) {
2547 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002548 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002549 tl_assert(so);
2550 map_sem_to_SO_stack_INIT();
2551 if (VG_(lookupFM)( map_sem_to_SO_stack,
2552 &keyW, (UWord*)&xa, (UWord)sem )) {
2553 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002554 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002555 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002556 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002557 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2558 VG_(addToXA)( xa, &so );
2559 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002560 }
2561}
2562
sewardjf98e1c02008-10-25 16:22:41 +00002563static SO* mb_pop_SO_for_sem ( void* sem ) {
2564 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002565 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002566 SO* so;
2567 map_sem_to_SO_stack_INIT();
2568 if (VG_(lookupFM)( map_sem_to_SO_stack,
2569 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002570 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002571 Word sz;
2572 tl_assert(keyW == (UWord)sem);
2573 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002574 tl_assert(sz >= 0);
2575 if (sz == 0)
2576 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002577 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2578 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002579 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002580 return so;
sewardjb4112022007-11-09 22:49:28 +00002581 } else {
2582 /* hmm, that's odd. No stack for this semaphore. */
2583 return NULL;
2584 }
2585}
2586
sewardj11e352f2007-11-30 11:11:02 +00002587static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002588{
sewardjf98e1c02008-10-25 16:22:41 +00002589 UWord keyW, valW;
2590 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002591
sewardjb4112022007-11-09 22:49:28 +00002592 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002593 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002594 (Int)tid, (void*)sem );
2595
sewardjf98e1c02008-10-25 16:22:41 +00002596 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002597
sewardjf98e1c02008-10-25 16:22:41 +00002598 /* Empty out the semaphore's SO stack. This way of doing it is
2599 stupid, but at least it's easy. */
2600 while (1) {
2601 so = mb_pop_SO_for_sem( sem );
2602 if (!so) break;
2603 libhb_so_dealloc(so);
2604 }
2605
2606 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2607 XArray* xa = (XArray*)valW;
2608 tl_assert(keyW == (UWord)sem);
2609 tl_assert(xa);
2610 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2611 VG_(deleteXA)(xa);
2612 }
sewardjb4112022007-11-09 22:49:28 +00002613}
2614
sewardj11e352f2007-11-30 11:11:02 +00002615static
2616void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2617{
sewardjf98e1c02008-10-25 16:22:41 +00002618 SO* so;
2619 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002620
2621 if (SHOW_EVENTS >= 1)
2622 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2623 (Int)tid, (void*)sem, value );
2624
sewardjf98e1c02008-10-25 16:22:41 +00002625 thr = map_threads_maybe_lookup( tid );
2626 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002627
sewardjf98e1c02008-10-25 16:22:41 +00002628 /* Empty out the semaphore's SO stack. This way of doing it is
2629 stupid, but at least it's easy. */
2630 while (1) {
2631 so = mb_pop_SO_for_sem( sem );
2632 if (!so) break;
2633 libhb_so_dealloc(so);
2634 }
sewardj11e352f2007-11-30 11:11:02 +00002635
sewardjf98e1c02008-10-25 16:22:41 +00002636 /* If we don't do this check, the following while loop runs us out
2637 of memory for stupid initial values of 'value'. */
2638 if (value > 10000) {
2639 HG_(record_error_Misc)(
2640 thr, "sem_init: initial value exceeds 10000; using 10000" );
2641 value = 10000;
2642 }
sewardj11e352f2007-11-30 11:11:02 +00002643
sewardjf98e1c02008-10-25 16:22:41 +00002644 /* Now create 'valid' new SOs for the thread, do a strong send to
2645 each of them, and push them all on the stack. */
2646 for (; value > 0; value--) {
2647 Thr* hbthr = thr->hbthr;
2648 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002649
sewardjf98e1c02008-10-25 16:22:41 +00002650 so = libhb_so_alloc();
2651 libhb_so_send( hbthr, so, True/*strong send*/ );
2652 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002653 }
2654}
2655
2656static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002657{
sewardjf98e1c02008-10-25 16:22:41 +00002658 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2659 it (iow, write our VC into it, then tick ours), and push the SO
2660 on a stack of SOs associated with 'sem'. This is later used
2661 by other thread(s) which successfully exit from a sem_wait on
2662 the same sem; by doing a strong recv from SOs popped off the
2663 stack, they acquire dependencies on the posting thread
2664 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002665
sewardjf98e1c02008-10-25 16:22:41 +00002666 Thread* thr;
2667 SO* so;
2668 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002669
2670 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002671 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002672 (Int)tid, (void*)sem );
2673
2674 thr = map_threads_maybe_lookup( tid );
2675 tl_assert(thr); /* cannot fail - Thread* must already exist */
2676
2677 // error-if: sem is bogus
2678
sewardjf98e1c02008-10-25 16:22:41 +00002679 hbthr = thr->hbthr;
2680 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002681
sewardjf98e1c02008-10-25 16:22:41 +00002682 so = libhb_so_alloc();
2683 libhb_so_send( hbthr, so, True/*strong send*/ );
2684 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002685}
2686
sewardj11e352f2007-11-30 11:11:02 +00002687static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002688{
sewardjf98e1c02008-10-25 16:22:41 +00002689 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2690 the 'sem' from this semaphore's SO-stack, and do a strong recv
2691 from it. This creates a dependency back to one of the post-ers
2692 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002693
sewardjf98e1c02008-10-25 16:22:41 +00002694 Thread* thr;
2695 SO* so;
2696 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002697
2698 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002699 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002700 (Int)tid, (void*)sem );
2701
2702 thr = map_threads_maybe_lookup( tid );
2703 tl_assert(thr); /* cannot fail - Thread* must already exist */
2704
2705 // error-if: sem is bogus
2706
sewardjf98e1c02008-10-25 16:22:41 +00002707 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002708
sewardjf98e1c02008-10-25 16:22:41 +00002709 if (so) {
2710 hbthr = thr->hbthr;
2711 tl_assert(hbthr);
2712
2713 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2714 libhb_so_dealloc(so);
2715 } else {
2716 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2717 If this happened it would surely be a bug in the threads
2718 library. */
2719 HG_(record_error_Misc)(
2720 thr, "Bug in libpthread: sem_wait succeeded on"
2721 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002722 }
2723}
2724
2725
sewardj9f569b72008-11-13 13:33:09 +00002726/* -------------------------------------------------------- */
2727/* -------------- events to do with barriers -------------- */
2728/* -------------------------------------------------------- */
2729
2730typedef
2731 struct {
2732 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002733 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002734 UWord size; /* declared size */
2735 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2736 }
2737 Bar;
2738
2739static Bar* new_Bar ( void ) {
2740 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2741 tl_assert(bar);
2742 /* all fields are zero */
2743 tl_assert(bar->initted == False);
2744 return bar;
2745}
2746
2747static void delete_Bar ( Bar* bar ) {
2748 tl_assert(bar);
2749 if (bar->waiting)
2750 VG_(deleteXA)(bar->waiting);
2751 HG_(free)(bar);
2752}
2753
2754/* A mapping which stores auxiliary data for barriers. */
2755
2756/* pthread_barrier_t* -> Bar* */
2757static WordFM* map_barrier_to_Bar = NULL;
2758
2759static void map_barrier_to_Bar_INIT ( void ) {
2760 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2761 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2762 "hg.mbtBI.1", HG_(free), NULL );
2763 tl_assert(map_barrier_to_Bar != NULL);
2764 }
2765}
2766
2767static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2768 UWord key, val;
2769 map_barrier_to_Bar_INIT();
2770 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2771 tl_assert(key == (UWord)barrier);
2772 return (Bar*)val;
2773 } else {
2774 Bar* bar = new_Bar();
2775 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2776 return bar;
2777 }
2778}
2779
2780static void map_barrier_to_Bar_delete ( void* barrier ) {
2781 UWord keyW, valW;
2782 map_barrier_to_Bar_INIT();
2783 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2784 Bar* bar = (Bar*)valW;
2785 tl_assert(keyW == (UWord)barrier);
2786 delete_Bar(bar);
2787 }
2788}
2789
2790
2791static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2792 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002793 UWord count,
2794 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002795{
2796 Thread* thr;
2797 Bar* bar;
2798
2799 if (SHOW_EVENTS >= 1)
2800 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002801 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2802 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002803
2804 thr = map_threads_maybe_lookup( tid );
2805 tl_assert(thr); /* cannot fail - Thread* must already exist */
2806
2807 if (count == 0) {
2808 HG_(record_error_Misc)(
2809 thr, "pthread_barrier_init: 'count' argument is zero"
2810 );
2811 }
2812
sewardj406bac82010-03-03 23:03:40 +00002813 if (resizable != 0 && resizable != 1) {
2814 HG_(record_error_Misc)(
2815 thr, "pthread_barrier_init: invalid 'resizable' argument"
2816 );
2817 }
2818
sewardj9f569b72008-11-13 13:33:09 +00002819 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2820 tl_assert(bar);
2821
2822 if (bar->initted) {
2823 HG_(record_error_Misc)(
2824 thr, "pthread_barrier_init: barrier is already initialised"
2825 );
2826 }
2827
2828 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2829 tl_assert(bar->initted);
2830 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002831 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002832 );
2833 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2834 }
2835 if (!bar->waiting) {
2836 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2837 sizeof(Thread*) );
2838 }
2839
2840 tl_assert(bar->waiting);
2841 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002842 bar->initted = True;
2843 bar->resizable = resizable == 1 ? True : False;
2844 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002845}
2846
2847
2848static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2849 void* barrier )
2850{
sewardj553655c2008-11-14 19:41:19 +00002851 Thread* thr;
2852 Bar* bar;
2853
sewardj9f569b72008-11-13 13:33:09 +00002854 /* Deal with destroy events. The only purpose is to free storage
2855 associated with the barrier, so as to avoid any possible
2856 resource leaks. */
2857 if (SHOW_EVENTS >= 1)
2858 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2859 "(tid=%d, barrier=%p)\n",
2860 (Int)tid, (void*)barrier );
2861
sewardj553655c2008-11-14 19:41:19 +00002862 thr = map_threads_maybe_lookup( tid );
2863 tl_assert(thr); /* cannot fail - Thread* must already exist */
2864
2865 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2866 tl_assert(bar);
2867
2868 if (!bar->initted) {
2869 HG_(record_error_Misc)(
2870 thr, "pthread_barrier_destroy: barrier was never initialised"
2871 );
2872 }
2873
2874 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2875 HG_(record_error_Misc)(
2876 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2877 );
2878 }
2879
sewardj9f569b72008-11-13 13:33:09 +00002880 /* Maybe we shouldn't do this; just let it persist, so that when it
2881 is reinitialised we don't need to do any dynamic memory
2882 allocation? The downside is a potentially unlimited space leak,
2883 if the client creates (in turn) a large number of barriers all
2884 at different locations. Note that if we do later move to the
2885 don't-delete-it scheme, we need to mark the barrier as
2886 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002887 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002888 map_barrier_to_Bar_delete( barrier );
2889}
2890
2891
sewardj406bac82010-03-03 23:03:40 +00002892/* All the threads have arrived. Now do the Interesting Bit. Get a
2893 new synchronisation object and do a weak send to it from all the
2894 participating threads. This makes its vector clocks be the join of
2895 all the individual threads' vector clocks. Then do a strong
2896 receive from it back to all threads, so that their VCs are a copy
2897 of it (hence are all equal to the join of their original VCs.) */
2898static void do_barrier_cross_sync_and_empty ( Bar* bar )
2899{
2900 /* XXX check bar->waiting has no duplicates */
2901 UWord i;
2902 SO* so = libhb_so_alloc();
2903
2904 tl_assert(bar->waiting);
2905 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2906
2907 /* compute the join ... */
2908 for (i = 0; i < bar->size; i++) {
2909 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2910 Thr* hbthr = t->hbthr;
2911 libhb_so_send( hbthr, so, False/*weak send*/ );
2912 }
2913 /* ... and distribute to all threads */
2914 for (i = 0; i < bar->size; i++) {
2915 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2916 Thr* hbthr = t->hbthr;
2917 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2918 }
2919
2920 /* finally, we must empty out the waiting vector */
2921 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2922
2923 /* and we don't need this any more. Perhaps a stack-allocated
2924 SO would be better? */
2925 libhb_so_dealloc(so);
2926}
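/* A worked instance of the above, with made-up clock values. Three
   waiters arrive with vector clocks
      T1 = [4,0,0], T2 = [0,7,0], T3 = [0,0,2].
   The three weak sends leave the SO holding the join [4,7,2]; the
   three strong recvs then copy that join back into each thread, so
   every thread leaves with a clock that dominates every thread's
   arrival clock -- exactly the ordering a barrier must establish. */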
2927
2928
sewardj9f569b72008-11-13 13:33:09 +00002929static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2930 void* barrier )
2931{
sewardj1c466b72008-11-19 11:52:14 +00002932 /* This function gets called after a client thread calls
2933 pthread_barrier_wait but before it arrives at the real
2934 pthread_barrier_wait.
2935
2936 Why is the following correct? It's a bit subtle.
2937
2938 If this is not the last thread arriving at the barrier, we simply
2939 note its presence and return. Because valgrind (at least as of
2940 Nov 08) is single threaded, we are guaranteed safe from any race
2941 conditions when in this function -- no other client threads are
2942 running.
2943
2944 If this is the last thread, then we are again the only running
2945 thread. All the other threads will have either arrived at the
2946 real pthread_barrier_wait or are on their way to it, but in any
2947 case are guaranteed not to be able to move past it, because this
2948 thread is currently in this function and so has not yet arrived
2949 at the real pthread_barrier_wait. That means that:
2950
2951 1. While we are in this function, none of the other threads
2952 waiting at the barrier can move past it.
2953
2954 2. When this function returns (and simulated execution resumes),
2955 this thread and all other waiting threads will be able to move
2956 past the real barrier.
2957
2958 Because of this, it is now safe to update the vector clocks of
2959 all threads, to represent the fact that they all arrived at the
2960 barrier and have all moved on. There is no danger of any
2961 complications to do with some threads leaving the barrier and
2962 racing back round to the front, whilst others are still leaving
2963 (which is the primary source of complication in correct handling/
2964 implementation of barriers). That can't happen because we update
2965 here our data structures so as to indicate that the threads have
2966 passed the barrier, even though, as per (2) above, they are
2967 guaranteed not to pass the barrier until we return.
2968
2969 This relies crucially on Valgrind being single threaded. If that
2970 changes, this will need to be reconsidered.
2971 */
sewardj9f569b72008-11-13 13:33:09 +00002972 Thread* thr;
2973 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00002974 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00002975
2976 if (SHOW_EVENTS >= 1)
2977 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2978 "(tid=%d, barrier=%p)\n",
2979 (Int)tid, (void*)barrier );
2980
2981 thr = map_threads_maybe_lookup( tid );
2982 tl_assert(thr); /* cannot fail - Thread* must already exist */
2983
2984 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2985 tl_assert(bar);
2986
2987 if (!bar->initted) {
2988 HG_(record_error_Misc)(
2989 thr, "pthread_barrier_wait: barrier is uninitialised"
2990 );
2991 return; /* client is broken .. avoid assertions below */
2992 }
2993
2994 /* guaranteed by _INIT_PRE above */
2995 tl_assert(bar->size > 0);
2996 tl_assert(bar->waiting);
2997
2998 VG_(addToXA)( bar->waiting, &thr );
2999
3000 /* guaranteed by this function */
3001 present = VG_(sizeXA)(bar->waiting);
3002 tl_assert(present > 0 && present <= bar->size);
3003
3004 if (present < bar->size)
3005 return;
3006
sewardj406bac82010-03-03 23:03:40 +00003007 do_barrier_cross_sync_and_empty(bar);
3008}
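/* For orientation, the client-side shape that drives the logic
   above (guarded out; plain POSIX, with made-up names 'b' and
   'worker'; nothing Helgrind-specific is assumed): */
#if 0
#include <pthread.h>

static pthread_barrier_t b;  /* initialised with count == 3 */

static void* worker ( void* arg ) {
   /* ... phase 1 work ... */
   pthread_barrier_wait(&b);  /* WAIT_PRE fires here; the third
                                 arrival triggers the cross-sync */
   /* ... phase 2 work: ordered after everyone's phase 1 ... */
   return NULL;
}
#endif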
sewardj9f569b72008-11-13 13:33:09 +00003009
sewardj9f569b72008-11-13 13:33:09 +00003010
sewardj406bac82010-03-03 23:03:40 +00003011static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3012 void* barrier,
3013 UWord newcount )
3014{
3015 Thread* thr;
3016 Bar* bar;
3017 UWord present;
3018
3019 if (SHOW_EVENTS >= 1)
3020 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3021 "(tid=%d, barrier=%p, newcount=%lu)\n",
3022 (Int)tid, (void*)barrier, newcount );
3023
3024 thr = map_threads_maybe_lookup( tid );
3025 tl_assert(thr); /* cannot fail - Thread* must already exist */
3026
3027 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3028 tl_assert(bar);
3029
3030 if (!bar->initted) {
3031 HG_(record_error_Misc)(
3032 thr, "pthread_barrier_resize: barrier is uninitialised"
3033 );
3034 return; /* client is broken .. avoid assertions below */
3035 }
3036
3037 if (!bar->resizable) {
3038 HG_(record_error_Misc)(
3039 thr, "pthread_barrier_resize: barrier may not be resized"
3040 );
3041 return; /* client is broken .. avoid assertions below */
3042 }
3043
3044 if (newcount == 0) {
3045 HG_(record_error_Misc)(
3046 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3047 );
3048 return; /* client is broken .. avoid assertions below */
3049 }
3050
3051 /* guaranteed by _INIT_PRE above */
3052 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003053 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003054 /* Guaranteed by this fn */
3055 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003056
sewardj406bac82010-03-03 23:03:40 +00003057 if (newcount >= bar->size) {
3058 /* Increasing the capacity. There's no possibility of threads
3059 moving on from the barrier in this situation, so just note
3060 the fact and do nothing more. */
3061 bar->size = newcount;
3062 } else {
3063 /* Decreasing the capacity. If we decrease it to be equal or
3064 below the number of waiting threads, they will now move past
3065 the barrier, so need to mess with dep edges in the same way
3066 as if the barrier had filled up normally. */
3067 present = VG_(sizeXA)(bar->waiting);
3068 tl_assert(present >= 0 && present <= bar->size);
3069 if (newcount <= present) {
3070 bar->size = present; /* keep the cross_sync call happy */
3071 do_barrier_cross_sync_and_empty(bar);
3072 }
3073 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003074 }
sewardj9f569b72008-11-13 13:33:09 +00003075}
3076
3077
sewardjed2e72e2009-08-14 11:08:24 +00003078/* ----------------------------------------------------- */
3079/* ----- events to do with user-specified HB edges ----- */
3080/* ----------------------------------------------------- */
3081
3082/* A mapping from arbitrary UWord tag to the SO associated with it.
3083 The UWord tags are meaningless to us, interpreted only by the
3084 user. */
3085
3086
3087
3088/* UWord -> SO* */
3089static WordFM* map_usertag_to_SO = NULL;
3090
3091static void map_usertag_to_SO_INIT ( void ) {
3092 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3093 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3094 "hg.mutS.1", HG_(free), NULL );
3095 tl_assert(map_usertag_to_SO != NULL);
3096 }
3097}
3098
3099static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3100 UWord key, val;
3101 map_usertag_to_SO_INIT();
3102 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3103 tl_assert(key == (UWord)usertag);
3104 return (SO*)val;
3105 } else {
3106 SO* so = libhb_so_alloc();
3107 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3108 return so;
3109 }
3110}
3111
sewardj6015d0e2011-03-11 19:10:48 +00003112static void map_usertag_to_SO_delete ( UWord usertag ) {
3113 UWord keyW, valW;
3114 map_usertag_to_SO_INIT();
3115 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3116 SO* so = (SO*)valW;
3117 tl_assert(keyW == usertag);
3118 tl_assert(so);
3119 libhb_so_dealloc(so);
3120 }
3121}
sewardjed2e72e2009-08-14 11:08:24 +00003122
3123
3124static
3125void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3126{
3127 /* TID is just about to notionally send a message on a notional
3128 abstract synchronisation object whose identity is given by
3129 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003130 bound, and do a 'weak send' on the SO. This joins the vector
3131 clocks from this thread into any vector clocks already present
3132 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003133 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003134 thereby acquiring a dependency on all the events that have
3135 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003136 Thread* thr;
3137 SO* so;
3138
3139 if (SHOW_EVENTS >= 1)
3140 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3141 (Int)tid, usertag );
3142
3143 thr = map_threads_maybe_lookup( tid );
3144 tl_assert(thr); /* cannot fail - Thread* must already exist */
3145
3146 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3147 tl_assert(so);
3148
sewardj8c50d3c2011-03-11 18:38:12 +00003149 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003150}
3151
3152static
3153void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3154{
3155 /* TID has just notionally received a message from a notional
3156 abstract synchronisation object whose identity is given by
3157 USERTAG. Bind USERTAG to a real SO if it is not already so
3158 bound. If the SO has at some point in the past been 'sent' on,
3159 do a 'strong receive' on it, thereby acquiring a dependency on
3160 the sender. */
3161 Thread* thr;
3162 SO* so;
3163
3164 if (SHOW_EVENTS >= 1)
3165 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3166 (Int)tid, usertag );
3167
3168 thr = map_threads_maybe_lookup( tid );
3169 tl_assert(thr); /* cannot fail - Thread* must already exist */
3170
3171 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3172 tl_assert(so);
3173
3174 /* Acquire a dependency on it. If the SO has never so far been
3175 sent on, then libhb_so_recv will do nothing. So we're safe
3176 regardless of SO's history. */
3177 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3178}
3179
sewardj6015d0e2011-03-11 19:10:48 +00003180static
3181void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3182{
3183 /* TID declares that any happens-before edges notionally stored in
3184 USERTAG can be deleted. If (as would normally be the case) a
3185 SO is associated with USERTAG, then the association is removed
3186 and all resources associated with SO are freed. Importantly,
3187 that frees up any VTSs stored in SO. */
3188 if (SHOW_EVENTS >= 1)
3189 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3190 (Int)tid, usertag );
3191
3192 map_usertag_to_SO_delete( usertag );
3193}
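/* Client-side sketch showing how the three handlers above are
   reached (guarded out; it assumes the ANNOTATE_HAPPENS_BEFORE,
   ANNOTATE_HAPPENS_AFTER and ANNOTATE_HAPPENS_BEFORE_FORGET_ALL
   macros from helgrind.h, which funnel into these handlers via the
   client-request machinery; 'flag' and 'data' are made up): */
#if 0
#include "helgrind.h"

static volatile int flag = 0;
static int data;

static void producer ( void ) {
   data = 42;                          /* publish before the send */
   ANNOTATE_HAPPENS_BEFORE(&flag);     /* weak send on flag's SO  */
   flag = 1;
}

static void consumer ( void ) {
   while (flag == 0) { /* spin */ }
   ANNOTATE_HAPPENS_AFTER(&flag);      /* strong recv: dep on producer */
   /* the read of 'data' is now ordered after the producer's write */
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&flag); /* frees the SO's VTSs */
}
#endif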
3194
sewardjed2e72e2009-08-14 11:08:24 +00003195
sewardjb4112022007-11-09 22:49:28 +00003196/*--------------------------------------------------------------*/
3197/*--- Lock acquisition order monitoring ---*/
3198/*--------------------------------------------------------------*/
3199
3200/* FIXME: here are some optimisations still to do in
3201 laog__pre_thread_acquires_lock.
3202
3203 The graph is structured so that if L1 --*--> L2 then L1 must be
3204 acquired before L2.
3205
3206 The common case is that some thread T holds (eg) L1 L2 and L3 and
3207 is repeatedly acquiring and releasing Ln, and there is no ordering
3208 error in what it is doing. Hence it repeatedly:
3209
3210 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3211 produces the answer No (because there is no error).
3212
3213 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3214 (because they already got added the first time T acquired Ln).
3215
3216 Hence cache these two events:
3217
3218 (1) Cache result of the query from last time. Invalidate the cache
3219 any time any edges are added to or deleted from laog.
3220
3221 (2) Cache these add-edge requests and ignore them if said edges
3222 have already been added to laog. Invalidate the cache any time
3223 any edges are deleted from laog.
3224*/
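/* A minimal sketch of what those two caches might look like (an
   assumption about one possible implementation -- nothing like
   'laog_dfs_cache' exists in this file): */
#if 0
static struct {
   Bool      valid;  /* cleared whenever laog gains or loses an edge */
   Lock*     lk;     /* the Ln of the most recent (1)-style query    */
   WordSetID held;   /* the lockset it was checked against           */
   Lock*     res;    /* the cached laog__do_dfs_from_to answer       */
} laog_dfs_cache;
/* Item (2) would similarly remember recently-added (old,lk) pairs
   and skip laog__add_edge when the pair is already known, with the
   remembered set cleared on any edge deletion. */
#endif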
3225
3226typedef
3227 struct {
3228 WordSetID inns; /* in univ_laog */
3229 WordSetID outs; /* in univ_laog */
3230 }
3231 LAOGLinks;
3232
3233/* lock order acquisition graph */
3234static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3235
3236/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3237 where that edge was created, so that we can show the user later if
3238 we need to. */
3239typedef
3240 struct {
3241 Addr src_ga; /* Lock guest addresses for */
3242 Addr dst_ga; /* src/dst of the edge */
3243 ExeContext* src_ec; /* And corresponding places where that */
3244 ExeContext* dst_ec; /* ordering was established */
3245 }
3246 LAOGLinkExposition;
3247
sewardj250ec2e2008-02-15 22:02:30 +00003248static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003249 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3250 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3251 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3252 if (llx1->src_ga < llx2->src_ga) return -1;
3253 if (llx1->src_ga > llx2->src_ga) return 1;
3254 if (llx1->dst_ga < llx2->dst_ga) return -1;
3255 if (llx1->dst_ga > llx2->dst_ga) return 1;
3256 return 0;
3257}
3258
3259static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3260/* end EXPOSITION ONLY */
3261
3262
sewardja65db102009-01-26 10:45:16 +00003263__attribute__((noinline))
3264static void laog__init ( void )
3265{
3266 tl_assert(!laog);
3267 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003268 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003269
3270 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3271 HG_(free), NULL/*unboxedcmp*/ );
3272
3273 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3274 cmp_LAOGLinkExposition );
3275 tl_assert(laog);
3276 tl_assert(laog_exposition);
3277}
3278
sewardjb4112022007-11-09 22:49:28 +00003279static void laog__show ( Char* who ) {
3280 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003281 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003282 Lock* me;
3283 LAOGLinks* links;
3284 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003285 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003286 me = NULL;
3287 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003288 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003289 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003290 tl_assert(me);
3291 tl_assert(links);
3292 VG_(printf)(" node %p:\n", me);
3293 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3294 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003295 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003296 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3297 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003298 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003299 me = NULL;
3300 links = NULL;
3301 }
sewardj896f6f92008-08-19 08:38:52 +00003302 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003303 VG_(printf)("}\n");
3304}
3305
sewardj866c80c2011-10-22 19:29:51 +00003306static void univ_laog_do_GC ( void ) {
3307 Word i;
3308 LAOGLinks* links;
3309 Word seen = 0;
3310 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3311 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3312
3313 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3314 (Int) univ_laog_cardinality
3315 * sizeof(Bool) );
3316 // univ_laog_seen[*] set to 0 (False) by zalloc.
3317
3318 if (VG_(clo_stats))
3319 VG_(message)(Vg_DebugMsg,
3320 "univ_laog_do_GC enter cardinality %'10d\n",
3321 (Int)univ_laog_cardinality);
3322
3323 VG_(initIterFM)( laog );
3324 links = NULL;
3325 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3326 tl_assert(links);
3327 tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3328 univ_laog_seen[links->inns] = True;
3329 tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3330 univ_laog_seen[links->outs] = True;
3331 links = NULL;
3332 }
3333 VG_(doneIterFM)( laog );
3334
3335 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3336 if (univ_laog_seen[i])
3337 seen++;
3338 else
3339 HG_(dieWS) ( univ_laog, (WordSet)i );
3340 }
3341
3342 HG_(free) (univ_laog_seen);
3343
3344 // We need to decide the value of the next_gc.
3345 // 3 solutions were looked at:
3346 // Sol 1: garbage collect at seen * 2
3347 // This solution was a lot slower, probably because we both do a lot of
3348 // garbage collection and do not keep laog WordSets long enough for them
3349 // to become useful again very soon.
3350 // Sol 2: garbage collect at a percentage increase of the current cardinality
3351 // (with a min increase of 1)
3352 // Trials on a small test program with 1%, 5% and 10% increase was done.
3353 // 1% is slightly faster than 5%, which is slightly slower than 10%.
3354 // However, on a big application, this caused the memory to be exhausted,
3355 // as even a 1% increase of size at each gc becomes a lot, when many gc
3356 // are done.
3357 // Sol 3: always garbage collect at current cardinality + 1.
3358 // This solution was the fastest of the 3 solutions, and caused no memory
3359 // exhaustion in the big application.
3360 //
3361 // With regards to cost introduced by gc: on the t2t perf test (doing only
3362 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3363 // version with garbage collection. With t2t 50 20 2, my machine started
3364 // to page out, and so the garbage collected version was much faster.
3365 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3366 // performance difference is insignificant (~ 0.1 s).
3367 // Of course, it might be that real life programs are not well represented
3368 // by t2t.
3369
3370 // If ever we want to have a more sophisticated control
3371 // (e.g. clo options to control the percentage increase or fixed increased),
3372 // we should do it here, eg.
3373 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3374 // Currently, we just hard-code the solution 3 above.
3375 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3376
3377 if (VG_(clo_stats))
3378 VG_(message)
3379 (Vg_DebugMsg,
3380 "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
3381 (Int)seen, next_gc_univ_laog);
3382}
3383
3384
sewardjb4112022007-11-09 22:49:28 +00003385__attribute__((noinline))
3386static void laog__add_edge ( Lock* src, Lock* dst ) {
3387 Word keyW;
3388 LAOGLinks* links;
3389 Bool presentF, presentR;
3390 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3391
3392 /* Take the opportunity to sanity check the graph. Record in
3393 presentF if there is already a src->dst mapping in this node's
3394 forwards links, and presentR if there is already a src->dst
3395 mapping in this node's backwards links. They should agree!
3396 Also, we need to know whether the edge was already present so as
3397 to decide whether or not to update the link details mapping. We
3398 can compute presentF and presentR essentially for free, so may
3399 as well do this always. */
3400 presentF = presentR = False;
3401
3402 /* Update the out edges for src */
3403 keyW = 0;
3404 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003405 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003406 WordSetID outs_new;
3407 tl_assert(links);
3408 tl_assert(keyW == (Word)src);
3409 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3410 presentF = outs_new == links->outs;
3411 links->outs = outs_new;
3412 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003413 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003414 links->inns = HG_(emptyWS)( univ_laog );
3415 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003416 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003417 }
3418 /* Update the in edges for dst */
3419 keyW = 0;
3420 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003421 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003422 WordSetID inns_new;
3423 tl_assert(links);
3424 tl_assert(keyW == (Word)dst);
3425 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3426 presentR = inns_new == links->inns;
3427 links->inns = inns_new;
3428 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003429 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003430 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3431 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003432 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003433 }
3434
3435 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3436
3437 if (!presentF && src->acquired_at && dst->acquired_at) {
3438 LAOGLinkExposition expo;
3439 /* If this edge is entering the graph, and we have acquired_at
3440 information for both src and dst, record those acquisition
3441 points. Hence, if there is later a violation of this
3442 ordering, we can show the user the two places in which the
3443 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003444 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003445 src->guestaddr, dst->guestaddr);
3446 expo.src_ga = src->guestaddr;
3447 expo.dst_ga = dst->guestaddr;
3448 expo.src_ec = NULL;
3449 expo.dst_ec = NULL;
3450 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003451 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003452 /* we already have it; do nothing */
3453 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003454 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3455 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003456 expo2->src_ga = src->guestaddr;
3457 expo2->dst_ga = dst->guestaddr;
3458 expo2->src_ec = src->acquired_at;
3459 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003460 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003461 }
3462 }
sewardj866c80c2011-10-22 19:29:51 +00003463
3464 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3465 univ_laog_do_GC();
sewardjb4112022007-11-09 22:49:28 +00003466}
3467
3468__attribute__((noinline))
3469static void laog__del_edge ( Lock* src, Lock* dst ) {
3470 Word keyW;
3471 LAOGLinks* links;
sewardj866c80c2011-10-22 19:29:51 +00003472 if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
sewardjb4112022007-11-09 22:49:28 +00003473 /* Update the out edges for src */
3474 keyW = 0;
3475 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003476 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003477 tl_assert(links);
3478 tl_assert(keyW == (Word)src);
3479 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3480 }
3481 /* Update the in edges for dst */
3482 keyW = 0;
3483 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003484 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003485 tl_assert(links);
3486 tl_assert(keyW == (Word)dst);
3487 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3488 }
sewardj866c80c2011-10-22 19:29:51 +00003489
3490 /* Remove the exposition of src,dst (if present) */
3491 {
3492 LAOGLinkExposition *fm_expo;
3493
3494 LAOGLinkExposition expo;
3495 expo.src_ga = src->guestaddr;
3496 expo.dst_ga = dst->guestaddr;
3497 expo.src_ec = NULL;
3498 expo.dst_ec = NULL;
3499
3500 if (VG_(delFromFM) (laog_exposition,
3501 (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3502 HG_(free) (fm_expo);
3503 }
3504 }
3505
3506 /* deleting edges can increase the nr of WS so check for gc. */
3507 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3508 univ_laog_do_GC();
3509 if (0) VG_(printf)("laog__del_edge exit\n");
sewardjb4112022007-11-09 22:49:28 +00003510}
3511
3512__attribute__((noinline))
3513static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3514 Word keyW;
3515 LAOGLinks* links;
3516 keyW = 0;
3517 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003518 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003519 tl_assert(links);
3520 tl_assert(keyW == (Word)lk);
3521 return links->outs;
3522 } else {
3523 return HG_(emptyWS)( univ_laog );
3524 }
3525}
3526
3527__attribute__((noinline))
3528static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3529 Word keyW;
3530 LAOGLinks* links;
3531 keyW = 0;
3532 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003533 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003534 tl_assert(links);
3535 tl_assert(keyW == (Word)lk);
3536 return links->inns;
3537 } else {
3538 return HG_(emptyWS)( univ_laog );
3539 }
3540}
3541
3542__attribute__((noinline))
3543static void laog__sanity_check ( Char* who ) {
3544 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003545 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003546 Lock* me;
3547 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003548 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003549 me = NULL;
3550 links = NULL;
3551 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003552 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003553 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003554 tl_assert(me);
3555 tl_assert(links);
3556 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3557 for (i = 0; i < ws_size; i++) {
3558 if ( ! HG_(elemWS)( univ_laog,
3559 laog__succs( (Lock*)ws_words[i] ),
3560 (Word)me ))
3561 goto bad;
3562 }
3563 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3564 for (i = 0; i < ws_size; i++) {
3565 if ( ! HG_(elemWS)( univ_laog,
3566 laog__preds( (Lock*)ws_words[i] ),
3567 (Word)me ))
3568 goto bad;
3569 }
3570 me = NULL;
3571 links = NULL;
3572 }
sewardj896f6f92008-08-19 08:38:52 +00003573 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003574 return;
3575
3576 bad:
3577 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3578 laog__show(who);
3579 tl_assert(0);
3580}
3581
3582/* If there is a path in laog from 'src' to any of the elements in
3583 'dst', return an arbitrarily chosen element of 'dst' reachable from
3584 'src'. If no path exist from 'src' to any element in 'dst', return
3585 NULL. */
3586__attribute__((noinline))
3587static
3588Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3589{
3590 Lock* ret;
3591 Word i, ssz;
3592 XArray* stack; /* of Lock* */
3593 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3594 Lock* here;
3595 WordSetID succs;
3596 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003597 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003598 //laog__sanity_check();
3599
3600 /* If the destination set is empty, we can never get there from
3601 'src' :-), so don't bother to try */
3602 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3603 return NULL;
3604
3605 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003606 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3607 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003608
3609 (void) VG_(addToXA)( stack, &src );
3610
3611 while (True) {
3612
3613 ssz = VG_(sizeXA)( stack );
3614
3615 if (ssz == 0) { ret = NULL; break; }
3616
3617 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3618 VG_(dropTailXA)( stack, 1 );
3619
3620 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3621
sewardj896f6f92008-08-19 08:38:52 +00003622 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003623 continue;
3624
sewardj896f6f92008-08-19 08:38:52 +00003625 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003626
3627 succs = laog__succs( here );
3628 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3629 for (i = 0; i < succs_size; i++)
3630 (void) VG_(addToXA)( stack, &succs_words[i] );
3631 }
3632
sewardj896f6f92008-08-19 08:38:52 +00003633 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003634 VG_(deleteXA)( stack );
3635 return ret;
3636}
3637
3638
3639/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3640 between 'lk' and the locks already held by 'thr' and issue a
3641 complaint if so. Also, update the ordering graph appropriately.
3642*/
3643__attribute__((noinline))
3644static void laog__pre_thread_acquires_lock (
3645 Thread* thr, /* NB: BEFORE lock is added */
3646 Lock* lk
3647 )
3648{
sewardj250ec2e2008-02-15 22:02:30 +00003649 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003650 Word ls_size, i;
3651 Lock* other;
3652
3653 /* It may be that 'thr' already holds 'lk' and is recursively
3654 relocking in. In this case we just ignore the call. */
3655 /* NB: univ_lsets really is correct here */
3656 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3657 return;
3658
sewardjb4112022007-11-09 22:49:28 +00003659 /* First, the check. Complain if there is any path in laog from lk
3660 to any of the locks already held by thr, since if any such path
3661 existed, it would mean that previously lk was acquired before
3662 (rather than after, as we are doing here) at least one of those
3663 locks.
3664 */
3665 other = laog__do_dfs_from_to(lk, thr->locksetA);
3666 if (other) {
3667 LAOGLinkExposition key, *found;
3668 /* So we managed to find a path lk --*--> other in the graph,
3669 which implies that 'lk' should have been acquired before
3670 'other' but is in fact being acquired afterwards. We present
3671 the lk/other arguments to record_error_LockOrder in the order
3672 in which they should have been acquired. */
3673 /* Go look in the laog_exposition mapping, to find the allocation
3674 points for this edge, so we can show the user. */
3675 key.src_ga = lk->guestaddr;
3676 key.dst_ga = other->guestaddr;
3677 key.src_ec = NULL;
3678 key.dst_ec = NULL;
3679 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003680 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003681 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003682 tl_assert(found != &key);
3683 tl_assert(found->src_ga == key.src_ga);
3684 tl_assert(found->dst_ga == key.dst_ga);
3685 tl_assert(found->src_ec);
3686 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003687 HG_(record_error_LockOrder)(
3688 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003689 found->src_ec, found->dst_ec, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003690 } else {
3691 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003692 HG_(record_error_LockOrder)(
3693 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003694 NULL, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003695 }
3696 }
3697
3698 /* Second, add to laog the pairs
3699 (old, lk) | old <- locks already held by thr
3700 Since both old and lk are currently held by thr, their acquired_at
3701 fields must be non-NULL.
3702 */
3703 tl_assert(lk->acquired_at);
3704 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3705 for (i = 0; i < ls_size; i++) {
3706 Lock* old = (Lock*)ls_words[i];
3707 tl_assert(old->acquired_at);
3708 laog__add_edge( old, lk );
3709 }
3710
3711 /* Why "except_Locks" ? We're here because a lock is being
3712 acquired by a thread, and we're in an inconsistent state here.
3713 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3714 When called in this inconsistent state, locks__sanity_check duly
3715 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003716 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003717 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3718}
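/* For reference, the classic client pattern this detects (guarded
   out; mA/mB/thread1/thread2 are made-up names). thread1 installs
   the edge mA -> mB in laog; when thread2 later acquires mA while
   holding mB, the DFS finds the path mA --*--> mB and a LockOrder
   error is reported against the recorded acquisition points. */
#if 0
#include <pthread.h>

static pthread_mutex_t mA = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mB = PTHREAD_MUTEX_INITIALIZER;

static void* thread1 ( void* v ) {
   pthread_mutex_lock(&mA);
   pthread_mutex_lock(&mB);    /* adds edge mA -> mB */
   pthread_mutex_unlock(&mB);
   pthread_mutex_unlock(&mA);
   return NULL;
}

static void* thread2 ( void* v ) {
   pthread_mutex_lock(&mB);
   pthread_mutex_lock(&mA);    /* path mA --*--> mB exists: complaint */
   pthread_mutex_unlock(&mA);
   pthread_mutex_unlock(&mB);
   return NULL;
}
#endif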
3719
sewardj866c80c2011-10-22 19:29:51 +00003720/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3721static UWord* UWordV_dup(UWord* words, Word words_size)
3722{
3723 Word i;
3724
3725 if (words_size == 0)
3726 return NULL;
3727
3728 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3729
3730 for (i = 0; i < words_size; i++)
3731 dup[i] = words[i];
3732
3733 return dup;
3734}
sewardjb4112022007-11-09 22:49:28 +00003735
3736/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3737
3738__attribute__((noinline))
3739static void laog__handle_one_lock_deletion ( Lock* lk )
3740{
3741 WordSetID preds, succs;
3742 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003743 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003744
3745 preds = laog__preds( lk );
3746 succs = laog__succs( lk );
3747
sewardj866c80c2011-10-22 19:29:51 +00003748 // We need to duplicate the payload, as these can be garbage collected
3749 // during the del/add operations below.
sewardjb4112022007-11-09 22:49:28 +00003750 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
sewardj866c80c2011-10-22 19:29:51 +00003751 preds_words = UWordV_dup(preds_words, preds_size);
3752
3753 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3754 succs_words = UWordV_dup(succs_words, succs_size);
3755
sewardjb4112022007-11-09 22:49:28 +00003756 for (i = 0; i < preds_size; i++)
3757 laog__del_edge( (Lock*)preds_words[i], lk );
3758
sewardjb4112022007-11-09 22:49:28 +00003759 for (j = 0; j < succs_size; j++)
3760 laog__del_edge( lk, (Lock*)succs_words[j] );
3761
3762 for (i = 0; i < preds_size; i++) {
3763 for (j = 0; j < succs_size; j++) {
3764 if (preds_words[i] != succs_words[j]) {
3765 /* This can pass unlocked locks to laog__add_edge, since
3766 we're deleting stuff. So their acquired_at fields may
3767 be NULL. */
3768 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3769 }
3770 }
3771 }
sewardj866c80c2011-10-22 19:29:51 +00003772
3773 if (preds_words)
3774 HG_(free) (preds_words);
3775 if (succs_words)
3776 HG_(free) (succs_words);
3777
3778 // Remove lk information from laog links FM
3779 {
3780 LAOGLinks *links;
3781 Lock* linked_lk;
3782
3783 if (VG_(delFromFM) (laog,
3784 (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
3785 tl_assert (linked_lk == lk);
3786 HG_(free) (links);
3787 }
3788 }
3789 /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
sewardjb4112022007-11-09 22:49:28 +00003790}
3791
sewardj1cbc12f2008-11-10 16:16:46 +00003792//__attribute__((noinline))
3793//static void laog__handle_lock_deletions (
3794// WordSetID /* in univ_laog */ locksToDelete
3795// )
3796//{
3797// Word i, ws_size;
3798// UWord* ws_words;
3799//
sewardj1cbc12f2008-11-10 16:16:46 +00003800//
3801// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003802// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003803// for (i = 0; i < ws_size; i++)
3804// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3805//
3806// if (HG_(clo_sanity_flags) & SCE_LAOG)
3807// all__sanity_check("laog__handle_lock_deletions-post");
3808//}
sewardjb4112022007-11-09 22:49:28 +00003809
3810
3811/*--------------------------------------------------------------*/
3812/*--- Malloc/free replacements ---*/
3813/*--------------------------------------------------------------*/
3814
3815typedef
3816 struct {
3817 void* next; /* required by m_hashtable */
3818 Addr payload; /* ptr to actual block */
3819 SizeT szB; /* size requested */
3820 ExeContext* where; /* where it was allocated */
3821 Thread* thr; /* allocating thread */
3822 }
3823 MallocMeta;
3824
3825/* A hash table of MallocMetas, used to track malloc'd blocks
3826 (obviously). */
3827static VgHashTable hg_mallocmeta_table = NULL;
3828
3829
3830static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003831 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003832 tl_assert(md);
3833 return md;
3834}
3835static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003836 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003837}
3838
3839
3840/* Allocate a client block and set up the metadata for it. */
3841
3842static
3843void* handle_alloc ( ThreadId tid,
3844 SizeT szB, SizeT alignB, Bool is_zeroed )
3845{
3846 Addr p;
3847 MallocMeta* md;
3848
3849 tl_assert( ((SSizeT)szB) >= 0 );
3850 p = (Addr)VG_(cli_malloc)(alignB, szB);
3851 if (!p) {
3852 return NULL;
3853 }
3854 if (is_zeroed)
3855 VG_(memset)((void*)p, 0, szB);
3856
3857 /* Note that map_threads_lookup must succeed (cannot assert), since
3858 memory can only be allocated by currently alive threads, hence
3859 they must have an entry in map_threads. */
3860 md = new_MallocMeta();
3861 md->payload = p;
3862 md->szB = szB;
3863 md->where = VG_(record_ExeContext)( tid, 0 );
3864 md->thr = map_threads_lookup( tid );
3865
3866 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3867
3868 /* Tell the lower level memory wranglers. */
3869 evh__new_mem_heap( p, szB, is_zeroed );
3870
3871 return (void*)p;
3872}
3873
3874/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3875 Cast to a signed type to catch any unexpectedly negative args.
3876 We're assuming here that the size asked for is not greater than
3877 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3878 platforms). */
3879static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3880 if (((SSizeT)n) < 0) return NULL;
3881 return handle_alloc ( tid, n, VG_(clo_alignment),
3882 /*is_zeroed*/False );
3883}
3884static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3885 if (((SSizeT)n) < 0) return NULL;
3886 return handle_alloc ( tid, n, VG_(clo_alignment),
3887 /*is_zeroed*/False );
3888}
3889static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3890 if (((SSizeT)n) < 0) return NULL;
3891 return handle_alloc ( tid, n, VG_(clo_alignment),
3892 /*is_zeroed*/False );
3893}
3894static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3895 if (((SSizeT)n) < 0) return NULL;
3896 return handle_alloc ( tid, n, align,
3897 /*is_zeroed*/False );
3898}
3899static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3900 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3901 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3902 /*is_zeroed*/True );
3903}
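/* Worked instance of the less-than-zero check (illustrative numbers):
   on a 32-bit target a wild request malloc(0xFFFFFFF0) arrives with
   n == 0xFFFFFFF0, so (SSizeT)n == -16 < 0 and the request is
   refused rather than being handed to VG_(cli_malloc). */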
3904
3905
3906/* Free a client block, including getting rid of the relevant
3907 metadata. */
3908
3909static void handle_free ( ThreadId tid, void* p )
3910{
3911 MallocMeta *md, *old_md;
3912 SizeT szB;
3913
3914 /* First see if we can find the metadata for 'p'. */
3915 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3916 if (!md)
3917 return; /* apparently freeing a bogus address. Oh well. */
3918
3919 tl_assert(md->payload == (Addr)p);
3920 szB = md->szB;
3921
3922 /* Nuke the metadata block */
3923 old_md = (MallocMeta*)
3924 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3925 tl_assert(old_md); /* it must be present - we just found it */
3926 tl_assert(old_md == md);
3927 tl_assert(old_md->payload == (Addr)p);
3928
3929 VG_(cli_free)((void*)old_md->payload);
3930 delete_MallocMeta(old_md);
3931
3932 /* Tell the lower level memory wranglers. */
3933 evh__die_mem_heap( (Addr)p, szB );
3934}
3935
3936static void hg_cli__free ( ThreadId tid, void* p ) {
3937 handle_free(tid, p);
3938}
3939static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3940 handle_free(tid, p);
3941}
3942static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3943 handle_free(tid, p);
3944}
3945
3946
static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT      i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller; release the tail before updating szB,
         so that the length is computed from the old size */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}
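/* Two points worth noting in the grow path above: the shadow state is
   transferred with evh__copy_mem before the old block's state is
   killed, while the payload bytes themselves are moved by the plain
   byte loop, which (per the FIXME) is invisible to the memory state
   machine.  And because the hash table is keyed on payload address, a
   moved block must be removed and re-added; updating the entry in
   place would leave it on the wrong hash chain. */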

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the
   // asked-for area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}


/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}
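/* Equivalently: membership is the half-open interval
   [payload, payload+szB), except that a zero-sized block is counted
   as containing its own payload address. */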

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
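/* Cost summary: when data_addr lies within the first 16 words of a
   block (the common case for the heap-block client request), this is
   a handful of O(1) hash probes; otherwise it degenerates to a scan
   of every live block, since the hash table can only be probed by
   exact payload address. */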


/*--------------------------------------------------------------*/
/*--- Instrumentation                                         ---*/
/*--------------------------------------------------------------*/

#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))

static void instrument_mem_access ( IRSB*   sbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB,
                                    Int     goff_sp )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  e.g.
            20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      IRTemp guard = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guard,
                tyAddr == Ity_I32
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
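/* For illustration only (a sketch, not verbatim tool output): with
   --check-stack-refs=no on a 64-bit guest, a 4-byte load from address
   'a' picks up IR along these lines:

      t_sp    = GET:I64(offset_SP)
      t_amsp  = Sub64(a, t_sp)
      t_diff  = Add64(t_amsp, RZ)          // RZ = VG_STACK_REDZONE_SZB
      t_guard = CmpLT64U(0x4000, t_diff)   // 0x4000 == THRESH
      if (t_guard) call evh__mem_help_cread_4(a)

   so the helper runs only when 'a' lies outside the window
   (SP-RZ .. SP-RZ+THRESH), i.e. is plausibly not a stack access. */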


/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False; in other words, False is the fail-safe answer. */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const UChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1))            return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1))              return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}
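/* Note that the soname list above is necessarily port-specific: on a
   platform whose dynamic linker advertises a soname not listed here,
   this test silently returns False and the linker's code gets
   (expensively and pointlessly) instrumented. */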

static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   Addr64  cia; /* address of current insn */
   IRStmt* st;
   Bool    inLDSO = False;
   Addr64  inLDSOmask4K = 1; /* mismatches on first check */

   const Int goff_sp = layout->offset_SP;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy), goff_sp
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }

         case Ist_Store:
            /* Plain stores; store-conditionals were already ignored
               in the Ist_LLSC case above. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy), goff_sp
               );
            }
            break;

         case Ist_WrTmp: {
            /* Ordinary loads.  We don't need to distinguish vanilla
               loads from load-linked here, since the latter arrive as
               Ist_LLSC, not as Iex_Load. */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy), goff_sp
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy), goff_sp
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
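/* To summarise the instrumentation policy above: plain loads and
   stores become read/write helper calls; a CAS is treated as a read
   of 1x or 2x its operand size; load-linked is treated as a plain
   read and store-conditional is ignored entirely; dirty helpers are
   instrumented according to their declared mFx memory effects; and
   all code in the dynamic linker is left uninstrumented. */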

#undef binop
#undef mkexpr
#undef mkU32
#undef mkU64
#undef assign


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
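/* For reference, clients reach the user-visible requests above via
   macros in helgrind.h; a sketch of typical usage:

      #include "helgrind.h"
      ...
      VALGRIND_HG_CLEAN_MEMORY(buf, sizeof buf);

   which arrives here as VG_USERREQ__HG_CLEAN_MEMORY with the start
   address in args[1] and the length in args[2]. */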


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
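/* An example invocation exercising these options ('./myprog' is, of
   course, just a placeholder):

      valgrind --tool=helgrind --history-level=approx \
               --conflict-cache-size=2000000 --free-is-write=yes ./myprog

   Anything unrecognised above falls through to the replacement-malloc
   option parser, so core allocator options such as --alignment are
   also accepted here. */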

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:  is never done (may cause big space leaks in Helgrind)\n"
"       auto:   done just often enough to keep space usage under control\n"
"       always: done after every VTS GC (mostly just a big time waster)\n"
   );
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog, "univ_laog" );
         }
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("            LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)(" LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}
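/* Note the split between the two init functions: hg_pre_clo_init
   (below) only registers capabilities and callbacks, whereas
   hg_post_clo_init runs after command-line processing, so anything
   depending on an option value (such as whether the lock-order graph
   gets set up at all) must happen here and not earlier. */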

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/