
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2011 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2011 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
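
/* To make the note above concrete, this is the shape of call it is
   talking about (the same pattern appears in the map_locks code
   further down this file): a Lock* is passed out through a Word*
   parameter and the key is cast to Word, which is what trips gcc's
   strict-aliasing analysis at -O2:

      Bool  found;
      Lock* oldlock = NULL;
      found = VG_(lookupFM)( map_locks,
                             NULL, (Word*)&oldlock, (Word)ga );
*/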

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */
98
sewardjb4112022007-11-09 22:49:28 +000099// 0 for silent, 1 for some stuff, 2 for lots of stuff
100#define SHOW_EVENTS 0
101
sewardjb4112022007-11-09 22:49:28 +0000102
103static void all__sanity_check ( Char* who ); /* fwds */
104
philipped99c26a2012-07-31 22:17:28 +0000105#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
sewardjb4112022007-11-09 22:49:28 +0000106
107// 0 for none, 1 for dump at end of run
108#define SHOW_DATA_STRUCTURES 0
109
110
sewardjb4112022007-11-09 22:49:28 +0000111/* ------------ Misc comments ------------ */
112
113// FIXME: don't hardwire initial entries for root thread.
114// Instead, let the pre_thread_ll_create handler do this.
115
sewardjb4112022007-11-09 22:49:28 +0000116
117/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000118/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000119/*----------------------------------------------------------------*/
120
/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin doubly-linked list of Locks */
/* We need a doubly-linked list to handle del_LockN
   properly and efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */

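/* A sketch of the intended trigger for that collection, assuming
   HG_(cardinalityWSU) from hg_wordset.h; the collection routine
   itself (called univ_laog_do_GC here for illustration) is not part
   of this excerpt:

      if (HG_(cardinalityWSU)(univ_laog) >= next_gc_univ_laog)
         univ_laog_do_GC();
*/
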
/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }

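/* For orientation, a minimal sketch of how a caller of the accessor
   above can walk the lock list via the admin_next links, in the same
   way pp_admin_locks below does ('process' is a placeholder):

      Lock* lk;
      for (lk = HG_(get_admin_locks)(); lk; lk = lk->admin_next)
         process(lk);
*/
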

/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in the admin_locks doubly-linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to doubly-linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from the admin_locks doubly-linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from doubly-linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy = NULL;
      lk->heldW = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin   %p\n",   t->admin);
      space(d+3); VG_(printf)("magic   0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}



/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}


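/* Taken together, the three operations above give the map's
   create-or-find / query / delete lifecycle.  A sketch (ga and tid
   are placeholders; the real call sites are in the evhH__* handlers
   below and in lock-destruction handling):

      Lock* lk = map_locks_lookup_or_create( LK_nonRec, ga, tid );
      tl_assert( map_locks_maybe_lookup(ga) == lk );
      ...
      map_locks_delete(ga);  // when the client destroys the lock
*/
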
/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that
         // thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to
      their owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}

/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}

static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

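/* A sketch of the realloc-style use the comment above anticipates
   (old_ga, new_ga, old_len and new_len are placeholders; the real
   client-realloc handling is not part of this excerpt): copy the
   shadow state of the surviving prefix to the new block, then retire
   the old one.

      SizeT keep = old_len <= new_len ? old_len : new_len;
      shadow_mem_scopy_range( thr, old_ga, new_ga, keep );
      shadow_mem_make_NoAccess_NoFX( thr, old_ga, old_len );
*/
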
static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }

/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of
      this routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}

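
/* For orientation: a sketch of the client-side action that leads to
   the handler above, via the pthread wrappers in hg_intercepts.c
   (not the exact call chain):

      pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&mu);  // on success, the wrapper reports a
                                // w-acquisition of (Addr)&mu, ending
                                // up in evhH__post_thread_w_acquires_lock
*/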

/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of
      this routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}

1190/* The lock at 'lock_ga' is just about to be unlocked. Make all
1191 necessary updates, and also do all possible error checks. */
1192static
1193void evhH__pre_thread_releases_lock ( Thread* thr,
1194 Addr lock_ga, Bool isRDWR )
1195{
1196 Lock* lock;
1197 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001198 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001199
1200 /* This routine is called prior to a lock release, before
1201 libpthread has had a chance to validate the call. Hence we need
1202 to detect and reject any attempts to move the lock into an
1203 invalid state. Such attempts are bugs in the client.
1204
1205 isRDWR is True if we know from the wrapper context that lock_ga
1206 should refer to a reader-writer lock, and is False if [ditto]
1207 lock_ga should refer to a standard mutex. */
1208
sewardjf98e1c02008-10-25 16:22:41 +00001209 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001210 lock = map_locks_maybe_lookup( lock_ga );
1211
1212 if (!lock) {
1213 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1214 the client is trying to unlock it. So complain, then ignore
1215 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001216 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001217 return;
1218 }
1219
1220 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001221 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001222
1223 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001224 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1225 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001226 }
1227 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001228 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1229 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001230 }
1231
1232 if (!lock->heldBy) {
1233 /* The lock is not held. This indicates a serious bug in the
1234 client. */
1235 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001236 HG_(record_error_UnlockUnlocked)( thr, lock );
sewardjb4112022007-11-09 22:49:28 +00001237 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1238 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1239 goto error;
1240 }
1241
sewardjf98e1c02008-10-25 16:22:41 +00001242 /* test just above dominates */
1243 tl_assert(lock->heldBy);
1244 was_heldW = lock->heldW;
1245
sewardjb4112022007-11-09 22:49:28 +00001246 /* The lock is held. Is this thread one of the holders? If not,
1247 report a bug in the client. */
sewardj896f6f92008-08-19 08:38:52 +00001248 n = VG_(elemBag)( lock->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +00001249 tl_assert(n >= 0);
1250 if (n == 0) {
1251 /* We are not a current holder of the lock. This is a bug in
1252 the guest, and (per POSIX pthread rules) the unlock
1253 attempt will fail. So just complain and do nothing
1254 else. */
sewardj896f6f92008-08-19 08:38:52 +00001255 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001256 tl_assert(HG_(is_sane_Thread)(realOwner));
sewardjb4112022007-11-09 22:49:28 +00001257 tl_assert(realOwner != thr);
1258 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1259 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
sewardjf98e1c02008-10-25 16:22:41 +00001260 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
sewardjb4112022007-11-09 22:49:28 +00001261 goto error;
1262 }
1263
1264 /* Ok, we hold the lock 'n' times. */
1265 tl_assert(n >= 1);
1266
1267 lockN_release( lock, thr );
1268
1269 n--;
1270 tl_assert(n >= 0);
1271
1272 if (n > 0) {
1273 tl_assert(lock->heldBy);
sewardj896f6f92008-08-19 08:38:52 +00001274 tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
sewardjb4112022007-11-09 22:49:28 +00001275 /* We still hold the lock. So either it's a recursive lock
1276 or a rwlock which is currently r-held. */
1277 tl_assert(lock->kind == LK_mbRec
1278 || (lock->kind == LK_rdwr && !lock->heldW));
1279 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1280 if (lock->heldW)
1281 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1282 else
1283 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1284 } else {
sewardj983f3022009-05-21 14:49:55 +00001285 /* n is zero. This means we don't hold the lock any more. But
1286 if it's a rwlock held in r-mode, someone else could still
1287 hold it. Just do whatever sanity checks we can. */
1288 if (lock->kind == LK_rdwr && lock->heldBy) {
1289 /* It's a rwlock. We no longer hold it but we used to;
1290 nevertheless it still appears to be held by someone else.
1291 The implication is that, prior to this release, it must
1292 have been shared by us and and whoever else is holding it;
1293 which in turn implies it must be r-held, since a lock
1294 can't be w-held by more than one thread. */
1295 /* The lock is now R-held by somebody else: */
1296 tl_assert(lock->heldW == False);
1297 } else {
1298 /* Normal case. It's either not a rwlock, or it's a rwlock
1299 that we used to hold in w-mode (which is pretty much the
1300 same thing as a non-rwlock.) Since this transaction is
1301 atomic (V does not allow multiple threads to run
1302 simultaneously), it must mean the lock is now not held by
1303 anybody. Hence assert for it. */
1304 /* The lock is now not held by anybody: */
1305 tl_assert(!lock->heldBy);
1306 tl_assert(lock->heldW == False);
1307 }
sewardjf98e1c02008-10-25 16:22:41 +00001308 //if (lock->heldBy) {
1309 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1310 //}
sewardjb4112022007-11-09 22:49:28 +00001311 /* update this thread's lockset accordingly. */
1312 thr->locksetA
1313 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1314 thr->locksetW
1315 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001316 /* push our VC into the lock */
1317 tl_assert(thr->hbthr);
1318 tl_assert(lock->hbso);
1319 /* If the lock was previously W-held, then we want to do a
1320 strong send, and if previously R-held, then a weak send. */
1321 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001322 }
1323 /* fall through */
1324
1325 error:
sewardjf98e1c02008-10-25 16:22:41 +00001326 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001327}
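
/* Illustrative client-side sketch (hypothetical code, not part of
   Helgrind itself) of the "foreign unlock" case handled above:
   thread B releases a mutex that only thread A holds.  Per POSIX the
   unlock attempt fails, so the handler reports UnlockForeign and
   leaves all locksets unchanged.

      #include <pthread.h>
      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      static void* b_fn ( void* v ) {
         pthread_mutex_unlock(&mx);    // B does not hold mx: reported
         return NULL;
      }
      int main ( void ) {
         pthread_t b;
         pthread_mutex_lock(&mx);      // A (main) now w-holds mx
         pthread_create(&b, NULL, b_fn, NULL);
         pthread_join(b, NULL);
         pthread_mutex_unlock(&mx);
         return 0;
      }
*/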
1328
1329
sewardj9f569b72008-11-13 13:33:09 +00001330/* ---------------------------------------------------------- */
1331/* -------- Event handlers proper (evh__* functions) -------- */
1332/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001333
1334/* What is the Thread* for the currently running thread? This is
1335 absolutely performance critical. We receive notifications from the
1336 core for client code starts/stops, and cache the looked-up result
1337 in 'current_Thread'. Hence, for the vast majority of requests,
1338 finding the current thread reduces to a read of a global variable,
1339 provided get_current_Thread_in_C_C is inlined.
1340
1341 Outside of client code, current_Thread is NULL, and presumably
1342 any uses of it will cause a segfault. Hence:
1343
1344 - for uses definitely within client code, use
1345 get_current_Thread_in_C_C.
1346
1347 - for all other uses, use get_current_Thread.
1348*/
1349
sewardj23f12002009-07-24 08:45:08 +00001350static Thread *current_Thread = NULL,
1351 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001352
1353static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1354 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1355 tl_assert(current_Thread == NULL);
1356 current_Thread = map_threads_lookup( tid );
1357 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001358 if (current_Thread != current_Thread_prev) {
1359 libhb_Thr_resumes( current_Thread->hbthr );
1360 current_Thread_prev = current_Thread;
1361 }
sewardjb4112022007-11-09 22:49:28 +00001362}
1363static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1364 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1365 tl_assert(current_Thread != NULL);
1366 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001367 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001368}
1369static inline Thread* get_current_Thread_in_C_C ( void ) {
1370 return current_Thread;
1371}
1372static inline Thread* get_current_Thread ( void ) {
1373 ThreadId coretid;
1374 Thread* thr;
1375 thr = get_current_Thread_in_C_C();
1376 if (LIKELY(thr))
1377 return thr;
1378 /* evidently not in client code. Do it the slow way. */
1379 coretid = VG_(get_running_tid)();
1380 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001381 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001382 of initial memory layout) and VG_(get_running_tid)() returns
1383 VG_INVALID_THREADID at that point. */
1384 if (coretid == VG_INVALID_THREADID)
1385 coretid = 1; /* KLUDGE */
1386 thr = map_threads_lookup( coretid );
1387 return thr;
1388}
1389
1390static
1391void evh__new_mem ( Addr a, SizeT len ) {
1392 if (SHOW_EVENTS >= 2)
1393 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1394 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001395 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001396 all__sanity_check("evh__new_mem-post");
1397}
1398
1399static
sewardj1f77fec2010-04-12 19:51:04 +00001400void evh__new_mem_stack ( Addr a, SizeT len ) {
1401 if (SHOW_EVENTS >= 2)
1402 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1403 shadow_mem_make_New( get_current_Thread(),
1404 -VG_STACK_REDZONE_SZB + a, len );
1405 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1406 all__sanity_check("evh__new_mem_stack-post");
1407}
1408
1409static
sewardj7cf4e6b2008-05-01 20:24:26 +00001410void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1411 if (SHOW_EVENTS >= 2)
1412 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1413 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001414 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001415 all__sanity_check("evh__new_mem_w_tid-post");
1416}
1417
1418static
sewardjb4112022007-11-09 22:49:28 +00001419void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001420 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001421 if (SHOW_EVENTS >= 1)
1422 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1423 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1424 if (rr || ww || xx)
1425 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001426 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001427 all__sanity_check("evh__new_mem_w_perms-post");
1428}
1429
1430static
1431void evh__set_perms ( Addr a, SizeT len,
1432 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001433 // This handles mprotect requests. If the memory is being put
1434 // into no-R no-W state, paint it as NoAccess, for the reasons
1435 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001436 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001437 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001438 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1439 /* Hmm. What should we do here, that actually makes any sense?
1440 Let's say: if neither readable nor writable, then declare it
1441 NoAccess, else leave it alone. */
1442 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001443 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001444 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001445 all__sanity_check("evh__set_perms-post");
1446}
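
/* For instance (hypothetical client code): an mprotect that removes
   both read and write permission causes the range to be painted
   NoAccess by the handler above, while any other combination leaves
   it alone.

      #include <sys/mman.h>
      // assuming 'p' points at a page-aligned, page-sized mapping
      mprotect(p, 4096, PROT_NONE);   // !rr && !ww -> NoAccess
      mprotect(p, 4096, PROT_READ);   // rr -> left alone
*/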
1447
1448static
1449void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001450 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001451 if (SHOW_EVENTS >= 2)
1452 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001453 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001454 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001455 all__sanity_check("evh__die_mem-post");
1456}
1457
1458static
sewardjfd35d492011-03-17 19:39:55 +00001459void evh__die_mem_munmap ( Addr a, SizeT len ) {
1460 // It's important that libhb doesn't ignore this. If, as is likely,
1461 // the client is subject to address space layout randomization,
1462 // then unmapped areas may never get remapped over, even in long
1463 // runs. If we just ignore them we wind up with large resource
1464 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1465 // VTS references in the affected area are dropped. Marking memory
1466 // as NoAccess is expensive, but we assume that munmap is sufficiently
1467 // rare that the space gains of doing this are worth the costs.
1468 if (SHOW_EVENTS >= 2)
1469 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1470 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1471}
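
/* E.g. (hypothetical client pattern): a long-running program that
   repeatedly maps and unmaps fresh anonymous memory

      void* p = mmap(NULL, sz, PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      // ... threads touch *p, creating VTS references ...
      munmap(p, sz);

   tends, under ASLR, never to reuse the same address ranges, so
   without the NoAccess sweep above the VTSs referenced in those
   ranges would never be reclaimed. */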
1472
1473static
sewardj406bac82010-03-03 23:03:40 +00001474void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001475 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001476 if (SHOW_EVENTS >= 2)
1477 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1478 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1479 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1480 all__sanity_check("evh__untrack_mem-post");
1481}
1482
1483static
sewardj23f12002009-07-24 08:45:08 +00001484void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1485 if (SHOW_EVENTS >= 2)
1486 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1487 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1488 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1489 all__sanity_check("evh__copy_mem-post");
1490}
1491
1492static
sewardjb4112022007-11-09 22:49:28 +00001493void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1494{
1495 if (SHOW_EVENTS >= 1)
1496 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1497 (Int)parent, (Int)child );
1498
1499 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001500 Thread* thr_p;
1501 Thread* thr_c;
1502 Thr* hbthr_p;
1503 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001504
sewardjf98e1c02008-10-25 16:22:41 +00001505 tl_assert(HG_(is_sane_ThreadId)(parent));
1506 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001507 tl_assert(parent != child);
1508
1509 thr_p = map_threads_maybe_lookup( parent );
1510 thr_c = map_threads_maybe_lookup( child );
1511
1512 tl_assert(thr_p != NULL);
1513 tl_assert(thr_c == NULL);
1514
sewardjf98e1c02008-10-25 16:22:41 +00001515 hbthr_p = thr_p->hbthr;
1516 tl_assert(hbthr_p != NULL);
sewardj60626642011-03-10 15:14:37 +00001517 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001518
sewardjf98e1c02008-10-25 16:22:41 +00001519 hbthr_c = libhb_create ( hbthr_p );
1520
1521 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001522 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001523 thr_c = mk_Thread( hbthr_c );
sewardj60626642011-03-10 15:14:37 +00001524 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1525 libhb_set_Thr_hgthread(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001526
1527 /* and bind it in the thread-map table */
1528 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001529 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1530 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001531
1532 /* Record where the parent is so we can later refer to this in
1533 error messages.
1534
1535 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1536 The stack snapshot is taken immediately after the parent has
1537 returned from its sys_clone call. Unfortunately there is no
1538 unwind info for the insn following "syscall" - reading the
1539 glibc sources confirms this. So we ask for a snapshot to be
1540 taken as if RIP was 3 bytes earlier, in a place where there
1541 is unwind info. Sigh.
1542 */
1543 { Word first_ip_delta = 0;
1544# if defined(VGP_amd64_linux)
1545 first_ip_delta = -3;
1546# endif
1547 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1548 }
sewardjb4112022007-11-09 22:49:28 +00001549 }
1550
sewardjf98e1c02008-10-25 16:22:41 +00001551 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001552 all__sanity_check("evh__pre_thread_create-post");
1553}
1554
1555static
1556void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1557{
1558 Int nHeld;
1559 Thread* thr_q;
1560 if (SHOW_EVENTS >= 1)
1561 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1562 (Int)quit_tid );
1563
1564 /* quit_tid has disappeared without joining to any other thread.
1565 Therefore there is no synchronisation event associated with its
1566 exit and so we have to pretty much treat it as if it was still
1567 alive but mysteriously making no progress. That is because, if
1568 we don't know when it really exited, then we can never say there
1569 is a point in time when we're sure the thread really has
1570 finished, and so we need to consider the possibility that it
1571 lingers indefinitely and continues to interact with other
1572 threads. */
1573 /* However, it might have rendezvous'd with a thread that called
1574 pthread_join with this one as arg, prior to this point (that's
1575 how NPTL works). In which case there has already been a prior
1576 sync event. So in any case, just let the thread exit. On NPTL,
1577 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001578 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001579 thr_q = map_threads_maybe_lookup( quit_tid );
1580 tl_assert(thr_q != NULL);
1581
1582 /* Complain if this thread holds any locks. */
1583 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1584 tl_assert(nHeld >= 0);
1585 if (nHeld > 0) {
1586 HChar buf[80];
1587 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1588 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001589 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001590 }
1591
sewardj23f12002009-07-24 08:45:08 +00001592 /* Not much to do here:
1593 - tell libhb the thread is gone
1594 - clear the map_threads entry, in order that the Valgrind core
1595 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001596 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1597 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001598 tl_assert(thr_q->hbthr);
1599 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001600 tl_assert(thr_q->coretid == quit_tid);
1601 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001602 map_threads_delete( quit_tid );
1603
sewardjf98e1c02008-10-25 16:22:41 +00001604 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001605 all__sanity_check("evh__pre_thread_ll_exit-post");
1606}
1607
sewardj61bc2c52011-02-09 10:34:00 +00001608/* This is called immediately after fork, for the child only. 'tid'
1609 is the only surviving thread (as per POSIX rules on fork() in
1610 threaded programs), so we have to clean up map_threads to remove
1611 entries for any other threads. */
1612static
1613void evh__atfork_child ( ThreadId tid )
1614{
1615 UInt i;
1616 Thread* thr;
1617 /* Slot 0 should never be used. */
1618 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1619 tl_assert(!thr);
1620 /* Clean up all other slots except 'tid'. */
1621 for (i = 1; i < VG_N_THREADS; i++) {
1622 if (i == tid)
1623 continue;
1624 thr = map_threads_maybe_lookup(i);
1625 if (!thr)
1626 continue;
1627 /* Cleanup actions (next 5 lines) copied from end of
1628 evh__pre_thread_ll_exit; keep in sync. */
1629 tl_assert(thr->hbthr);
1630 libhb_async_exit(thr->hbthr);
1631 tl_assert(thr->coretid == i);
1632 thr->coretid = VG_INVALID_THREADID;
1633 map_threads_delete(i);
1634 }
1635}
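
/* Hypothetical client sketch (not part of Helgrind) of the situation
   handled above: after fork() in a multithreaded process, POSIX says
   only the forking thread survives in the child, so every other
   map_threads entry is stale there and must be cleared.

      #include <pthread.h>
      #include <unistd.h>
      static void* idle ( void* v ) { while (1) pause(); return NULL; }
      int main ( void ) {
         pthread_t t;
         pthread_create(&t, NULL, idle, NULL);
         if (fork() == 0) {
            // child: thread 't' does not exist here
            _exit(0);
         }
         return 0;
      }
*/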
1636
sewardjf98e1c02008-10-25 16:22:41 +00001637
sewardjb4112022007-11-09 22:49:28 +00001638static
1639void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1640{
sewardjb4112022007-11-09 22:49:28 +00001641 Thread* thr_s;
1642 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001643 Thr* hbthr_s;
1644 Thr* hbthr_q;
1645 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001646
1647 if (SHOW_EVENTS >= 1)
1648 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1649 (Int)stay_tid, quit_thr );
1650
sewardjf98e1c02008-10-25 16:22:41 +00001651 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001652
1653 thr_s = map_threads_maybe_lookup( stay_tid );
1654 thr_q = quit_thr;
1655 tl_assert(thr_s != NULL);
1656 tl_assert(thr_q != NULL);
1657 tl_assert(thr_s != thr_q);
1658
sewardjf98e1c02008-10-25 16:22:41 +00001659 hbthr_s = thr_s->hbthr;
1660 hbthr_q = thr_q->hbthr;
1661 tl_assert(hbthr_s != hbthr_q);
sewardj60626642011-03-10 15:14:37 +00001662 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1663 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001664
sewardjf98e1c02008-10-25 16:22:41 +00001665 /* Allocate a temporary synchronisation object and use it to send
1666 an imaginary message from the quitter to the stayer, the purpose
1667 being to generate a dependence from the quitter to the
1668 stayer. */
1669 so = libhb_so_alloc();
1670 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001671 /* Note: this is a strong send even though the sending thread
1672 doesn't actually exist any more; arguably it should be weak,
1673 so that _so_send doesn't try taking stack snapshots of it. */
sewardjffce8152011-06-24 10:09:41 +00001674 libhb_so_send(hbthr_q, so, True/*strong_send*/);
sewardjf98e1c02008-10-25 16:22:41 +00001675 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1676 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001677
sewardjffce8152011-06-24 10:09:41 +00001678 /* Tell libhb that the quitter has been reaped. Note that we might
1679 have to be cleverer about this, to exclude 2nd and subsequent
1680 notifications for the same hbthr_q, in the case where the app is
1681 buggy (calls pthread_join twice or more on the same thread) AND
1682 where libpthread is also buggy and doesn't return ESRCH on
1683 subsequent calls. (If libpthread isn't thusly buggy, then the
1684 wrapper for pthread_join in hg_intercepts.c will stop us getting
1685 notified here multiple times for the same joinee.) See also
1686 comments in helgrind/tests/jointwice.c. */
1687 libhb_joinedwith_done(hbthr_q);
1688
sewardjf98e1c02008-10-25 16:22:41 +00001689 /* evh__pre_thread_ll_exit issues an error message if the exiting
1690 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001691
1692 /* This holds because, at least when using NPTL as the thread
1693 library, we should be notified of the low level thread exit before
1694 we hear of any join event on it. The low level exit
1695 notification feeds through into evh__pre_thread_ll_exit,
1696 which should clear the map_threads entry for it. Hence we
1697 expect there to be no map_threads entry at this point. */
1698 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1699 == VG_INVALID_THREADID);
1700
sewardjf98e1c02008-10-25 16:22:41 +00001701 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001702 all__sanity_check("evh__post_thread_join-post");
1703}
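
/* The double-join case mentioned above, in client terms
   (hypothetical code; 'fn' is a placeholder; cf.
   helgrind/tests/jointwice.c):

      pthread_t t;
      pthread_create(&t, NULL, fn, NULL);
      pthread_join(t, NULL);   // ok: quitter -> stayer dependency
      pthread_join(t, NULL);   // buggy: a correct libpthread returns
                               // ESRCH, so the wrapper never notifies
                               // us a second time for the same joinee
*/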
1704
1705static
1706void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1707 Addr a, SizeT size) {
1708 if (SHOW_EVENTS >= 2
1709 || (SHOW_EVENTS >= 1 && size != 1))
1710 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1711 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001712 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001713 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001714 all__sanity_check("evh__pre_mem_read-post");
1715}
1716
1717static
1718void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1719 Char* s, Addr a ) {
1720 Int len;
1721 if (SHOW_EVENTS >= 1)
1722 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1723 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001724 // Don't segfault if the string starts in an obviously stupid
1725 // place. Actually we should check the whole string, not just
1726 // the start address, but that's too much trouble. At least
1727 // checking the first byte is better than nothing. See #255009.
1728 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1729 return;
sewardjb4112022007-11-09 22:49:28 +00001730 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001731 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001732 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001733 all__sanity_check("evh__pre_mem_read_asciiz-post");
1734}
1735
1736static
1737void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1738 Addr a, SizeT size ) {
1739 if (SHOW_EVENTS >= 1)
1740 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1741 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001742 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001743 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001744 all__sanity_check("evh__pre_mem_write-post");
1745}
1746
1747static
1748void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1749 if (SHOW_EVENTS >= 1)
1750 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1751 (void*)a, len, (Int)is_inited );
1752 // FIXME: this is kinda stupid: is_inited currently makes no
1753 // difference, so both branches collapse to the same call.
1754 shadow_mem_make_New(get_current_Thread(), a, len);
sewardjf98e1c02008-10-25 16:22:41 +00001758 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001759 all__sanity_check("evh__new_mem_heap-post");
1760}
1761
1762static
1763void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001764 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001765 if (SHOW_EVENTS >= 1)
1766 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001767 thr = get_current_Thread();
1768 tl_assert(thr);
1769 if (HG_(clo_free_is_write)) {
1770 /* Treat frees as if the memory was written immediately prior to
1771 the free. This shakes out more races, specifically, cases
1772 where memory is referenced by one thread, and freed by
1773 another, and there's no observable synchronisation event to
1774 guarantee that the reference happens before the free. */
1775 shadow_mem_cwrite_range(thr, a, len);
1776 }
sewardjfd35d492011-03-17 19:39:55 +00001777 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001778 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001779 all__sanity_check("evh__die_mem_heap-post");
1780}
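
/* Hypothetical sketch of the race that free-is-write handling
   (HG_(clo_free_is_write)) is meant to shake out: one thread reads a
   heap block while another frees it, with no synchronisation
   ordering the two.

      int* p = malloc(sizeof(int));   // shared between T1 and T2
      // T1:  int x = *p;             // read of *p
      // T2:  free(p);                // treated as a write to *p, so
      //                              // the T1/T2 pair is reported
*/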
1781
sewardj23f12002009-07-24 08:45:08 +00001782/* --- Event handlers called from generated code --- */
1783
sewardjb4112022007-11-09 22:49:28 +00001784static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001785void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001786 Thread* thr = get_current_Thread_in_C_C();
1787 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001788 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001789}
sewardjf98e1c02008-10-25 16:22:41 +00001790
sewardjb4112022007-11-09 22:49:28 +00001791static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001792void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001793 Thread* thr = get_current_Thread_in_C_C();
1794 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001795 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001796}
sewardjf98e1c02008-10-25 16:22:41 +00001797
sewardjb4112022007-11-09 22:49:28 +00001798static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001799void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001800 Thread* thr = get_current_Thread_in_C_C();
1801 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001802 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001803}
sewardjf98e1c02008-10-25 16:22:41 +00001804
sewardjb4112022007-11-09 22:49:28 +00001805static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001806void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001807 Thread* thr = get_current_Thread_in_C_C();
1808 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001809 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001810}
sewardjf98e1c02008-10-25 16:22:41 +00001811
sewardjb4112022007-11-09 22:49:28 +00001812static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001813void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001814 Thread* thr = get_current_Thread_in_C_C();
1815 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001816 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001817}
1818
1819static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001820void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001821 Thread* thr = get_current_Thread_in_C_C();
1822 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001823 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001824}
sewardjf98e1c02008-10-25 16:22:41 +00001825
sewardjb4112022007-11-09 22:49:28 +00001826static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001827void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001828 Thread* thr = get_current_Thread_in_C_C();
1829 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001830 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001831}
sewardjf98e1c02008-10-25 16:22:41 +00001832
sewardjb4112022007-11-09 22:49:28 +00001833static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001834void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001835 Thread* thr = get_current_Thread_in_C_C();
1836 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001837 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001838}
sewardjf98e1c02008-10-25 16:22:41 +00001839
sewardjb4112022007-11-09 22:49:28 +00001840static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001841void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001842 Thread* thr = get_current_Thread_in_C_C();
1843 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001844 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001845}
sewardjf98e1c02008-10-25 16:22:41 +00001846
sewardjb4112022007-11-09 22:49:28 +00001847static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001848void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001849 Thread* thr = get_current_Thread_in_C_C();
1850 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001851 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001852}
1853
sewardjb4112022007-11-09 22:49:28 +00001854
sewardj9f569b72008-11-13 13:33:09 +00001855/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001856/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001857/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001858
1859/* EXPOSITION only: by intercepting lock init events we can show the
1860 user where the lock was initialised, rather than only being able to
1861 show where it was first locked. Intercepting lock initialisations
1862 is not necessary for the basic operation of the race checker. */
1863static
1864void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1865 void* mutex, Word mbRec )
1866{
1867 if (SHOW_EVENTS >= 1)
1868 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1869 (Int)tid, mbRec, (void*)mutex );
1870 tl_assert(mbRec == 0 || mbRec == 1);
1871 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1872 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001873 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001874 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1875}
1876
1877static
1878void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1879{
1880 Thread* thr;
1881 Lock* lk;
1882 if (SHOW_EVENTS >= 1)
1883 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1884 (Int)tid, (void*)mutex );
1885
1886 thr = map_threads_maybe_lookup( tid );
1887 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001888 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001889
1890 lk = map_locks_maybe_lookup( (Addr)mutex );
1891
1892 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001893 HG_(record_error_Misc)(
1894 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001895 }
1896
1897 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001898 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001899 tl_assert( lk->guestaddr == (Addr)mutex );
1900 if (lk->heldBy) {
1901 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001902 HG_(record_error_Misc)(
1903 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001904 /* remove lock from locksets of all owning threads */
1905 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001906 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001907 lk->heldBy = NULL;
1908 lk->heldW = False;
1909 lk->acquired_at = NULL;
1910 }
1911 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001912 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001913
1914 if (HG_(clo_track_lockorders))
1915 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001916 map_locks_delete( lk->guestaddr );
1917 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001918 }
1919
sewardjf98e1c02008-10-25 16:22:41 +00001920 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001921 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1922}
1923
1924static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1925 void* mutex, Word isTryLock )
1926{
1927 /* Just check the mutex is sane; nothing else to do. */
1928 // 'mutex' may be invalid - not checked by wrapper
1929 Thread* thr;
1930 Lock* lk;
1931 if (SHOW_EVENTS >= 1)
1932 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1933 (Int)tid, (void*)mutex );
1934
1935 tl_assert(isTryLock == 0 || isTryLock == 1);
1936 thr = map_threads_maybe_lookup( tid );
1937 tl_assert(thr); /* cannot fail - Thread* must already exist */
1938
1939 lk = map_locks_maybe_lookup( (Addr)mutex );
1940
1941 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001942 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1943 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001944 }
1945
1946 if ( lk
1947 && isTryLock == 0
1948 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1949 && lk->heldBy
1950 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001951 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001952 /* uh, it's a non-recursive lock and we already w-hold it, and
1953 this is a real lock operation (not a speculative "tryLock"
1954 kind of thing). Duh. Deadlock coming up; but at least
1955 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00001956 HChar* errstr = "Attempt to re-lock a "
1957 "non-recursive lock I already hold";
1958 HChar* auxstr = "Lock was previously acquired";
1959 if (lk->acquired_at) {
1960 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
1961 } else {
1962 HG_(record_error_Misc)( thr, errstr );
1963 }
sewardjb4112022007-11-09 22:49:28 +00001964 }
1965}
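
/* The self-deadlock case diagnosed above, in client terms
   (hypothetical code):

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER; // non-recursive
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // would deadlock; the error is
                                 // reported before the attempt blocks
*/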
1966
1967static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1968{
1969 // only called if the real library call succeeded - so mutex is sane
1970 Thread* thr;
1971 if (SHOW_EVENTS >= 1)
1972 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1973 (Int)tid, (void*)mutex );
1974
1975 thr = map_threads_maybe_lookup( tid );
1976 tl_assert(thr); /* cannot fail - Thread* must already exist */
1977
1978 evhH__post_thread_w_acquires_lock(
1979 thr,
1980 LK_mbRec, /* if not known, create new lock with this LockKind */
1981 (Addr)mutex
1982 );
1983}
1984
1985static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1986{
1987 // 'mutex' may be invalid - not checked by wrapper
1988 Thread* thr;
1989 if (SHOW_EVENTS >= 1)
1990 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1991 (Int)tid, (void*)mutex );
1992
1993 thr = map_threads_maybe_lookup( tid );
1994 tl_assert(thr); /* cannot fail - Thread* must already exist */
1995
1996 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1997}
1998
1999static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2000{
2001 // only called if the real library call succeeded - so mutex is sane
2002 Thread* thr;
2003 if (SHOW_EVENTS >= 1)
2004 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2005 (Int)tid, (void*)mutex );
2006 thr = map_threads_maybe_lookup( tid );
2007 tl_assert(thr); /* cannot fail - Thread* must already exist */
2008
2009 // anything we should do here?
2010}
2011
2012
sewardj5a644da2009-08-11 10:35:58 +00002013/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002014/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002015/* ------------------------------------------------------- */
2016
2017/* All a bit of a kludge. Pretend we're really dealing with ordinary
2018 pthread_mutex_t's instead, for the most part. */
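
/* In client terms (hypothetical code; the underlying assumption,
   noted above, is that glibc implements spinlock init and unlock as
   what is, from an interception standpoint, the same "reset to
   unlocked" operation):

      pthread_spinlock_t sl;
      pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE); // INIT_OR_UNLOCK
      pthread_spin_lock(&sl);                          // LOCK
      pthread_spin_unlock(&sl);                        // INIT_OR_UNLOCK
*/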
2019
2020static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2021 void* slock )
2022{
2023 Thread* thr;
2024 Lock* lk;
2025 /* In glibc's kludgey world, we're either initialising or unlocking
2026 it. Since this is the pre-routine, if it is locked, unlock it
2027 and take a dependence edge. Otherwise, do nothing. */
2028
2029 if (SHOW_EVENTS >= 1)
2030 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2031 "(ctid=%d, slock=%p)\n",
2032 (Int)tid, (void*)slock );
2033
2034 thr = map_threads_maybe_lookup( tid );
2035 /* cannot fail - Thread* must already exist */
2036 tl_assert( HG_(is_sane_Thread)(thr) );
2037
2038 lk = map_locks_maybe_lookup( (Addr)slock );
2039 if (lk && lk->heldBy) {
2040 /* it's held. So do the normal pre-unlock actions, as copied
2041 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2042 duplicates the map_locks_maybe_lookup. */
2043 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2044 False/*!isRDWR*/ );
2045 }
2046}
2047
2048static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2049 void* slock )
2050{
2051 Lock* lk;
2052 /* More kludgery. If the lock has never been seen before, do
2053 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2054 nothing. */
2055
2056 if (SHOW_EVENTS >= 1)
2057 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2058 "(ctid=%d, slock=%p)\n",
2059 (Int)tid, (void*)slock );
2060
2061 lk = map_locks_maybe_lookup( (Addr)slock );
2062 if (!lk) {
2063 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2064 }
2065}
2066
2067static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2068 void* slock, Word isTryLock )
2069{
2070 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2071}
2072
2073static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2074 void* slock )
2075{
2076 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2077}
2078
2079static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2080 void* slock )
2081{
2082 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2083}
2084
2085
sewardj9f569b72008-11-13 13:33:09 +00002086/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002087/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002088/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002089
sewardj02114542009-07-28 20:52:36 +00002090/* A mapping from CV to (the SO associated with it, plus some
2091 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002092 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2093 wait on it completes, we do a 'recv' from the SO. This is believed
2094 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002095 signallings/broadcasts.
2096*/
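
/* Concretely (hypothetical client code; mx, cv and flag are
   placeholders): T2 below acquires a dependency on T1's write to
   'flag', via the send on the CV's SO at signal time and the recv
   when the wait completes:

      // T1:                           // T2:
      pthread_mutex_lock(&mx);         pthread_mutex_lock(&mx);
      flag = 1;                        while (!flag)
      pthread_cond_signal(&cv);           pthread_cond_wait(&cv, &mx);
      pthread_mutex_unlock(&mx);       pthread_mutex_unlock(&mx);
*/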
2097
sewardj02114542009-07-28 20:52:36 +00002098/* .so is the SO for this CV.
2099 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002100
sewardj02114542009-07-28 20:52:36 +00002101 POSIX says effectively that the first pthread_cond_{timed}wait call
2102 causes a dynamic binding between the CV and the mutex, and that
2103 lasts until such time as the waiter count falls to zero. Hence
2104 need to keep track of the number of waiters in order to do
2105 consistency tracking. */
2106typedef
2107 struct {
2108 SO* so; /* libhb-allocated SO */
2109 void* mx_ga; /* addr of associated mutex, if any */
2110 UWord nWaiters; /* # threads waiting on the CV */
2111 }
2112 CVInfo;
2113
2114
2115/* pthread_cond_t* -> CVInfo* */
2116static WordFM* map_cond_to_CVInfo = NULL;
2117
2118static void map_cond_to_CVInfo_INIT ( void ) {
2119 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2120 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2121 "hg.mctCI.1", HG_(free), NULL );
2122 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002123 }
2124}
2125
sewardj02114542009-07-28 20:52:36 +00002126static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002127 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002128 map_cond_to_CVInfo_INIT();
2129 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002130 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002131 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002132 } else {
sewardj02114542009-07-28 20:52:36 +00002133 SO* so = libhb_so_alloc();
2134 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2135 cvi->so = so;
2136 cvi->mx_ga = 0;
2137 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2138 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002139 }
2140}
2141
philippe8bfc2152012-07-06 23:38:24 +00002142static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2143 UWord key, val;
2144 map_cond_to_CVInfo_INIT();
2145 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2146 tl_assert(key == (UWord)cond);
2147 return (CVInfo*)val;
2148 } else {
2149 return NULL;
2150 }
2151}
2152
2153static void map_cond_to_CVInfo_delete ( ThreadId tid, void* cond ) {
2154 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +00002155 UWord keyW, valW;
philippe8bfc2152012-07-06 23:38:24 +00002156
2157 thr = map_threads_maybe_lookup( tid );
2158 tl_assert(thr); /* cannot fail - Thread* must already exist */
2159
sewardj02114542009-07-28 20:52:36 +00002160 map_cond_to_CVInfo_INIT();
2161 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2162 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002163 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002164 tl_assert(cvi);
2165 tl_assert(cvi->so);
philippe8bfc2152012-07-06 23:38:24 +00002166 if (cvi->nWaiters > 0) {
2167 HG_(record_error_Misc)(thr,
2168 "pthread_cond_destroy:"
2169 " destruction of condition variable being waited upon");
2170 }
sewardj02114542009-07-28 20:52:36 +00002171 libhb_so_dealloc(cvi->so);
2172 cvi->mx_ga = 0;
2173 HG_(free)(cvi);
philippe8bfc2152012-07-06 23:38:24 +00002174 } else {
2175 HG_(record_error_Misc)(thr,
2176 "pthread_cond_destroy: destruction of unknown cond var");
sewardjb4112022007-11-09 22:49:28 +00002177 }
2178}
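
/* The "being waited upon" complaint above corresponds to this
   hypothetical client interleaving:

      // T1:                          // T2:
      pthread_cond_wait(&cv, &mx);    pthread_cond_destroy(&cv);
      // cvi->nWaiters > 0 when the destroy arrives -> Misc error
*/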
2179
2180static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2181{
sewardjf98e1c02008-10-25 16:22:41 +00002182 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2183 cond to a SO if it is not already so bound, and 'send' on the
2184 SO. This is later used by other thread(s) which successfully
2185 exit from a pthread_cond_wait on the same cv; then they 'recv'
2186 from the SO, thereby acquiring a dependency on this signalling
2187 event. */
sewardjb4112022007-11-09 22:49:28 +00002188 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002189 CVInfo* cvi;
2190 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002191
2192 if (SHOW_EVENTS >= 1)
2193 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2194 (Int)tid, (void*)cond );
2195
sewardjb4112022007-11-09 22:49:28 +00002196 thr = map_threads_maybe_lookup( tid );
2197 tl_assert(thr); /* cannot fail - Thread* must already exist */
2198
sewardj02114542009-07-28 20:52:36 +00002199 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2200 tl_assert(cvi);
2201 tl_assert(cvi->so);
2202
sewardjb4112022007-11-09 22:49:28 +00002203 // error-if: mutex is bogus
2204 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002205 // Hmm. POSIX doesn't actually say that it's an error to call
2206 // pthread_cond_signal with the associated mutex being unlocked.
2207 // Although it does say the mutex should be held "if consistent
sewardjffce8152011-06-24 10:09:41 +00002208 // scheduling is desired." For that reason, print "dubious" if the
2209 // lock isn't held by any thread. Omit the "dubious" if it is held
2210 // by some other thread, since that case is straight-out wrong.
sewardj02114542009-07-28 20:52:36 +00002211 //
sewardjffce8152011-06-24 10:09:41 +00002212 // Anybody who writes code that signals on a CV without holding
2213 // the associated MX needs to be shipped off to a lunatic asylum
2214 // ASAP, even though POSIX doesn't actually declare such behaviour
2215 // illegal -- it makes code extremely difficult to understand/
2216 // reason about. In particular it puts the signalling thread in
2217 // a situation where it is racing against the released waiter
2218 // as soon as the signalling is done, and so there needs to be
2219 // some auxiliary synchronisation mechanism in the program that
2220 // makes this safe -- or the race(s) need to be harmless, or
2221 // probably nonexistent.
2222 //
2223 if (1) {
2224 Lock* lk = NULL;
2225 if (cvi->mx_ga != 0) {
2226 lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2227 }
2228 /* note: lk could be NULL. Be careful. */
2229 if (lk) {
2230 if (lk->kind == LK_rdwr) {
2231 HG_(record_error_Misc)(thr,
2232 "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2233 }
2234 if (lk->heldBy == NULL) {
2235 HG_(record_error_Misc)(thr,
2236 "pthread_cond_{signal,broadcast}: dubious: "
2237 "associated lock is not held by any thread");
2238 }
2239 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2240 HG_(record_error_Misc)(thr,
2241 "pthread_cond_{signal,broadcast}: "
2242 "associated lock is not held by calling thread");
2243 }
2244 } else {
2245 /* Couldn't even find the damn thing. */
2246 // But actually .. that's not necessarily an error. We don't
2247 // know the (CV,MX) binding until a pthread_cond_wait or bcast
2248 // shows us what it is, and that may not have happened yet.
2249 // So just keep quiet in this circumstance.
2250 //HG_(record_error_Misc)( thr,
2251 // "pthread_cond_{signal,broadcast}: "
2252 // "no or invalid mutex associated with cond");
2253 }
2254 }
sewardjb4112022007-11-09 22:49:28 +00002255
sewardj02114542009-07-28 20:52:36 +00002256 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002257}
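
/* The "dubious" pattern discussed above, in client terms
   (hypothetical code, assuming the (cv,mx) binding is already known
   from an earlier wait):

      flag = 1;                    // no lock held around the write
      pthread_cond_signal(&cv);    // "dubious": the released waiter
                                   // can immediately race on 'flag'
*/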
2258
2259/* returns True if it reckons 'mutex' is valid and held by this
2260 thread, else False */
2261static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2262 void* cond, void* mutex )
2263{
2264 Thread* thr;
2265 Lock* lk;
2266 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002267 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002268
2269 if (SHOW_EVENTS >= 1)
2270 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2271 "(ctid=%d, cond=%p, mutex=%p)\n",
2272 (Int)tid, (void*)cond, (void*)mutex );
2273
sewardjb4112022007-11-09 22:49:28 +00002274 thr = map_threads_maybe_lookup( tid );
2275 tl_assert(thr); /* cannot fail - Thread* must already exist */
2276
2277 lk = map_locks_maybe_lookup( (Addr)mutex );
2278
2279 /* Check for stupid mutex arguments. There are various ways to be
2280 a bozo. Only complain once, though, even if more than one thing
2281 is wrong. */
2282 if (lk == NULL) {
2283 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002284 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002285 thr,
2286 "pthread_cond_{timed}wait called with invalid mutex" );
2287 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002288 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002289 if (lk->kind == LK_rdwr) {
2290 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002291 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002292 thr, "pthread_cond_{timed}wait called with mutex "
2293 "of type pthread_rwlock_t*" );
2294 } else
2295 if (lk->heldBy == NULL) {
2296 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002297 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002298 thr, "pthread_cond_{timed}wait called with un-held mutex");
2299 } else
2300 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002301 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002302 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002303 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002304 thr, "pthread_cond_{timed}wait called with mutex "
2305 "held by a different thread" );
2306 }
2307 }
2308
2309 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002310 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2311 tl_assert(cvi);
2312 tl_assert(cvi->so);
2313 if (cvi->nWaiters == 0) {
2314 /* form initial (CV,MX) binding */
2315 cvi->mx_ga = mutex;
2316 }
2317 else /* check existing (CV,MX) binding */
2318 if (cvi->mx_ga != mutex) {
2319 HG_(record_error_Misc)(
2320 thr, "pthread_cond_{timed}wait: cond is associated "
2321 "with a different mutex");
2322 }
2323 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002324
2325 return lk_valid;
2326}
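
/* The "different mutex" check above fires on this hypothetical
   client interleaving:

      // T1:
      pthread_cond_wait(&cv, &mx1);   // forms the (cv, mx1) binding
      // T2, while T1 is still waiting:
      pthread_cond_wait(&cv, &mx2);   // nWaiters > 0 and mx2 != mx1
                                      // -> Misc error
*/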
2327
2328static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2329 void* cond, void* mutex )
2330{
sewardjf98e1c02008-10-25 16:22:41 +00002331 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2332 the SO for this cond, and 'recv' from it so as to acquire a
2333 dependency edge back to the signaller/broadcaster. */
2334 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002335 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002336
2337 if (SHOW_EVENTS >= 1)
2338 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2339 "(ctid=%d, cond=%p, mutex=%p)\n",
2340 (Int)tid, (void*)cond, (void*)mutex );
2341
sewardjb4112022007-11-09 22:49:28 +00002342 thr = map_threads_maybe_lookup( tid );
2343 tl_assert(thr); /* cannot fail - Thread* must already exist */
2344
2345 // error-if: cond is also associated with a different mutex
2346
philippe8bfc2152012-07-06 23:38:24 +00002347 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2348 if (!cvi) {
2349 /* This is either a bug in helgrind or an error in the guest
2350 application (e.g. the cond var was destroyed by another
2351 thread). Let's assume helgrind is perfect ...
2352 Note that this is similar to drd's behaviour. */
2353 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2354 " being waited upon");
2355 return;
2356 }
2357
sewardj02114542009-07-28 20:52:36 +00002358 tl_assert(cvi);
2359 tl_assert(cvi->so);
2360 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002361
sewardj02114542009-07-28 20:52:36 +00002362 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002363 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2364 it? If this happened it would surely be a bug in the threads
2365 library. Or one of those fabled "spurious wakeups". */
2366 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002367 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002368 " without prior pthread_cond_signal");
sewardjb4112022007-11-09 22:49:28 +00002369 }
sewardjf98e1c02008-10-25 16:22:41 +00002370
2371 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002372 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2373
2374 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002375}
2376
2377static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2378 void* cond )
2379{
2380 /* Deal with destroy events. The only purpose is to free storage
2381 associated with the CV, so as to avoid any possible resource
2382 leaks. */
2383 if (SHOW_EVENTS >= 1)
2384 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2385 "(ctid=%d, cond=%p)\n",
2386 (Int)tid, (void*)cond );
2387
philippe8bfc2152012-07-06 23:38:24 +00002388 map_cond_to_CVInfo_delete( tid, cond );
sewardjb4112022007-11-09 22:49:28 +00002389}
2390
2391
sewardj9f569b72008-11-13 13:33:09 +00002392/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002393/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002394/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002395
2396/* EXPOSITION only */
2397static
2398void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2399{
2400 if (SHOW_EVENTS >= 1)
2401 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2402 (Int)tid, (void*)rwl );
2403 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002404 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002405 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2406}
2407
2408static
2409void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2410{
2411 Thread* thr;
2412 Lock* lk;
2413 if (SHOW_EVENTS >= 1)
2414 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2415 (Int)tid, (void*)rwl );
2416
2417 thr = map_threads_maybe_lookup( tid );
2418 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002419 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002420
2421 lk = map_locks_maybe_lookup( (Addr)rwl );
2422
2423 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002424 HG_(record_error_Misc)(
2425 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002426 }
2427
2428 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002429 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002430 tl_assert( lk->guestaddr == (Addr)rwl );
2431 if (lk->heldBy) {
2432 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002433 HG_(record_error_Misc)(
2434 thr, "pthread_rwlock_destroy of a locked rwlock" );
sewardjb4112022007-11-09 22:49:28 +00002435 /* remove lock from locksets of all owning threads */
2436 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002437 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002438 lk->heldBy = NULL;
2439 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002440 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002441 }
2442 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002443 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002444
2445 if (HG_(clo_track_lockorders))
2446 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002447 map_locks_delete( lk->guestaddr );
2448 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002449 }
2450
sewardjf98e1c02008-10-25 16:22:41 +00002451 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002452 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2453}
2454
2455static
sewardj789c3c52008-02-25 12:10:07 +00002456void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2457 void* rwl,
2458 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002459{
2460 /* Just check the rwl is sane; nothing else to do. */
2461 // 'rwl' may be invalid - not checked by wrapper
2462 Thread* thr;
2463 Lock* lk;
2464 if (SHOW_EVENTS >= 1)
2465 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2466 (Int)tid, (Int)isW, (void*)rwl );
2467
2468 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002469 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002470 thr = map_threads_maybe_lookup( tid );
2471 tl_assert(thr); /* cannot fail - Thread* must already exist */
2472
2473 lk = map_locks_maybe_lookup( (Addr)rwl );
2474 if ( lk
2475 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2476 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002477 HG_(record_error_Misc)(
2478 thr, "pthread_rwlock_{rd,wr}lock with a "
2479 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002480 }
2481}
2482
2483static
2484void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2485{
2486 // only called if the real library call succeeded - so mutex is sane
2487 Thread* thr;
2488 if (SHOW_EVENTS >= 1)
2489 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2490 (Int)tid, (Int)isW, (void*)rwl );
2491
2492 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2493 thr = map_threads_maybe_lookup( tid );
2494 tl_assert(thr); /* cannot fail - Thread* must already exist */
2495
2496 (isW ? evhH__post_thread_w_acquires_lock
2497 : evhH__post_thread_r_acquires_lock)(
2498 thr,
2499 LK_rdwr, /* if not known, create new lock with this LockKind */
2500 (Addr)rwl
2501 );
2502}
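
/* Client-side mapping onto the two cases above (hypothetical code):

      pthread_rwlock_rdlock(&rwl);  // isW==0: r-acquires; several
                                    // threads may r-hold at once
      pthread_rwlock_wrlock(&rwl);  // isW==1: w-acquires; exclusive
*/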
2503
2504static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2505{
2506 // 'rwl' may be invalid - not checked by wrapper
2507 Thread* thr;
2508 if (SHOW_EVENTS >= 1)
2509 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2510 (Int)tid, (void*)rwl );
2511
2512 thr = map_threads_maybe_lookup( tid );
2513 tl_assert(thr); /* cannot fail - Thread* must already exist */
2514
2515 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2516}
2517
2518static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2519{
2520 // only called if the real library call succeeded - so mutex is sane
2521 Thread* thr;
2522 if (SHOW_EVENTS >= 1)
2523 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2524 (Int)tid, (void*)rwl );
2525 thr = map_threads_maybe_lookup( tid );
2526 tl_assert(thr); /* cannot fail - Thread* must already exist */
2527
2528 // anything we should do here?
2529}
2530
2531
sewardj9f569b72008-11-13 13:33:09 +00002532/* ---------------------------------------------------------- */
2533/* -------------- events to do with semaphores -------------- */
2534/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002535
sewardj11e352f2007-11-30 11:11:02 +00002536/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002537 variables. */
2538
sewardjf98e1c02008-10-25 16:22:41 +00002539/* For each semaphore, we maintain a stack of SOs. When a 'post'
2540 operation is done on a semaphore (unlocking, essentially), a new SO
2541 is created for the posting thread, the posting thread does a strong
2542 send to it (which merely installs the posting thread's VC in the
2543 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002544
2545 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002546 semaphore, we pop a SO off the semaphore's stack (which should be
2547 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002548 dependencies between posters and waiters of the semaphore.
2549
sewardjf98e1c02008-10-25 16:22:41 +00002550 It may not be necessary to use a stack - perhaps a bag of SOs would
2551 do. But we do need to keep track of how many unused-up posts have
2552 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002553
sewardjf98e1c02008-10-25 16:22:41 +00002554 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002555 twice on S. T3 cannot complete its waits without both T1 and T2
2556 posting. The above mechanism will ensure that T3 acquires
2557 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002558
sewardjf98e1c02008-10-25 16:22:41 +00002559 When a semaphore is initialised with value N, we do as if we'd
2560 posted N times on the semaphore: basically create N SOs and do a
2561 strong send to all of them. This allows up to N waits on the
2562 semaphore to acquire a dependency on the initialisation point,
2563 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002564
2565 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2566 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002567*/
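
/* Illustrative only: a minimal client-side sketch (not part of this
   file) of the pattern the SO-stack models. 'data' is an assumed
   name; the write to it is ordered before the read via post/wait.

   #include <pthread.h>
   #include <semaphore.h>

   static sem_t sem;
   static int data;                  // shared; ordered by the semaphore

   static void* poster ( void* v ) {
      data = 42;                     // happens-before the post
      sem_post(&sem);                // Helgrind: strong send, SO pushed
      return NULL;
   }

   static void* waiter ( void* v ) {
      sem_wait(&sem);                // Helgrind: SO popped, strong recv
      return (void*)(long)data;      // hence no race reported on 'data'
   }

   int main ( void ) {
      pthread_t t1, t2;
      sem_init(&sem, 0, 0);
      pthread_create(&t1, NULL, poster, NULL);
      pthread_create(&t2, NULL, waiter, NULL);
      pthread_join(t1, NULL);
      pthread_join(t2, NULL);
      sem_destroy(&sem);
      return 0;
   }
*/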
2568
sewardjf98e1c02008-10-25 16:22:41 +00002569/* sem_t* -> XArray* SO* */
2570static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002571
sewardjf98e1c02008-10-25 16:22:41 +00002572static void map_sem_to_SO_stack_INIT ( void ) {
2573 if (map_sem_to_SO_stack == NULL) {
2574 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2575 HG_(free), NULL );
2576 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002577 }
2578}
2579
sewardjf98e1c02008-10-25 16:22:41 +00002580static void push_SO_for_sem ( void* sem, SO* so ) {
2581 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002582 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002583 tl_assert(so);
2584 map_sem_to_SO_stack_INIT();
2585 if (VG_(lookupFM)( map_sem_to_SO_stack,
2586 &keyW, (UWord*)&xa, (UWord)sem )) {
2587 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002588 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002589 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002590 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002591 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2592 VG_(addToXA)( xa, &so );
2593 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002594 }
2595}
2596
sewardjf98e1c02008-10-25 16:22:41 +00002597static SO* mb_pop_SO_for_sem ( void* sem ) {
2598 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002599 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002600 SO* so;
2601 map_sem_to_SO_stack_INIT();
2602 if (VG_(lookupFM)( map_sem_to_SO_stack,
2603 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002604 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002605 Word sz;
2606 tl_assert(keyW == (UWord)sem);
2607 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002608 tl_assert(sz >= 0);
2609 if (sz == 0)
2610 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002611 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2612 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002613 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002614 return so;
sewardjb4112022007-11-09 22:49:28 +00002615 } else {
2616 /* hmm, that's odd. No stack for this semaphore. */
2617 return NULL;
2618 }
2619}
2620
sewardj11e352f2007-11-30 11:11:02 +00002621static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002622{
sewardjf98e1c02008-10-25 16:22:41 +00002623 UWord keyW, valW;
2624 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002625
sewardjb4112022007-11-09 22:49:28 +00002626 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002627 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002628 (Int)tid, (void*)sem );
2629
sewardjf98e1c02008-10-25 16:22:41 +00002630 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002631
sewardjf98e1c02008-10-25 16:22:41 +00002632 /* Empty out the semaphore's SO stack. This way of doing it is
2633 stupid, but at least it's easy. */
2634 while (1) {
2635 so = mb_pop_SO_for_sem( sem );
2636 if (!so) break;
2637 libhb_so_dealloc(so);
2638 }
2639
2640 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2641 XArray* xa = (XArray*)valW;
2642 tl_assert(keyW == (UWord)sem);
2643 tl_assert(xa);
2644 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2645 VG_(deleteXA)(xa);
2646 }
sewardjb4112022007-11-09 22:49:28 +00002647}
2648
sewardj11e352f2007-11-30 11:11:02 +00002649static
2650void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2651{
sewardjf98e1c02008-10-25 16:22:41 +00002652 SO* so;
2653 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002654
2655 if (SHOW_EVENTS >= 1)
2656 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2657 (Int)tid, (void*)sem, value );
2658
sewardjf98e1c02008-10-25 16:22:41 +00002659 thr = map_threads_maybe_lookup( tid );
2660 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002661
sewardjf98e1c02008-10-25 16:22:41 +00002662 /* Empty out the semaphore's SO stack. This way of doing it is
2663 stupid, but at least it's easy. */
2664 while (1) {
2665 so = mb_pop_SO_for_sem( sem );
2666 if (!so) break;
2667 libhb_so_dealloc(so);
2668 }
sewardj11e352f2007-11-30 11:11:02 +00002669
sewardjf98e1c02008-10-25 16:22:41 +00002670 /* If we don't do this check, the following while loop runs us out
2671 of memory for stupid initial values of 'value'. */
2672 if (value > 10000) {
2673 HG_(record_error_Misc)(
2674 thr, "sem_init: initial value exceeds 10000; using 10000" );
2675 value = 10000;
2676 }
sewardj11e352f2007-11-30 11:11:02 +00002677
sewardjf98e1c02008-10-25 16:22:41 +00002678 /* Now create 'valid' new SOs for the thread, do a strong send to
2679 each of them, and push them all on the stack. */
2680 for (; value > 0; value--) {
2681 Thr* hbthr = thr->hbthr;
2682 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002683
sewardjf98e1c02008-10-25 16:22:41 +00002684 so = libhb_so_alloc();
2685 libhb_so_send( hbthr, so, True/*strong send*/ );
2686 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002687 }
2688}
2689
2690static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002691{
sewardjf98e1c02008-10-25 16:22:41 +00002692 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2693 it (iow, write our VC into it, then tick ours), and push the SO
2694 on a stack of SOs associated with 'sem'. This is later used
2695 by other thread(s) which successfully exit from a sem_wait on
2696 the same sem; by doing a strong recv from SOs popped off the
2697 stack, they acquire dependencies on the posting thread
2698 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002699
sewardjf98e1c02008-10-25 16:22:41 +00002700 Thread* thr;
2701 SO* so;
2702 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002703
2704 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002705 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002706 (Int)tid, (void*)sem );
2707
2708 thr = map_threads_maybe_lookup( tid );
2709 tl_assert(thr); /* cannot fail - Thread* must already exist */
2710
2711 // error-if: sem is bogus
2712
sewardjf98e1c02008-10-25 16:22:41 +00002713 hbthr = thr->hbthr;
2714 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002715
sewardjf98e1c02008-10-25 16:22:41 +00002716 so = libhb_so_alloc();
2717 libhb_so_send( hbthr, so, True/*strong send*/ );
2718 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002719}
2720
sewardj11e352f2007-11-30 11:11:02 +00002721static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002722{
sewardjf98e1c02008-10-25 16:22:41 +00002723 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2724 the 'sem' from this semaphore's SO-stack, and do a strong recv
2725 from it. This creates a dependency back to one of the post-ers
2726 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002727
sewardjf98e1c02008-10-25 16:22:41 +00002728 Thread* thr;
2729 SO* so;
2730 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002731
2732 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002733 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002734 (Int)tid, (void*)sem );
2735
2736 thr = map_threads_maybe_lookup( tid );
2737 tl_assert(thr); /* cannot fail - Thread* must already exist */
2738
2739 // error-if: sem is bogus
2740
sewardjf98e1c02008-10-25 16:22:41 +00002741 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002742
sewardjf98e1c02008-10-25 16:22:41 +00002743 if (so) {
2744 hbthr = thr->hbthr;
2745 tl_assert(hbthr);
2746
2747 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2748 libhb_so_dealloc(so);
2749 } else {
2750 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2751 If this happened it would surely be a bug in the threads
2752 library. */
2753 HG_(record_error_Misc)(
2754 thr, "Bug in libpthread: sem_wait succeeded on"
2755 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002756 }
2757}
2758
2759
sewardj9f569b72008-11-13 13:33:09 +00002760/* -------------------------------------------------------- */
2761/* -------------- events to do with barriers -------------- */
2762/* -------------------------------------------------------- */
2763
2764typedef
2765 struct {
2766 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002767 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002768 UWord size; /* declared size */
2769 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2770 }
2771 Bar;
2772
2773static Bar* new_Bar ( void ) {
2774 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2775 tl_assert(bar);
2776 /* all fields are zero */
2777 tl_assert(bar->initted == False);
2778 return bar;
2779}
2780
2781static void delete_Bar ( Bar* bar ) {
2782 tl_assert(bar);
2783 if (bar->waiting)
2784 VG_(deleteXA)(bar->waiting);
2785 HG_(free)(bar);
2786}
2787
2788/* A mapping which stores auxiliary data for barriers. */
2789
2790/* pthread_barrier_t* -> Bar* */
2791static WordFM* map_barrier_to_Bar = NULL;
2792
2793static void map_barrier_to_Bar_INIT ( void ) {
2794 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2795 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2796 "hg.mbtBI.1", HG_(free), NULL );
2797 tl_assert(map_barrier_to_Bar != NULL);
2798 }
2799}
2800
2801static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2802 UWord key, val;
2803 map_barrier_to_Bar_INIT();
2804 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2805 tl_assert(key == (UWord)barrier);
2806 return (Bar*)val;
2807 } else {
2808 Bar* bar = new_Bar();
2809 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2810 return bar;
2811 }
2812}
2813
2814static void map_barrier_to_Bar_delete ( void* barrier ) {
2815 UWord keyW, valW;
2816 map_barrier_to_Bar_INIT();
2817 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2818 Bar* bar = (Bar*)valW;
2819 tl_assert(keyW == (UWord)barrier);
2820 delete_Bar(bar);
2821 }
2822}
2823
2824
2825static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2826 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002827 UWord count,
2828 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002829{
2830 Thread* thr;
2831 Bar* bar;
2832
2833 if (SHOW_EVENTS >= 1)
2834 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002835 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2836 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002837
2838 thr = map_threads_maybe_lookup( tid );
2839 tl_assert(thr); /* cannot fail - Thread* must already exist */
2840
2841 if (count == 0) {
2842 HG_(record_error_Misc)(
2843 thr, "pthread_barrier_init: 'count' argument is zero"
2844 );
2845 }
2846
sewardj406bac82010-03-03 23:03:40 +00002847 if (resizable != 0 && resizable != 1) {
2848 HG_(record_error_Misc)(
2849 thr, "pthread_barrier_init: invalid 'resizable' argument"
2850 );
2851 }
2852
sewardj9f569b72008-11-13 13:33:09 +00002853 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2854 tl_assert(bar);
2855
2856 if (bar->initted) {
2857 HG_(record_error_Misc)(
2858 thr, "pthread_barrier_init: barrier is already initialised"
2859 );
2860 }
2861
2862 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2863 tl_assert(bar->initted);
2864 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002865 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002866 );
2867 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2868 }
2869 if (!bar->waiting) {
2870 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2871 sizeof(Thread*) );
2872 }
2873
2874 tl_assert(bar->waiting);
2875 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002876 bar->initted = True;
2877 bar->resizable = resizable == 1 ? True : False;
2878 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002879}
2880
2881
2882static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2883 void* barrier )
2884{
sewardj553655c2008-11-14 19:41:19 +00002885 Thread* thr;
2886 Bar* bar;
2887
sewardj9f569b72008-11-13 13:33:09 +00002888 /* Deal with destroy events. The only purpose is to free storage
2889 associated with the barrier, so as to avoid any possible
2890 resource leaks. */
2891 if (SHOW_EVENTS >= 1)
2892 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2893 "(tid=%d, barrier=%p)\n",
2894 (Int)tid, (void*)barrier );
2895
sewardj553655c2008-11-14 19:41:19 +00002896 thr = map_threads_maybe_lookup( tid );
2897 tl_assert(thr); /* cannot fail - Thread* must already exist */
2898
2899 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2900 tl_assert(bar);
2901
2902 if (!bar->initted) {
2903 HG_(record_error_Misc)(
2904 thr, "pthread_barrier_destroy: barrier was never initialised"
2905 );
2906 }
2907
2908 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2909 HG_(record_error_Misc)(
2910 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2911 );
2912 }
2913
sewardj9f569b72008-11-13 13:33:09 +00002914 /* Maybe we shouldn't do this; just let it persist, so that when it
2915 is reinitialised we don't need to do any dynamic memory
2916 allocation? The downside is a potentially unlimited space leak,
2917 if the client creates (in turn) a large number of barriers all
2918 at different locations. Note that if we do later move to the
2919 don't-delete-it scheme, we need to mark the barrier as
2920 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002921 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002922 map_barrier_to_Bar_delete( barrier );
2923}
2924
2925
sewardj406bac82010-03-03 23:03:40 +00002926/* All the threads have arrived. Now do the Interesting Bit. Get a
2927 new synchronisation object and do a weak send to it from all the
2928 participating threads. This makes its vector clocks be the join of
2929 all the individual threads' vector clocks. Then do a strong
2930 receive from it back to all threads, so that their VCs are a copy
2931 of it (hence are all equal to the join of their original VCs.) */
2932static void do_barrier_cross_sync_and_empty ( Bar* bar )
2933{
2934 /* XXX check bar->waiting has no duplicates */
2935 UWord i;
2936 SO* so = libhb_so_alloc();
2937
2938 tl_assert(bar->waiting);
2939 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2940
2941 /* compute the join ... */
2942 for (i = 0; i < bar->size; i++) {
2943 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2944 Thr* hbthr = t->hbthr;
2945 libhb_so_send( hbthr, so, False/*weak send*/ );
2946 }
2947 /* ... and distribute to all threads */
2948 for (i = 0; i < bar->size; i++) {
2949 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2950 Thr* hbthr = t->hbthr;
2951 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2952 }
2953
2954 /* finally, we must empty out the waiting vector */
2955 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2956
2957 /* and we don't need this any more. Perhaps a stack-allocated
2958 SO would be better? */
2959 libhb_so_dealloc(so);
2960}
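
/* Worked example (illustrative numbers, not from the source): suppose
   threads T1, T2, T3 arrive at the barrier with vector clocks
   T1=[4,0,0], T2=[0,7,0], T3=[0,0,2]. The three weak sends leave the
   SO holding the join [4,7,2]; the three strong receives then set
   every thread's VC to [4,7,2], so whatever any thread does after the
   barrier happens-after everything all three did before it. */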
2961
2962
sewardj9f569b72008-11-13 13:33:09 +00002963static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2964 void* barrier )
2965{
sewardj1c466b72008-11-19 11:52:14 +00002966 /* This function gets called after a client thread calls
2967 pthread_barrier_wait but before it arrives at the real
2968 pthread_barrier_wait.
2969
2970 Why is the following correct? It's a bit subtle.
2971
2972 If this is not the last thread arriving at the barrier, we simply
2973 note its presence and return. Because valgrind (at least as of
2974 Nov 08) is single threaded, we are guaranteed safe from any race
2975 conditions when in this function -- no other client threads are
2976 running.
2977
2978 If this is the last thread, then we are again the only running
2979 thread. All the other threads will have either arrived at the
2980 real pthread_barrier_wait or are on their way to it, but in any
2981 case are guaranteed not to be able to move past it, because this
2982 thread is currently in this function and so has not yet arrived
2983 at the real pthread_barrier_wait. That means that:
2984
2985 1. While we are in this function, none of the other threads
2986 waiting at the barrier can move past it.
2987
2988 2. When this function returns (and simulated execution resumes),
2989 this thread and all other waiting threads will be able to move
2990 past the real barrier.
2991
2992 Because of this, it is now safe to update the vector clocks of
2993 all threads, to represent the fact that they all arrived at the
2994 barrier and have all moved on. There is no danger of any
2995 complications to do with some threads leaving the barrier and
2996 racing back round to the front, whilst others are still leaving
2997 (which is the primary source of complication in correct handling/
2998 implementation of barriers). That can't happen because we update
2999 here our data structures so as to indicate that the threads have
3000 passed the barrier, even though, as per (2) above, they are
3001 guaranteed not to pass the barrier until we return.
3002
3003 This relies crucially on Valgrind being single threaded. If that
3004 changes, this will need to be reconsidered.
3005 */
sewardj9f569b72008-11-13 13:33:09 +00003006 Thread* thr;
3007 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003008 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003009
3010 if (SHOW_EVENTS >= 1)
3011 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3012 "(tid=%d, barrier=%p)\n",
3013 (Int)tid, (void*)barrier );
3014
3015 thr = map_threads_maybe_lookup( tid );
3016 tl_assert(thr); /* cannot fail - Thread* must already exist */
3017
3018 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3019 tl_assert(bar);
3020
3021 if (!bar->initted) {
3022 HG_(record_error_Misc)(
3023 thr, "pthread_barrier_wait: barrier is uninitialised"
3024 );
3025 return; /* client is broken .. avoid assertions below */
3026 }
3027
3028 /* guaranteed by _INIT_PRE above */
3029 tl_assert(bar->size > 0);
3030 tl_assert(bar->waiting);
3031
3032 VG_(addToXA)( bar->waiting, &thr );
3033
3034 /* guaranteed by this function */
3035 present = VG_(sizeXA)(bar->waiting);
3036 tl_assert(present > 0 && present <= bar->size);
3037
3038 if (present < bar->size)
3039 return;
3040
sewardj406bac82010-03-03 23:03:40 +00003041 do_barrier_cross_sync_and_empty(bar);
3042}
sewardj9f569b72008-11-13 13:33:09 +00003043
sewardj9f569b72008-11-13 13:33:09 +00003044
sewardj406bac82010-03-03 23:03:40 +00003045static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3046 void* barrier,
3047 UWord newcount )
3048{
3049 Thread* thr;
3050 Bar* bar;
3051 UWord present;
3052
3053 if (SHOW_EVENTS >= 1)
3054 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3055 "(tid=%d, barrier=%p, newcount=%lu)\n",
3056 (Int)tid, (void*)barrier, newcount );
3057
3058 thr = map_threads_maybe_lookup( tid );
3059 tl_assert(thr); /* cannot fail - Thread* must already exist */
3060
3061 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3062 tl_assert(bar);
3063
3064 if (!bar->initted) {
3065 HG_(record_error_Misc)(
3066 thr, "pthread_barrier_resize: barrier is uninitialised"
3067 );
3068 return; /* client is broken .. avoid assertions below */
3069 }
3070
3071 if (!bar->resizable) {
3072 HG_(record_error_Misc)(
3073 thr, "pthread_barrier_resize: barrier may not be resized"
3074 );
3075 return; /* client is broken .. avoid assertions below */
3076 }
3077
3078 if (newcount == 0) {
3079 HG_(record_error_Misc)(
3080 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3081 );
3082 return; /* client is broken .. avoid assertions below */
3083 }
3084
3085 /* guaranteed by _INIT_PRE above */
3086 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003087 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003088 /* Guaranteed by this fn */
3089 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003090
sewardj406bac82010-03-03 23:03:40 +00003091 if (newcount >= bar->size) {
3092 /* Increasing the capacity. There's no possibility of threads
3093 moving on from the barrier in this situation, so just note
3094 the fact and do nothing more. */
3095 bar->size = newcount;
3096 } else {
3097 /* Decreasing the capacity. If we decrease it to be equal or
3098 below the number of waiting threads, they will now move past
3099 the barrier, so we need to mess with dep edges in the same way
3100 as if the barrier had filled up normally. */
3101 present = VG_(sizeXA)(bar->waiting);
3102 tl_assert(present >= 0 && present <= bar->size);
3103 if (newcount <= present) {
3104 bar->size = present; /* keep the cross_sync call happy */
3105 do_barrier_cross_sync_and_empty(bar);
3106 }
3107 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003108 }
sewardj9f569b72008-11-13 13:33:09 +00003109}
3110
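/* Illustrative scenario (assumed numbers): a barrier created with
   count=4 currently has 3 waiters. Resizing to newcount=3 takes the
   decreasing branch above: bar->size is first set to 3 (the number
   present) so the sizeXA assertion in do_barrier_cross_sync_and_empty
   holds, the 3 waiters are released and synchronised, and bar->size
   ends up as 3. Resizing to newcount=5 instead merely records the new
   size, since no waiting thread can be released by an increase. */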
3111
sewardjed2e72e2009-08-14 11:08:24 +00003112/* ----------------------------------------------------- */
3113/* ----- events to do with user-specified HB edges ----- */
3114/* ----------------------------------------------------- */
3115
3116/* A mapping from arbitrary UWord tag to the SO associated with it.
3117 The UWord tags are meaningless to us, interpreted only by the
3118 user. */
3119
3120
3121
3122/* UWord -> SO* */
3123static WordFM* map_usertag_to_SO = NULL;
3124
3125static void map_usertag_to_SO_INIT ( void ) {
3126 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3127 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3128 "hg.mutS.1", HG_(free), NULL );
3129 tl_assert(map_usertag_to_SO != NULL);
3130 }
3131}
3132
3133static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3134 UWord key, val;
3135 map_usertag_to_SO_INIT();
3136 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3137 tl_assert(key == (UWord)usertag);
3138 return (SO*)val;
3139 } else {
3140 SO* so = libhb_so_alloc();
3141 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3142 return so;
3143 }
3144}
3145
sewardj6015d0e2011-03-11 19:10:48 +00003146static void map_usertag_to_SO_delete ( UWord usertag ) {
3147 UWord keyW, valW;
3148 map_usertag_to_SO_INIT();
3149 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3150 SO* so = (SO*)valW;
3151 tl_assert(keyW == usertag);
3152 tl_assert(so);
3153 libhb_so_dealloc(so);
3154 }
3155}
sewardjed2e72e2009-08-14 11:08:24 +00003156
3157
3158static
3159void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3160{
3161 /* TID is just about to notionally send a message on a notional
3162 abstract synchronisation object whose identity is given by
3163 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003164 bound, and do a 'weak send' on the SO. This joins the vector
3165 clocks from this thread into any vector clocks already present
3166 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003167 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003168 thereby acquiring a dependency on all the events that have
3169 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003170 Thread* thr;
3171 SO* so;
3172
3173 if (SHOW_EVENTS >= 1)
3174 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3175 (Int)tid, usertag );
3176
3177 thr = map_threads_maybe_lookup( tid );
3178 tl_assert(thr); /* cannot fail - Thread* must already exist */
3179
3180 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3181 tl_assert(so);
3182
sewardj8c50d3c2011-03-11 18:38:12 +00003183 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003184}
3185
3186static
3187void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3188{
3189 /* TID has just notionally received a message from a notional
3190 abstract synchronisation object whose identity is given by
3191 USERTAG. Bind USERTAG to a real SO if it is not already so
3192 bound. If the SO has at some point in the past been 'sent' on,
3193 do a 'strong receive' on it, thereby acquiring a dependency on
3194 the sender. */
3195 Thread* thr;
3196 SO* so;
3197
3198 if (SHOW_EVENTS >= 1)
3199 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3200 (Int)tid, usertag );
3201
3202 thr = map_threads_maybe_lookup( tid );
3203 tl_assert(thr); /* cannot fail - Thread* must already exist */
3204
3205 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3206 tl_assert(so);
3207
3208 /* Acquire a dependency on it. If the SO has never so far been
3209 sent on, then libhb_so_recv will do nothing. So we're safe
3210 regardless of SO's history. */
3211 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3212}
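
/* Hedged client-side sketch of these two requests, reached via the
   ANNOTATE_HAPPENS_BEFORE/AFTER macros in helgrind.h; 'flag' and
   'payload' are illustrative names. A real program would also have to
   deal with the race on 'flag' itself (an atomic, or
   ANNOTATE_BENIGN_RACE); the point here is only the HB edge.

   #include <pthread.h>
   #include "helgrind.h"

   static int payload;
   static volatile int flag = 0;

   static void* producer ( void* v ) {
      payload = 99;
      ANNOTATE_HAPPENS_BEFORE(&flag);  // SEND_PRE: weak send on SO(&flag)
      flag = 1;
      return NULL;
   }

   static void* consumer ( void* v ) {
      while (flag == 0) ;              // spin until posted
      ANNOTATE_HAPPENS_AFTER(&flag);   // RECV_POST: strong recv from SO(&flag)
      return (void*)(long)payload;     // ordered after producer's write
   }

   int main ( void ) {
      pthread_t p, c;
      pthread_create(&p, NULL, producer, NULL);
      pthread_create(&c, NULL, consumer, NULL);
      pthread_join(p, NULL);
      pthread_join(c, NULL);
      return 0;
   }
*/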
3213
sewardj6015d0e2011-03-11 19:10:48 +00003214static
3215void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3216{
3217 /* TID declares that any happens-before edges notionally stored in
3218 USERTAG can be deleted. If (as would normally be the case) a
3219 SO is associated with USERTAG, then the association is removed
3220 and all resources associated with SO are freed. Importantly,
3221 that frees up any VTSs stored in SO. */
3222 if (SHOW_EVENTS >= 1)
3223 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3224 (Int)tid, usertag );
3225
3226 map_usertag_to_SO_delete( usertag );
3227}
3228
sewardjed2e72e2009-08-14 11:08:24 +00003229
sewardjb4112022007-11-09 22:49:28 +00003230/*--------------------------------------------------------------*/
3231/*--- Lock acquisition order monitoring ---*/
3232/*--------------------------------------------------------------*/
3233
3234/* FIXME: here are some optimisations still to do in
3235 laog__pre_thread_acquires_lock.
3236
3237 The graph is structured so that if L1 --*--> L2 then L1 must be
3238 acquired before L2.
3239
3240 The common case is that some thread T holds (eg) L1 L2 and L3 and
3241 is repeatedly acquiring and releasing Ln, and there is no ordering
3242 error in what it is doing. Hence it repeatedly:
3243
3244 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3245 produces the answer No (because there is no error).
3246
3247 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3248 (because they already got added the first time T acquired Ln).
3249
3250 Hence cache these two events:
3251
3252 (1) Cache result of the query from last time. Invalidate the cache
3253 any time any edges are added to or deleted from laog.
3254
3255 (2) Cache these add-edge requests and ignore them if said edges
3256 have already been added to laog. Invalidate the cache any time
3257 any edges are deleted from laog.
3258*/
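
/* For orientation, a minimal client program (illustrative names) that
   makes laog report a lock order violation. The two threads are run
   in sequence, so no real deadlock occurs, but the inconsistent order
   is still recorded and reported:

   #include <pthread.h>

   static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
   static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

   static void* t_a ( void* v ) {
      pthread_mutex_lock(&L1);      // establishes edge L1 --> L2
      pthread_mutex_lock(&L2);
      pthread_mutex_unlock(&L2);
      pthread_mutex_unlock(&L1);
      return NULL;
   }

   static void* t_b ( void* v ) {
      pthread_mutex_lock(&L2);      // L2 then L1 contradicts L1 --> L2:
      pthread_mutex_lock(&L1);      // laog flags a lock order error here
      pthread_mutex_unlock(&L1);
      pthread_mutex_unlock(&L2);
      return NULL;
   }

   int main ( void ) {
      pthread_t a, b;
      pthread_create(&a, NULL, t_a, NULL); pthread_join(a, NULL);
      pthread_create(&b, NULL, t_b, NULL); pthread_join(b, NULL);
      return 0;
   }
*/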
3259
3260typedef
3261 struct {
3262 WordSetID inns; /* in univ_laog */
3263 WordSetID outs; /* in univ_laog */
3264 }
3265 LAOGLinks;
3266
3267/* lock order acquisition graph */
3268static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3269
3270/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3271 where that edge was created, so that we can show the user later if
3272 we need to. */
3273typedef
3274 struct {
3275 Addr src_ga; /* Lock guest addresses for */
3276 Addr dst_ga; /* src/dst of the edge */
3277 ExeContext* src_ec; /* And corresponding places where that */
3278 ExeContext* dst_ec; /* ordering was established */
3279 }
3280 LAOGLinkExposition;
3281
sewardj250ec2e2008-02-15 22:02:30 +00003282static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003283 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3284 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3285 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3286 if (llx1->src_ga < llx2->src_ga) return -1;
3287 if (llx1->src_ga > llx2->src_ga) return 1;
3288 if (llx1->dst_ga < llx2->dst_ga) return -1;
3289 if (llx1->dst_ga > llx2->dst_ga) return 1;
3290 return 0;
3291}
3292
3293static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3294/* end EXPOSITION ONLY */
3295
3296
sewardja65db102009-01-26 10:45:16 +00003297__attribute__((noinline))
3298static void laog__init ( void )
3299{
3300 tl_assert(!laog);
3301 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003302 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003303
3304 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3305 HG_(free), NULL/*unboxedcmp*/ );
3306
3307 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3308 cmp_LAOGLinkExposition );
3309 tl_assert(laog);
3310 tl_assert(laog_exposition);
3311}
3312
sewardjb4112022007-11-09 22:49:28 +00003313static void laog__show ( Char* who ) {
3314 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003315 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003316 Lock* me;
3317 LAOGLinks* links;
3318 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003319 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003320 me = NULL;
3321 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003322 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003323 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003324 tl_assert(me);
3325 tl_assert(links);
3326 VG_(printf)(" node %p:\n", me);
3327 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3328 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003329 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003330 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3331 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003332 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003333 me = NULL;
3334 links = NULL;
3335 }
sewardj896f6f92008-08-19 08:38:52 +00003336 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003337 VG_(printf)("}\n");
3338}
3339
sewardj866c80c2011-10-22 19:29:51 +00003340static void univ_laog_do_GC ( void ) {
3341 Word i;
3342 LAOGLinks* links;
3343 Word seen = 0;
3344 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3345 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3346
3347 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3348 (Int) univ_laog_cardinality
3349 * sizeof(Bool) );
3350 // univ_laog_seen[*] set to 0 (False) by zalloc.
3351
3352 if (VG_(clo_stats))
3353 VG_(message)(Vg_DebugMsg,
3354 "univ_laog_do_GC enter cardinality %'10d\n",
3355 (Int)univ_laog_cardinality);
3356
3357 VG_(initIterFM)( laog );
3358 links = NULL;
3359 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3360 tl_assert(links);
3361 tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3362 univ_laog_seen[links->inns] = True;
3363 tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3364 univ_laog_seen[links->outs] = True;
3365 links = NULL;
3366 }
3367 VG_(doneIterFM)( laog );
3368
3369 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3370 if (univ_laog_seen[i])
3371 seen++;
3372 else
3373 HG_(dieWS) ( univ_laog, (WordSet)i );
3374 }
3375
3376 HG_(free) (univ_laog_seen);
3377
3378 // We need to decide the value of the next_gc.
3379 // 3 solutions were looked at:
3380 // Sol 1: garbage collect at seen * 2
3381 // This solution was a lot slower, probably because we both do a lot of
3382 // garbage collection and do not keep laog WordSets alive long enough
3383 // for them to become useful again very soon.
3384 // Sol 2: garbage collect at a percentage increase of the current cardinality
3385 // (with a min increase of 1)
3386 // Trials on a small test program with 1%, 5% and 10% increase was done.
3387 // 1% is slightly faster than 5%, which is slightly slower than 10%.
3388 // However, on a big application, this caused the memory to be exhausted,
3389 // as even a 1% increase of size at each gc becomes a lot, when many gc
3390 // are done.
3391 // Sol 3: always garbage collect at current cardinality + 1.
3392 // This solution was the fastest of the 3 solutions, and caused no memory
3393 // exhaustion in the big application.
3394 //
3395 // With regards to cost introduced by gc: on the t2t perf test (doing only
3396 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3397 // version with garbage collection. With t2t 50 20 2, my machine started
3398 // to page out, and so the garbage collected version was much faster.
3399 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3400 // performance difference is insignificant (~ 0.1 s).
3401 // Of course, it might be that real life programs are not well represented
3402 // by t2t.
3403
3404 // If ever we want to have a more sophisticated control
3405 // (e.g. clo options to control the percentage increase or a fixed increase),
3406 // we should do it here, eg.
3407 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3408 // Currently, we just hard-code the solution 3 above.
3409 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3410
3411 if (VG_(clo_stats))
3412 VG_(message)
3413 (Vg_DebugMsg,
3414 "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
3415 (Int)seen, next_gc_univ_laog);
3416}
3417
3418
sewardjb4112022007-11-09 22:49:28 +00003419__attribute__((noinline))
3420static void laog__add_edge ( Lock* src, Lock* dst ) {
3421 Word keyW;
3422 LAOGLinks* links;
3423 Bool presentF, presentR;
3424 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3425
3426 /* Take the opportunity to sanity check the graph. Record in
3427 presentF if there is already a src->dst mapping in this node's
3428 forwards links, and presentR if there is already a src->dst
3429 mapping in this node's backwards links. They should agree!
3430 Also, we need to know whether the edge was already present so as
3431 to decide whether or not to update the link details mapping. We
3432 can compute presentF and presentR essentially for free, so may
3433 as well do this always. */
3434 presentF = presentR = False;
3435
3436 /* Update the out edges for src */
3437 keyW = 0;
3438 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003439 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003440 WordSetID outs_new;
3441 tl_assert(links);
3442 tl_assert(keyW == (Word)src);
3443 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3444 presentF = outs_new == links->outs;
3445 links->outs = outs_new;
3446 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003447 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003448 links->inns = HG_(emptyWS)( univ_laog );
3449 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003450 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003451 }
3452 /* Update the in edges for dst */
3453 keyW = 0;
3454 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003455 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003456 WordSetID inns_new;
3457 tl_assert(links);
3458 tl_assert(keyW == (Word)dst);
3459 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3460 presentR = inns_new == links->inns;
3461 links->inns = inns_new;
3462 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003463 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003464 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3465 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003466 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003467 }
3468
3469 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3470
3471 if (!presentF && src->acquired_at && dst->acquired_at) {
3472 LAOGLinkExposition expo;
3473 /* If this edge is entering the graph, and we have acquired_at
3474 information for both src and dst, record those acquisition
3475 points. Hence, if there is later a violation of this
3476 ordering, we can show the user the two places in which the
3477 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003478 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003479 src->guestaddr, dst->guestaddr);
3480 expo.src_ga = src->guestaddr;
3481 expo.dst_ga = dst->guestaddr;
3482 expo.src_ec = NULL;
3483 expo.dst_ec = NULL;
3484 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003485 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003486 /* we already have it; do nothing */
3487 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003488 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3489 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003490 expo2->src_ga = src->guestaddr;
3491 expo2->dst_ga = dst->guestaddr;
3492 expo2->src_ec = src->acquired_at;
3493 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003494 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003495 }
3496 }
sewardj866c80c2011-10-22 19:29:51 +00003497
3498 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3499 univ_laog_do_GC();
sewardjb4112022007-11-09 22:49:28 +00003500}
3501
3502__attribute__((noinline))
3503static void laog__del_edge ( Lock* src, Lock* dst ) {
3504 Word keyW;
3505 LAOGLinks* links;
sewardj866c80c2011-10-22 19:29:51 +00003506 if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
sewardjb4112022007-11-09 22:49:28 +00003507 /* Update the out edges for src */
3508 keyW = 0;
3509 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003510 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003511 tl_assert(links);
3512 tl_assert(keyW == (Word)src);
3513 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3514 }
3515 /* Update the in edges for dst */
3516 keyW = 0;
3517 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003518 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003519 tl_assert(links);
3520 tl_assert(keyW == (Word)dst);
3521 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3522 }
sewardj866c80c2011-10-22 19:29:51 +00003523
3524 /* Remove the exposition of src,dst (if present) */
3525 {
3526 LAOGLinkExposition *fm_expo;
3527
3528 LAOGLinkExposition expo;
3529 expo.src_ga = src->guestaddr;
3530 expo.dst_ga = dst->guestaddr;
3531 expo.src_ec = NULL;
3532 expo.dst_ec = NULL;
3533
3534 if (VG_(delFromFM) (laog_exposition,
3535 (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3536 HG_(free) (fm_expo);
3537 }
3538 }
3539
3540 /* deleting edges can increase the nr of WS so check for gc. */
3541 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3542 univ_laog_do_GC();
3543 if (0) VG_(printf)("laog__del_edge exit\n");
sewardjb4112022007-11-09 22:49:28 +00003544}
3545
3546__attribute__((noinline))
3547static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3548 Word keyW;
3549 LAOGLinks* links;
3550 keyW = 0;
3551 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003552 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003553 tl_assert(links);
3554 tl_assert(keyW == (Word)lk);
3555 return links->outs;
3556 } else {
3557 return HG_(emptyWS)( univ_laog );
3558 }
3559}
3560
3561__attribute__((noinline))
3562static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3563 Word keyW;
3564 LAOGLinks* links;
3565 keyW = 0;
3566 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003567 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003568 tl_assert(links);
3569 tl_assert(keyW == (Word)lk);
3570 return links->inns;
3571 } else {
3572 return HG_(emptyWS)( univ_laog );
3573 }
3574}
3575
3576__attribute__((noinline))
3577static void laog__sanity_check ( Char* who ) {
3578 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003579 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003580 Lock* me;
3581 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003582 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003583 me = NULL;
3584 links = NULL;
3585 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003586 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003587 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003588 tl_assert(me);
3589 tl_assert(links);
3590 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3591 for (i = 0; i < ws_size; i++) {
3592 if ( ! HG_(elemWS)( univ_laog,
3593 laog__succs( (Lock*)ws_words[i] ),
3594 (Word)me ))
3595 goto bad;
3596 }
3597 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3598 for (i = 0; i < ws_size; i++) {
3599 if ( ! HG_(elemWS)( univ_laog,
3600 laog__preds( (Lock*)ws_words[i] ),
3601 (Word)me ))
3602 goto bad;
3603 }
3604 me = NULL;
3605 links = NULL;
3606 }
sewardj896f6f92008-08-19 08:38:52 +00003607 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003608 return;
3609
3610 bad:
3611 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3612 laog__show(who);
3613 tl_assert(0);
3614}
3615
3616/* If there is a path in laog from 'src' to any of the elements in
3617 'dst', return an arbitrarily chosen element of 'dst' reachable from
3618 'src'. If no path exists from 'src' to any element in 'dst', return
3619 NULL. */
3620__attribute__((noinline))
3621static
3622Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3623{
3624 Lock* ret;
3625 Word i, ssz;
3626 XArray* stack; /* of Lock* */
3627 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3628 Lock* here;
3629 WordSetID succs;
3630 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003631 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003632 //laog__sanity_check();
3633
3634 /* If the destination set is empty, we can never get there from
3635 'src' :-), so don't bother to try */
3636 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3637 return NULL;
3638
3639 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003640 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3641 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003642
3643 (void) VG_(addToXA)( stack, &src );
3644
3645 while (True) {
3646
3647 ssz = VG_(sizeXA)( stack );
3648
3649 if (ssz == 0) { ret = NULL; break; }
3650
3651 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3652 VG_(dropTailXA)( stack, 1 );
3653
3654 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3655
sewardj896f6f92008-08-19 08:38:52 +00003656 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003657 continue;
3658
sewardj896f6f92008-08-19 08:38:52 +00003659 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003660
3661 succs = laog__succs( here );
3662 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3663 for (i = 0; i < succs_size; i++)
3664 (void) VG_(addToXA)( stack, &succs_words[i] );
3665 }
3666
sewardj896f6f92008-08-19 08:38:52 +00003667 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003668 VG_(deleteXA)( stack );
3669 return ret;
3670}
3671
3672
3673/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3674 between 'lk' and the locks already held by 'thr' and issue a
3675 complaint if so. Also, update the ordering graph appropriately.
3676*/
3677__attribute__((noinline))
3678static void laog__pre_thread_acquires_lock (
3679 Thread* thr, /* NB: BEFORE lock is added */
3680 Lock* lk
3681 )
3682{
sewardj250ec2e2008-02-15 22:02:30 +00003683 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003684 Word ls_size, i;
3685 Lock* other;
3686
3687 /* It may be that 'thr' already holds 'lk' and is recursively
3688 relocking it. In this case we just ignore the call. */
3689 /* NB: univ_lsets really is correct here */
3690 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3691 return;
3692
sewardjb4112022007-11-09 22:49:28 +00003693 /* First, the check. Complain if there is any path in laog from lk
3694 to any of the locks already held by thr, since if any such path
3695 existed, it would mean that previously lk was acquired before
3696 (rather than after, as we are doing here) at least one of those
3697 locks.
3698 */
3699 other = laog__do_dfs_from_to(lk, thr->locksetA);
3700 if (other) {
3701 LAOGLinkExposition key, *found;
3702 /* So we managed to find a path lk --*--> other in the graph,
3703 which implies that 'lk' should have been acquired before
3704 'other' but is in fact being acquired afterwards. We present
3705 the lk/other arguments to record_error_LockOrder in the order
3706 in which they should have been acquired. */
3707 /* Go look in the laog_exposition mapping, to find the allocation
3708 points for this edge, so we can show the user. */
3709 key.src_ga = lk->guestaddr;
3710 key.dst_ga = other->guestaddr;
3711 key.src_ec = NULL;
3712 key.dst_ec = NULL;
3713 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003714 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003715 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003716 tl_assert(found != &key);
3717 tl_assert(found->src_ga == key.src_ga);
3718 tl_assert(found->dst_ga == key.dst_ga);
3719 tl_assert(found->src_ec);
3720 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003721 HG_(record_error_LockOrder)(
3722 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003723 found->src_ec, found->dst_ec, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003724 } else {
3725 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003726 HG_(record_error_LockOrder)(
3727 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003728 NULL, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003729 }
3730 }
3731
3732 /* Second, add to laog the pairs
3733 (old, lk) | old <- locks already held by thr
3734 Since both old and lk are currently held by thr, their acquired_at
3735 fields must be non-NULL.
3736 */
3737 tl_assert(lk->acquired_at);
3738 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3739 for (i = 0; i < ls_size; i++) {
3740 Lock* old = (Lock*)ls_words[i];
3741 tl_assert(old->acquired_at);
3742 laog__add_edge( old, lk );
3743 }
3744
3745 /* Why "except_Locks" ? We're here because a lock is being
3746 acquired by a thread, and we're in an inconsistent state here.
3747 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3748 When called in this inconsistent state, locks__sanity_check duly
3749 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003750 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003751 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3752}
3753
sewardj866c80c2011-10-22 19:29:51 +00003754/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3755static UWord* UWordV_dup(UWord* words, Word words_size)
3756{
3757 UInt i;
3758
3759 if (words_size == 0)
3760 return NULL;
3761
3762 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3763
3764 for (i = 0; i < words_size; i++)
3765 dup[i] = words[i];
3766
3767 return dup;
3768}
sewardjb4112022007-11-09 22:49:28 +00003769
3770/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3771
3772__attribute__((noinline))
3773static void laog__handle_one_lock_deletion ( Lock* lk )
3774{
3775 WordSetID preds, succs;
3776 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003777 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003778
3779 preds = laog__preds( lk );
3780 succs = laog__succs( lk );
3781
sewardj866c80c2011-10-22 19:29:51 +00003782 // We need to duplicate the payload, as these can be garbage collected
3783 // during the del/add operations below.
sewardjb4112022007-11-09 22:49:28 +00003784 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
sewardj866c80c2011-10-22 19:29:51 +00003785 preds_words = UWordV_dup(preds_words, preds_size);
3786
3787 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3788 succs_words = UWordV_dup(succs_words, succs_size);
3789
sewardjb4112022007-11-09 22:49:28 +00003790 for (i = 0; i < preds_size; i++)
3791 laog__del_edge( (Lock*)preds_words[i], lk );
3792
sewardjb4112022007-11-09 22:49:28 +00003793 for (j = 0; j < succs_size; j++)
3794 laog__del_edge( lk, (Lock*)succs_words[j] );
3795
3796 for (i = 0; i < preds_size; i++) {
3797 for (j = 0; j < succs_size; j++) {
3798 if (preds_words[i] != succs_words[j]) {
3799 /* This can pass unlocked locks to laog__add_edge, since
3800 we're deleting stuff. So their acquired_at fields may
3801 be NULL. */
3802 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3803 }
3804 }
3805 }
sewardj866c80c2011-10-22 19:29:51 +00003806
3807 if (preds_words)
3808 HG_(free) (preds_words);
3809 if (succs_words)
3810 HG_(free) (succs_words);
3811
3812 // Remove lk information from laog links FM
3813 {
3814 LAOGLinks *links;
3815 Lock* linked_lk;
3816
3817 if (VG_(delFromFM) (laog,
3818 (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
3819 tl_assert (linked_lk == lk);
3820 HG_(free) (links);
3821 }
3822 }
3823 /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
sewardjb4112022007-11-09 22:49:28 +00003824}
3825
sewardj1cbc12f2008-11-10 16:16:46 +00003826//__attribute__((noinline))
3827//static void laog__handle_lock_deletions (
3828// WordSetID /* in univ_laog */ locksToDelete
3829// )
3830//{
3831// Word i, ws_size;
3832// UWord* ws_words;
3833//
sewardj1cbc12f2008-11-10 16:16:46 +00003834//
3835// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003836// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003837// for (i = 0; i < ws_size; i++)
3838// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3839//
3840// if (HG_(clo_sanity_flags) & SCE_LAOG)
3841// all__sanity_check("laog__handle_lock_deletions-post");
3842//}
sewardjb4112022007-11-09 22:49:28 +00003843
3844
3845/*--------------------------------------------------------------*/
3846/*--- Malloc/free replacements ---*/
3847/*--------------------------------------------------------------*/
3848
3849typedef
3850 struct {
3851 void* next; /* required by m_hashtable */
3852 Addr payload; /* ptr to actual block */
3853 SizeT szB; /* size requested */
3854 ExeContext* where; /* where it was allocated */
3855 Thread* thr; /* allocating thread */
3856 }
3857 MallocMeta;
3858
3859/* A hash table of MallocMetas, used to track malloc'd blocks
3860 (obviously). */
3861static VgHashTable hg_mallocmeta_table = NULL;
3862
3863
3864static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003865 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003866 tl_assert(md);
3867 return md;
3868}
3869static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003870 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003871}
3872
3873
3874/* Allocate a client block and set up the metadata for it. */
3875
3876static
3877void* handle_alloc ( ThreadId tid,
3878 SizeT szB, SizeT alignB, Bool is_zeroed )
3879{
3880 Addr p;
3881 MallocMeta* md;
3882
3883 tl_assert( ((SSizeT)szB) >= 0 );
3884 p = (Addr)VG_(cli_malloc)(alignB, szB);
3885 if (!p) {
3886 return NULL;
3887 }
3888 if (is_zeroed)
3889 VG_(memset)((void*)p, 0, szB);
3890
3891 /* Note that map_threads_lookup must succeed (cannot assert), since
3892 memory can only be allocated by currently alive threads, hence
3893 they must have an entry in map_threads. */
3894 md = new_MallocMeta();
3895 md->payload = p;
3896 md->szB = szB;
3897 md->where = VG_(record_ExeContext)( tid, 0 );
3898 md->thr = map_threads_lookup( tid );
3899
3900 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3901
3902 /* Tell the lower level memory wranglers. */
3903 evh__new_mem_heap( p, szB, is_zeroed );
3904
3905 return (void*)p;
3906}
3907
3908/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3909 Cast to a signed type to catch any unexpectedly negative args.
3910 We're assuming here that the size asked for is not greater than
3911 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3912 platforms). */
3913static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3914 if (((SSizeT)n) < 0) return NULL;
3915 return handle_alloc ( tid, n, VG_(clo_alignment),
3916 /*is_zeroed*/False );
3917}
3918static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3919 if (((SSizeT)n) < 0) return NULL;
3920 return handle_alloc ( tid, n, VG_(clo_alignment),
3921 /*is_zeroed*/False );
3922}
3923static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3924 if (((SSizeT)n) < 0) return NULL;
3925 return handle_alloc ( tid, n, VG_(clo_alignment),
3926 /*is_zeroed*/False );
3927}
3928static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3929 if (((SSizeT)n) < 0) return NULL;
3930 return handle_alloc ( tid, n, align,
3931 /*is_zeroed*/False );
3932}
3933static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3934 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3935 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3936 /*is_zeroed*/True );
3937}
3938
3939
3940/* Free a client block, including getting rid of the relevant
3941 metadata. */
3942
3943static void handle_free ( ThreadId tid, void* p )
3944{
3945 MallocMeta *md, *old_md;
3946 SizeT szB;
3947
3948 /* First see if we can find the metadata for 'p'. */
3949 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3950 if (!md)
3951 return; /* apparently freeing a bogus address. Oh well. */
3952
3953 tl_assert(md->payload == (Addr)p);
3954 szB = md->szB;
3955
3956 /* Nuke the metadata block */
3957 old_md = (MallocMeta*)
3958 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3959 tl_assert(old_md); /* it must be present - we just found it */
3960 tl_assert(old_md == md);
3961 tl_assert(old_md->payload == (Addr)p);
3962
3963 VG_(cli_free)((void*)old_md->payload);
3964 delete_MallocMeta(old_md);
3965
3966 /* Tell the lower level memory wranglers. */
3967 evh__die_mem_heap( (Addr)p, szB );
3968}
3969
3970static void hg_cli__free ( ThreadId tid, void* p ) {
3971 handle_free(tid, p);
3972}
3973static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3974 handle_free(tid, p);
3975}
3976static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3977 handle_free(tid, p);
3978}
3979
3980
3981static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3982{
3983 MallocMeta *md, *md_new, *md_tmp;
3984 SizeT i;
3985
3986 Addr payload = (Addr)payloadV;
3987
3988 if (((SSizeT)new_size) < 0) return NULL;
3989
3990 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3991 if (!md)
3992 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3993
3994 tl_assert(md->payload == payload);
3995
3996 if (md->szB == new_size) {
3997 /* size unchanged */
3998 md->where = VG_(record_ExeContext)(tid, 0);
3999 return payloadV;
4000 }
4001
4002 if (md->szB > new_size) {
4003 /* new size is smaller */
4004 md->szB = new_size;
4005 md->where = VG_(record_ExeContext)(tid, 0);
4006 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4007 return payloadV;
4008 }
4009
4010 /* else */ {
4011 /* new size is bigger */
4012 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4013
4014 /* First half kept and copied, second half new */
4015 // FIXME: shouldn't we use a copier which implements the
4016 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004017 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004018 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004019 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004020 /* FIXME: can anything funny happen here? specifically, if the
4021 old range contained a lock, then die_mem_heap will complain.
4022 Is that the correct behaviour? Not sure. */
4023 evh__die_mem_heap( payload, md->szB );
4024
4025 /* Copy from old to new */
4026 for (i = 0; i < md->szB; i++)
4027 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4028
4029 /* Because the metadata hash table is indexed by payload address,
4030 we have to get rid of the old hash table entry and make a new
4031 one. We can't just modify the existing metadata in place,
4032 because then it would (almost certainly) be in the wrong hash
4033 chain. */
4034 md_new = new_MallocMeta();
4035 *md_new = *md;
4036
4037 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4038 tl_assert(md_tmp);
4039 tl_assert(md_tmp == md);
4040
4041 VG_(cli_free)((void*)md->payload);
4042 delete_MallocMeta(md);
4043
4044 /* Update fields */
4045 md_new->where = VG_(record_ExeContext)( tid, 0 );
4046 md_new->szB = new_size;
4047 md_new->payload = p_new;
4048 md_new->thr = map_threads_lookup( tid );
4049
4050 /* and add */
4051 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4052
4053 return (void*)p_new;
4054 }
4055}
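/* The pattern the comment above describes, as a standalone sketch
   (hypothetical helper; the tool itself allocates a fresh MallocMeta
   instead). VgHashTable nodes are chained by key, so changing a key
   must be done as remove-then-reinsert, never by mutating the node in
   place. */
#if 0
static void ht_change_key ( VgHashTable tbl, MallocMeta* md,
                            Addr new_payload )
{
   MallocMeta* old
      = (MallocMeta*) VG_(HT_remove)( tbl, (UWord)md->payload );
   tl_assert(old == md);       /* the node must have been present */
   md->payload = new_payload;  /* now safe to change the key ... */
   VG_(HT_add_node)( tbl, (VgHashNode*)md ); /* ... and rechain it */
}
#endif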
4056
njn8b140de2009-02-17 04:31:18 +00004057static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4058{
4059 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4060
4061 // There may be slop, but pretend there isn't because only the asked-for
4062 // area will have been shadowed properly.
4063 return ( md ? md->szB : 0 );
4064}
4065
sewardjb4112022007-11-09 22:49:28 +00004066
sewardj095d61e2010-03-11 13:43:18 +00004067/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004068 Slow linear search. With a bit of hash table help if 'data_addr'
4069 is either the start of a block or up to 15 word-sized steps along
4070 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004071
4072static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4073{
sewardjc8028ad2010-05-05 09:34:42 +00004074 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4075 right at it. */
4076 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4077 return True;
4078 /* else normal interval rules apply */
4079 if (LIKELY(a < mm->payload)) return False;
4080 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4081 return True;
sewardj095d61e2010-03-11 13:43:18 +00004082}
4083
sewardjc8028ad2010-05-05 09:34:42 +00004084Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00004085 /*OUT*/Addr* payload,
4086 /*OUT*/SizeT* szB,
4087 Addr data_addr )
4088{
4089 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004090 Int i;
4091 const Int n_fast_check_words = 16;
4092
4093 /* First, do a few fast searches on the basis that data_addr might
4094 be exactly the start of a block or up to 15 words inside. This
4095 can happen commonly via the creq
4096 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4097 for (i = 0; i < n_fast_check_words; i++) {
4098 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4099 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4100 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4101 goto found;
4102 }
4103
sewardj095d61e2010-03-11 13:43:18 +00004104 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004105 some such, it's hard to see how to do better. We have to check
4106 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004107 VG_(HT_ResetIter)(hg_mallocmeta_table);
4108 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004109 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4110 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004111 }
sewardjc8028ad2010-05-05 09:34:42 +00004112
4113 /* Not found. Bah. */
4114 return False;
4115 /*NOTREACHED*/
4116
4117 found:
4118 tl_assert(mm);
4119 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4120 if (where) *where = mm->where;
4121 if (payload) *payload = mm->payload;
4122 if (szB) *szB = mm->szB;
4123 return True;
sewardj095d61e2010-03-11 13:43:18 +00004124}
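/* Usage sketch (hypothetical caller): mapping a racy data address
   back to its enclosing heap block, as the error-reporting machinery
   does. Passing NULL for 'where' is allowed, as in the client-request
   handler further below. */
#if 0
static void demo_describe_addr ( Addr data_addr )
{
   Addr  payload = 0;
   SizeT szB     = 0;
   if (HG_(mm_find_containing_block)( NULL, &payload, &szB, data_addr ))
      VG_(printf)("%#lx is %lu bytes inside a %lu-byte block\n",
                  data_addr, (UWord)(data_addr - payload), (UWord)szB);
}
#endif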
4125
4126
sewardjb4112022007-11-09 22:49:28 +00004127/*--------------------------------------------------------------*/
4128/*--- Instrumentation ---*/
4129/*--------------------------------------------------------------*/
4130
sewardjffce8152011-06-24 10:09:41 +00004131#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4132#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4133#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4134#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4135#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4136
4137static void instrument_mem_access ( IRSB* sbOut,
sewardjb4112022007-11-09 22:49:28 +00004138 IRExpr* addr,
4139 Int szB,
4140 Bool isStore,
sewardjffce8152011-06-24 10:09:41 +00004141 Int hWordTy_szB,
4142 Int goff_sp )
sewardjb4112022007-11-09 22:49:28 +00004143{
4144 IRType tyAddr = Ity_INVALID;
4145 HChar* hName = NULL;
4146 void* hAddr = NULL;
4147 Int regparms = 0;
4148 IRExpr** argv = NULL;
4149 IRDirty* di = NULL;
4150
sewardjffce8152011-06-24 10:09:41 +00004151 // THRESH is the size of the window above SP (well,
4152 // mostly above) that we assume implies a stack reference.
4153 const Int THRESH = 4096 * 4; // somewhat arbitrary
4154 const Int rz_szB = VG_STACK_REDZONE_SZB;
4155
sewardjb4112022007-11-09 22:49:28 +00004156 tl_assert(isIRAtom(addr));
4157 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4158
sewardjffce8152011-06-24 10:09:41 +00004159 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
sewardjb4112022007-11-09 22:49:28 +00004160 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4161
4162 /* So the effective address is in 'addr' now. */
4163 regparms = 1; // unless stated otherwise
4164 if (isStore) {
4165 switch (szB) {
4166 case 1:
sewardj23f12002009-07-24 08:45:08 +00004167 hName = "evh__mem_help_cwrite_1";
4168 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004169 argv = mkIRExprVec_1( addr );
4170 break;
4171 case 2:
sewardj23f12002009-07-24 08:45:08 +00004172 hName = "evh__mem_help_cwrite_2";
4173 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004174 argv = mkIRExprVec_1( addr );
4175 break;
4176 case 4:
sewardj23f12002009-07-24 08:45:08 +00004177 hName = "evh__mem_help_cwrite_4";
4178 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004179 argv = mkIRExprVec_1( addr );
4180 break;
4181 case 8:
sewardj23f12002009-07-24 08:45:08 +00004182 hName = "evh__mem_help_cwrite_8";
4183 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004184 argv = mkIRExprVec_1( addr );
4185 break;
4186 default:
4187 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4188 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004189 hName = "evh__mem_help_cwrite_N";
4190 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004191 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4192 break;
4193 }
4194 } else {
4195 switch (szB) {
4196 case 1:
sewardj23f12002009-07-24 08:45:08 +00004197 hName = "evh__mem_help_cread_1";
4198 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004199 argv = mkIRExprVec_1( addr );
4200 break;
4201 case 2:
sewardj23f12002009-07-24 08:45:08 +00004202 hName = "evh__mem_help_cread_2";
4203 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004204 argv = mkIRExprVec_1( addr );
4205 break;
4206 case 4:
sewardj23f12002009-07-24 08:45:08 +00004207 hName = "evh__mem_help_cread_4";
4208 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004209 argv = mkIRExprVec_1( addr );
4210 break;
4211 case 8:
sewardj23f12002009-07-24 08:45:08 +00004212 hName = "evh__mem_help_cread_8";
4213 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004214 argv = mkIRExprVec_1( addr );
4215 break;
4216 default:
4217 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4218 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004219 hName = "evh__mem_help_cread_N";
4220 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004221 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4222 break;
4223 }
4224 }
4225
sewardjffce8152011-06-24 10:09:41 +00004226 /* Create the helper. */
sewardjb4112022007-11-09 22:49:28 +00004227 tl_assert(hName);
4228 tl_assert(hAddr);
4229 tl_assert(argv);
4230 di = unsafeIRDirty_0_N( regparms,
4231 hName, VG_(fnptr_to_fnentry)( hAddr ),
4232 argv );
sewardjffce8152011-06-24 10:09:41 +00004233
4234 if (! HG_(clo_check_stack_refs)) {
4235 /* We're ignoring memory references which are (obviously) to the
4236 stack. In fact just skip stack refs that are within 4 pages
4237 of SP (SP - the redzone, really), as that's simple, easy, and
4238 filters out most stack references. */
4239 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4240 some arbitrary N. If that is true then addr is outside the
4241 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4242 pages) then we can say addr is within a few pages of SP and
4243 so can't possibly be a heap access, and so can be skipped.
4244
4245 Note that the condition simplifies to
4246 (addr - SP + RZ) >u N
4247 which generates better code in x86/amd64 backends, but it does
4248 not unfortunately simplify to
4249 (addr - SP) >u (N - RZ)
4250 (would be beneficial because N - RZ is a constant) because
4251 wraparound arithmetic messes up the comparison. eg.
4252 20 >u 10 == True,
4253 but (20 - 15) >u (10 - 15) == 5 >u 0xFFFFFFFB == False (32-bit).
4254 */
4255 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4256 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4257
4258 /* "addr - SP" */
4259 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4260 addStmtToIRSB(
4261 sbOut,
4262 assign(addr_minus_sp,
4263 tyAddr == Ity_I32
4264 ? binop(Iop_Sub32, addr, mkexpr(sp))
4265 : binop(Iop_Sub64, addr, mkexpr(sp)))
4266 );
4267
4268 /* "addr - SP + RZ" */
4269 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4270 addStmtToIRSB(
4271 sbOut,
4272 assign(diff,
4273 tyAddr == Ity_I32
4274 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4275 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4276 );
4277
4278 IRTemp guard = newIRTemp(sbOut->tyenv, Ity_I1);
4279 addStmtToIRSB(
4280 sbOut,
4281 assign(guard,
4282 tyAddr == Ity_I32
4283 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4284 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4285 );
4286 di->guard = mkexpr(guard);
4287 }
4288
4289 /* Add the helper. */
4290 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
sewardjb4112022007-11-09 22:49:28 +00004291}
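/* A minimal C-level sketch (hypothetical helper, compiled out) of the
   guard the IR above computes: one unsigned subtract-and-compare tests
   whether 'a' lies outside the window [sp - RZ, sp - RZ + THRESH],
   with wraparound doing the work of the two-sided range check. */
#if 0
static inline Bool is_outside_stack_window ( Addr a, Addr sp )
{
   const Addr THRESH = 4096 * 4;   /* same window as above */
   return (a - (sp - VG_STACK_REDZONE_SZB)) > THRESH;
}
#endif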
4292
4293
sewardja0eee322009-07-31 08:46:35 +00004294/* Figure out if GA is a guest code address in the dynamic linker, and
4295 if so return True. Otherwise (and in case of any doubt) return
4296 False. (Fails safe: False is the harmless answer.) */
4297static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4298{
4299 DebugInfo* dinfo;
4300 const UChar* soname;
4301 if (0) return False;
4302
sewardje3f1e592009-07-31 09:41:29 +00004303 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004304 if (!dinfo) return False;
4305
sewardje3f1e592009-07-31 09:41:29 +00004306 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004307 tl_assert(soname);
4308 if (0) VG_(printf)("%s\n", soname);
4309
4310# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004311 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004312 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4313 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4314 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4315 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4316# elif defined(VGO_darwin)
4317 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4318# else
4319# error "Unsupported OS"
4320# endif
4321 return False;
4322}
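/* Sketch (hypothetical, compiled out) of the page-granularity caching
   that hg_instrument below wraps around this predicate: the DebugInfo
   lookup is expensive, so it is redone only when the instruction
   address crosses into a different 4K page. */
#if 0
static Bool cached_is_in_LDSO ( Addr64 cia )
{
   static Addr64 mask4K = 1;     /* guaranteed mismatch on first call */
   static Bool   inLDSO = False;
   if ((cia & ~(Addr64)0xFFF) != mask4K) {
      mask4K = cia & ~(Addr64)0xFFF;
      inLDSO = is_in_dynamic_linker_shared_object(cia);
   }
   return inLDSO;
}
#endif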
4323
sewardjb4112022007-11-09 22:49:28 +00004324static
4325IRSB* hg_instrument ( VgCallbackClosure* closure,
4326 IRSB* bbIn,
4327 VexGuestLayout* layout,
4328 VexGuestExtents* vge,
4329 IRType gWordTy, IRType hWordTy )
4330{
sewardj1c0ce7a2009-07-01 08:10:49 +00004331 Int i;
4332 IRSB* bbOut;
4333 Addr64 cia; /* address of current insn */
4334 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004335 Bool inLDSO = False;
4336 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004337
sewardjffce8152011-06-24 10:09:41 +00004338 const Int goff_sp = layout->offset_SP;
4339
sewardjb4112022007-11-09 22:49:28 +00004340 if (gWordTy != hWordTy) {
4341 /* We don't currently support this case. */
4342 VG_(tool_panic)("host/guest word size mismatch");
4343 }
4344
sewardja0eee322009-07-31 08:46:35 +00004345 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4346 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4347 }
4348
sewardjb4112022007-11-09 22:49:28 +00004349 /* Set up BB */
4350 bbOut = emptyIRSB();
4351 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4352 bbOut->next = deepCopyIRExpr(bbIn->next);
4353 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004354 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004355
4356 // Copy verbatim any IR preamble preceding the first IMark
4357 i = 0;
4358 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4359 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4360 i++;
4361 }
4362
sewardj1c0ce7a2009-07-01 08:10:49 +00004363 // Get the first statement, and initial cia from it
4364 tl_assert(bbIn->stmts_used > 0);
4365 tl_assert(i < bbIn->stmts_used);
4366 st = bbIn->stmts[i];
4367 tl_assert(Ist_IMark == st->tag);
4368 cia = st->Ist.IMark.addr;
4369 st = NULL;
4370
sewardjb4112022007-11-09 22:49:28 +00004371 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004372 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004373 tl_assert(st);
4374 tl_assert(isFlatIRStmt(st));
4375 switch (st->tag) {
4376 case Ist_NoOp:
4377 case Ist_AbiHint:
4378 case Ist_Put:
4379 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004380 case Ist_Exit:
4381 /* None of these can contain any memory references. */
4382 break;
4383
sewardj1c0ce7a2009-07-01 08:10:49 +00004384 case Ist_IMark:
4385 /* no mem refs, but note the insn address. */
4386 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004387 /* Don't instrument the dynamic linker. It generates a
4388 lot of races which we just expensively suppress, so
4389 it's pointless.
4390
4391 Avoid flooding is_in_dynamic_linker_shared_object with
4392 requests by only checking at transitions between 4K
4393 pages. */
4394 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4395 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4396 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4397 inLDSO = is_in_dynamic_linker_shared_object(cia);
4398 } else {
4399 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4400 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004401 break;
4402
sewardjb4112022007-11-09 22:49:28 +00004403 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004404 switch (st->Ist.MBE.event) {
4405 case Imbe_Fence:
4406 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004407 default:
4408 goto unhandled;
4409 }
sewardjb4112022007-11-09 22:49:28 +00004410 break;
4411
sewardj1c0ce7a2009-07-01 08:10:49 +00004412 case Ist_CAS: {
4413 /* Atomic read-modify-write cycle. Just pretend it's a
4414 read. */
4415 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004416 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4417 if (isDCAS) {
4418 tl_assert(cas->expdHi);
4419 tl_assert(cas->dataHi);
4420 } else {
4421 tl_assert(!cas->expdHi);
4422 tl_assert(!cas->dataHi);
4423 }
4424 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004425 if (!inLDSO) {
4426 instrument_mem_access(
4427 bbOut,
4428 cas->addr,
4429 (isDCAS ? 2 : 1)
4430 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4431 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004432 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004433 );
4434 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004435 break;
4436 }
4437
sewardjdb5907d2009-11-26 17:20:21 +00004438 case Ist_LLSC: {
4439 /* We pretend store-conditionals don't exist, viz, ignore
4440 them. Whereas load-linked's are treated the same as
4441 normal loads. */
4442 IRType dataTy;
4443 if (st->Ist.LLSC.storedata == NULL) {
4444 /* LL */
4445 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004446 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004447 instrument_mem_access(
4448 bbOut,
4449 st->Ist.LLSC.addr,
4450 sizeofIRType(dataTy),
4451 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004452 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004453 );
4454 }
sewardjdb5907d2009-11-26 17:20:21 +00004455 } else {
4456 /* SC */
4457 /*ignore */
4458 }
4459 break;
4460 }
4461
4462 case Ist_Store:
4463 /* Plain stores. (Store-conditionals were already dealt
4464 with -- ignored -- in the Ist_LLSC case above.) */
4465 if (!inLDSO) {
4466 instrument_mem_access(
4467 bbOut,
4468 st->Ist.Store.addr,
4469 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4470 True/*isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004471 sizeofIRType(hWordTy), goff_sp
sewardjdb5907d2009-11-26 17:20:21 +00004472 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004473 }
njnb83caf22009-05-25 01:47:56 +00004474 break;
sewardjb4112022007-11-09 22:49:28 +00004475
4476 case Ist_WrTmp: {
sewardj1c0ce7a2009-07-01 08:10:49 +00004477 /* Loads: we don't care whether it's a vanilla load or a
4478 load-linked; both are instrumented the same way. */
sewardjb4112022007-11-09 22:49:28 +00004479 IRExpr* data = st->Ist.WrTmp.data;
4480 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004481 if (!inLDSO) {
4482 instrument_mem_access(
4483 bbOut,
4484 data->Iex.Load.addr,
4485 sizeofIRType(data->Iex.Load.ty),
4486 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004487 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004488 );
4489 }
sewardjb4112022007-11-09 22:49:28 +00004490 }
4491 break;
4492 }
4493
4494 case Ist_Dirty: {
4495 Int dataSize;
4496 IRDirty* d = st->Ist.Dirty.details;
4497 if (d->mFx != Ifx_None) {
4498 /* This dirty helper accesses memory. Collect the
4499 details. */
4500 tl_assert(d->mAddr != NULL);
4501 tl_assert(d->mSize != 0);
4502 dataSize = d->mSize;
4503 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004504 if (!inLDSO) {
4505 instrument_mem_access(
4506 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004507 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004508 );
4509 }
sewardjb4112022007-11-09 22:49:28 +00004510 }
4511 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004512 if (!inLDSO) {
4513 instrument_mem_access(
4514 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004515 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004516 );
4517 }
sewardjb4112022007-11-09 22:49:28 +00004518 }
4519 } else {
4520 tl_assert(d->mAddr == NULL);
4521 tl_assert(d->mSize == 0);
4522 }
4523 break;
4524 }
4525
4526 default:
sewardjf98e1c02008-10-25 16:22:41 +00004527 unhandled:
4528 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004529 tl_assert(0);
4530
4531 } /* switch (st->tag) */
4532
4533 addStmtToIRSB( bbOut, st );
4534 } /* iterate over bbIn->stmts */
4535
4536 return bbOut;
4537}
4538
sewardjffce8152011-06-24 10:09:41 +00004539#undef binop
4540#undef mkexpr
4541#undef mkU32
4542#undef mkU64
4543#undef assign
4544
sewardjb4112022007-11-09 22:49:28 +00004545
4546/*----------------------------------------------------------------*/
4547/*--- Client requests ---*/
4548/*----------------------------------------------------------------*/
4549
4550/* Sheesh. Yet another goddam finite map. */
4551static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4552
4553static void map_pthread_t_to_Thread_INIT ( void ) {
4554 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004555 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4556 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004557 tl_assert(map_pthread_t_to_Thread != NULL);
4558 }
4559}
4560
4561
4562static
4563Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4564{
4565 if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
4566 return False;
4567
4568 /* Anything that gets past the above check is one of ours, so we
4569 should be able to handle it. */
4570
4571 /* default, meaningless return value, unless otherwise set */
4572 *ret = 0;
4573
4574 switch (args[0]) {
4575
4576 /* --- --- User-visible client requests --- --- */
4577
4578 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004579 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004580 args[1], args[2]);
4581 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004582 are any held locks etc in the area. Calling evh__die_mem
4583 and then evh__new_mem is a bit inefficient; probably just
4584 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004585 if (args[2] > 0) { /* length */
4586 evh__die_mem(args[1], args[2]);
4587 /* and then set it to New */
4588 evh__new_mem(args[1], args[2]);
4589 }
4590 break;
4591
sewardjc8028ad2010-05-05 09:34:42 +00004592 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4593 Addr payload = 0;
4594 SizeT pszB = 0;
4595 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4596 args[1]);
4597 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4598 if (pszB > 0) {
4599 evh__die_mem(payload, pszB);
4600 evh__new_mem(payload, pszB);
4601 }
4602 *ret = pszB;
4603 } else {
4604 *ret = (UWord)-1;
4605 }
4606 break;
4607 }
4608
sewardj406bac82010-03-03 23:03:40 +00004609 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4610 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4611 args[1], args[2]);
4612 if (args[2] > 0) { /* length */
4613 evh__untrack_mem(args[1], args[2]);
4614 }
4615 break;
4616
4617 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4618 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4619 args[1], args[2]);
4620 if (args[2] > 0) { /* length */
4621 evh__new_mem(args[1], args[2]);
4622 }
4623 break;
4624
sewardjb4112022007-11-09 22:49:28 +00004625 /* --- --- Client requests for Helgrind's use only --- --- */
4626
4627 /* Some thread is telling us its pthread_t value. Record the
4628 binding between that and the associated Thread*, so we can
4629 later find the Thread* again when notified of a join by the
4630 thread. */
4631 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4632 Thread* my_thr = NULL;
4633 if (0)
4634 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4635 (void*)args[1]);
4636 map_pthread_t_to_Thread_INIT();
4637 my_thr = map_threads_maybe_lookup( tid );
4638 /* This assertion should hold because the map_threads (tid to
4639 Thread*) binding should have been made at the point of
4640 low-level creation of this thread, which should have
4641 happened prior to us getting this client request for it.
4642 That's because this client request is sent from
4643 client-world from the 'thread_wrapper' function, which
4644 only runs once the thread has been low-level created. */
4645 tl_assert(my_thr != NULL);
4646 /* So now we know that (pthread_t)args[1] is associated with
4647 (Thread*)my_thr. Note that down. */
4648 if (0)
4649 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4650 (void*)args[1], (void*)my_thr );
sewardj896f6f92008-08-19 08:38:52 +00004651 VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004652 break;
4653 }
4654
4655 case _VG_USERREQ__HG_PTH_API_ERROR: {
4656 Thread* my_thr = NULL;
4657 map_pthread_t_to_Thread_INIT();
4658 my_thr = map_threads_maybe_lookup( tid );
4659 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004660 HG_(record_error_PthAPIerror)(
4661 my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004662 break;
4663 }
4664
4665 /* This thread (tid) has completed a join with the quitting
4666 thread whose pthread_t is in args[1]. */
4667 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4668 Thread* thr_q = NULL; /* quitter Thread* */
4669 Bool found = False;
4670 if (0)
4671 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4672 (void*)args[1]);
4673 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004674 found = VG_(lookupFM)( map_pthread_t_to_Thread,
sewardjb5f29642007-11-16 12:02:43 +00004675 NULL, (Word*)&thr_q, (Word)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004676 /* Can this fail? It would mean that our pthread_join
4677 wrapper observed a successful join on args[1] yet that
4678 thread never existed (or at least, it never lodged an
4679 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4680 sounds like a bug in the threads library. */
4681 // FIXME: get rid of this assertion; handle properly
4682 tl_assert(found);
4683 if (found) {
4684 if (0)
4685 VG_(printf)(".................... quitter Thread* = %p\n",
4686 thr_q);
4687 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4688 }
4689 break;
4690 }
4691
4692 /* EXPOSITION only: by intercepting lock init events we can show
4693 the user where the lock was initialised, rather than only
4694 being able to show where it was first locked. Intercepting
4695 lock initialisations is not necessary for the basic operation
4696 of the race checker. */
4697 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
4698 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
4699 break;
4700
4701 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
4702 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
4703 break;
4704
4705 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
4706 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
4707 break;
4708
4709 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
4710 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
4711 break;
4712
4713 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
4714 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
4715 break;
4716
4717 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
4718 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
4719 break;
4720
4721 /* This thread is about to do pthread_cond_signal on the
4722 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
4723 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
4724 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
4725 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
4726 break;
4727
4728 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
4729 Returns a flag indicating whether or not the mutex is believed to be
4730 valid for this operation. */
4731 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
4732 Bool mutex_is_valid
4733 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
4734 (void*)args[2] );
4735 *ret = mutex_is_valid ? 1 : 0;
4736 break;
4737 }
4738
sewardjf98e1c02008-10-25 16:22:41 +00004739 /* cond=arg[1] */
4740 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
4741 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
4742 break;
4743
sewardjb4112022007-11-09 22:49:28 +00004744 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
4745 mutex=arg[2] */
4746 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
4747 evh__HG_PTHREAD_COND_WAIT_POST( tid,
4748 (void*)args[1], (void*)args[2] );
4749 break;
4750
4751 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
4752 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
4753 break;
4754
4755 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
4756 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
4757 break;
4758
sewardj789c3c52008-02-25 12:10:07 +00004759 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00004760 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00004761 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
4762 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00004763 break;
4764
4765 /* rwlock=arg[1], isW=arg[2] */
4766 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
4767 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
4768 break;
4769
4770 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
4771 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
4772 break;
4773
4774 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
4775 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
4776 break;
4777
sewardj11e352f2007-11-30 11:11:02 +00004778 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
4779 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00004780 break;
4781
sewardj11e352f2007-11-30 11:11:02 +00004782 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
4783 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004784 break;
4785
sewardj11e352f2007-11-30 11:11:02 +00004786 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
4787 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
4788 break;
4789
4790 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
4791 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004792 break;
4793
sewardj9f569b72008-11-13 13:33:09 +00004794 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00004795 /* pth_bar_t*, ulong count, ulong resizable */
4796 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
4797 args[2], args[3] );
4798 break;
4799
4800 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
4801 /* pth_bar_t*, ulong newcount */
4802 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
4803 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00004804 break;
4805
4806 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
4807 /* pth_bar_t* */
4808 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
4809 break;
4810
4811 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
4812 /* pth_bar_t* */
4813 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
4814 break;
sewardjb4112022007-11-09 22:49:28 +00004815
sewardj5a644da2009-08-11 10:35:58 +00004816 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
4817 /* pth_spinlock_t* */
4818 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
4819 break;
4820
4821 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
4822 /* pth_spinlock_t* */
4823 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
4824 break;
4825
4826 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
4827 /* pth_spinlock_t*, Word */
4828 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
4829 break;
4830
4831 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
4832 /* pth_spinlock_t* */
4833 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
4834 break;
4835
4836 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
4837 /* pth_spinlock_t* */
4838 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
4839 break;
4840
sewardjed2e72e2009-08-14 11:08:24 +00004841 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
4842 /* char* who */
4843 HChar* who = (HChar*)args[1];
4844 HChar buf[50 + 50];
4845 Thread* thr = map_threads_maybe_lookup( tid );
4846 tl_assert( thr ); /* I must be mapped */
4847 tl_assert( who );
4848 tl_assert( VG_(strlen)(who) <= 50 );
4849 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
4850 /* record_error_Misc strdup's buf, so this is safe: */
4851 HG_(record_error_Misc)( thr, buf );
4852 break;
4853 }
4854
4855 case _VG_USERREQ__HG_USERSO_SEND_PRE:
4856 /* UWord arbitrary-SO-tag */
4857 evh__HG_USERSO_SEND_PRE( tid, args[1] );
4858 break;
4859
4860 case _VG_USERREQ__HG_USERSO_RECV_POST:
4861 /* UWord arbitrary-SO-tag */
4862 evh__HG_USERSO_RECV_POST( tid, args[1] );
4863 break;
4864
sewardj6015d0e2011-03-11 19:10:48 +00004865 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
4866 /* UWord arbitrary-SO-tag */
4867 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
4868 break;
4869
sewardjb4112022007-11-09 22:49:28 +00004870 default:
4871 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00004872 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
4873 args[0]);
sewardjb4112022007-11-09 22:49:28 +00004874 }
4875
4876 return True;
4877}
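/* Client-side sketch of how two of the requests handled above are
   raised. The ANNOTATE_* macros come from helgrind.h and expand to
   _VG_USERREQ__HG_USERSO_SEND_PRE / _VG_USERREQ__HG_USERSO_RECV_POST;
   the surrounding code (names, the flag protocol) is hypothetical. */
#if 0
#include "helgrind.h"

static volatile int msg_ready = 0;
static int          msg_payload;

static void producer ( void )               /* runs in thread 1 */
{
   msg_payload = 42;
   ANNOTATE_HAPPENS_BEFORE(&msg_ready);     /* SEND_PRE on this tag */
   msg_ready = 1;
}

static void consumer ( void )               /* runs in thread 2 */
{
   while (!msg_ready) { /* spin */ }
   ANNOTATE_HAPPENS_AFTER(&msg_ready);      /* RECV_POST on this tag */
   /* reads of msg_payload are now ordered after the producer's
      write, so Helgrind does not report them as racing */
}
#endif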
4878
4879
4880/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004881/*--- Setup ---*/
4882/*----------------------------------------------------------------*/
4883
4884static Bool hg_process_cmd_line_option ( Char* arg )
4885{
njn83df0b62009-02-25 01:01:05 +00004886 Char* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00004887
njn83df0b62009-02-25 01:01:05 +00004888 if VG_BOOL_CLO(arg, "--track-lockorders",
4889 HG_(clo_track_lockorders)) {}
4890 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
4891 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00004892
4893 else if VG_XACT_CLO(arg, "--history-level=none",
4894 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00004895 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00004896 HG_(clo_history_level), 1);
4897 else if VG_XACT_CLO(arg, "--history-level=full",
4898 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00004899
sewardjf585e482009-08-16 22:52:29 +00004900 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00004901 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00004902 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00004903 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00004904
sewardj11e352f2007-11-30 11:11:02 +00004905 /* a 6-char string of 0s and 1s --> 6 binary flag bits */
njn83df0b62009-02-25 01:01:05 +00004906 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00004907 Int j;
sewardjb4112022007-11-09 22:49:28 +00004908
njn83df0b62009-02-25 01:01:05 +00004909 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00004910 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00004911 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00004912 return False;
4913 }
sewardj11e352f2007-11-30 11:11:02 +00004914 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00004915 if ('0' == tmp_str[j]) { /* do nothing */ }
4916 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00004917 else {
sewardj11e352f2007-11-30 11:11:02 +00004918 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00004919 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00004920 return False;
4921 }
4922 }
sewardjf98e1c02008-10-25 16:22:41 +00004923 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00004924 }
4925
sewardj622fe492011-03-11 21:06:59 +00004926 else if VG_BOOL_CLO(arg, "--free-is-write",
4927 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00004928
4929 else if VG_XACT_CLO(arg, "--vts-pruning=never",
4930 HG_(clo_vts_pruning), 0);
4931 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
4932 HG_(clo_vts_pruning), 1);
4933 else if VG_XACT_CLO(arg, "--vts-pruning=always",
4934 HG_(clo_vts_pruning), 2);
4935
4936 else if VG_BOOL_CLO(arg, "--check-stack-refs",
4937 HG_(clo_check_stack_refs)) {}
4938
sewardjb4112022007-11-09 22:49:28 +00004939 else
4940 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4941
4942 return True;
4943}
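/* Example invocations (illustrative only) exercising the options
   parsed above:
      valgrind --tool=helgrind --history-level=approx ./app
      valgrind --tool=helgrind --conflict-cache-size=5000000 ./app
      valgrind --tool=helgrind --hg-sanity-flags=000011 ./app
   The last enables sanity checks at lock/unlock events and at thread
   create/join events; hg_print_debug_usage below lists the meaning of
   each bit. */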
4944
4945static void hg_print_usage ( void )
4946{
4947 VG_(printf)(
sewardj622fe492011-03-11 21:06:59 +00004948" --free-is-write=no|yes treat heap frees as writes [no]\n"
sewardj849b0ed2008-12-21 10:43:10 +00004949" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00004950" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00004951" full: show both stack traces for a data race (can be very slow)\n"
4952" approx: full trace for one thread, approx for the other (faster)\n"
4953" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00004954" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjffce8152011-06-24 10:09:41 +00004955" --check-stack-refs=no|yes race-check reads and writes on the\n"
4956" main stack and thread stacks? [yes]\n"
sewardjb4112022007-11-09 22:49:28 +00004957 );
sewardjb4112022007-11-09 22:49:28 +00004958}
4959
4960static void hg_print_debug_usage ( void )
4961{
sewardjb4112022007-11-09 22:49:28 +00004962 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
4963 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00004964 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00004965 " at events (X = 0|1) [000000]\n");
4966 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00004967 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00004968 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00004969 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
4970 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00004971 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00004972 VG_(printf)(" 000010 at lock/unlock events\n");
4973 VG_(printf)(" 000001 at thread create/join events\n");
sewardjffce8152011-06-24 10:09:41 +00004974 VG_(printf)(
4975" --vts-pruning=never|auto|always [auto]\n"
4976" never: is never done (may cause big space leaks in Helgrind)\n"
4977" auto: done just often enough to keep space usage under control\n"
4978" always: done after every VTS GC (mostly just a big time waster)\n"
4979 );
sewardjb4112022007-11-09 22:49:28 +00004980}
4981
sewardjb4112022007-11-09 22:49:28 +00004982static void hg_fini ( Int exitcode )
4983{
sewardj2d9e8742009-08-07 15:46:56 +00004984 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4985 VG_(message)(Vg_UserMsg,
4986 "For counts of detected and suppressed errors, "
4987 "rerun with: -v\n");
4988 }
4989
4990 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
4991 && HG_(clo_history_level) >= 2) {
4992 VG_(umsg)(
4993 "Use --history-level=approx or =none to gain increased speed, at\n" );
4994 VG_(umsg)(
4995 "the cost of reduced accuracy of conflicting-access information\n");
4996 }
4997
sewardjb4112022007-11-09 22:49:28 +00004998 if (SHOW_DATA_STRUCTURES)
4999 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00005000 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00005001 all__sanity_check("SK_(fini)");
5002
sewardj2d9e8742009-08-07 15:46:56 +00005003 if (VG_(clo_stats)) {
sewardjb4112022007-11-09 22:49:28 +00005004
5005 if (1) {
5006 VG_(printf)("\n");
sewardjb4112022007-11-09 22:49:28 +00005007 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
sewardjc1fb9d22011-02-28 09:03:44 +00005008 if (HG_(clo_track_lockorders)) {
5009 VG_(printf)("\n");
5010 HG_(ppWSUstats)( univ_laog, "univ_laog" );
5011 }
sewardjb4112022007-11-09 22:49:28 +00005012 }
5013
sewardjf98e1c02008-10-25 16:22:41 +00005014 //zz VG_(printf)("\n");
5015 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
5016 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
5017 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
5018 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
5019 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
5020 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
5021 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
5022 //zz stats__hbefore_stk_hwm);
5023 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
5024 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
sewardjb4112022007-11-09 22:49:28 +00005025
5026 VG_(printf)("\n");
barta0b6b2c2008-07-07 06:49:24 +00005027 VG_(printf)(" locksets: %'8d unique lock sets\n",
sewardjb4112022007-11-09 22:49:28 +00005028 (Int)HG_(cardinalityWSU)( univ_lsets ));
sewardjc1fb9d22011-02-28 09:03:44 +00005029 if (HG_(clo_track_lockorders)) {
5030 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
5031 (Int)HG_(cardinalityWSU)( univ_laog ));
5032 }
sewardjb4112022007-11-09 22:49:28 +00005033
sewardjd52392d2008-11-08 20:36:26 +00005034 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
5035 // stats__ga_LL_adds,
5036 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
sewardjb4112022007-11-09 22:49:28 +00005037
sewardjf98e1c02008-10-25 16:22:41 +00005038 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
5039 HG_(stats__LockN_to_P_queries),
5040 HG_(stats__LockN_to_P_get_map_size)() );
sewardjb4112022007-11-09 22:49:28 +00005041
sewardjf98e1c02008-10-25 16:22:41 +00005042 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
5043 HG_(stats__string_table_queries),
5044 HG_(stats__string_table_get_map_size)() );
sewardjc1fb9d22011-02-28 09:03:44 +00005045 if (HG_(clo_track_lockorders)) {
5046 VG_(printf)(" LAOG: %'8d map size\n",
5047 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
5048 VG_(printf)(" LAOG exposition: %'8d map size\n",
5049 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
5050 }
5051
barta0b6b2c2008-07-07 06:49:24 +00005052 VG_(printf)(" locks: %'8lu acquires, "
5053 "%'lu releases\n",
sewardjb4112022007-11-09 22:49:28 +00005054 stats__lockN_acquires,
5055 stats__lockN_releases
5056 );
barta0b6b2c2008-07-07 06:49:24 +00005057 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
sewardjb4112022007-11-09 22:49:28 +00005058
5059 VG_(printf)("\n");
sewardjf98e1c02008-10-25 16:22:41 +00005060 libhb_shutdown(True);
sewardjb4112022007-11-09 22:49:28 +00005061 }
5062}
5063
sewardjf98e1c02008-10-25 16:22:41 +00005064/* FIXME: move these somewhere sane */
5065
5066static
5067void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5068{
5069 Thread* thr;
5070 ThreadId tid;
5071 UWord nActual;
5072 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005073 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005074 tl_assert(thr);
5075 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5076 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
5077 NULL, NULL, 0 );
5078 tl_assert(nActual <= nRequest);
5079 for (; nActual < nRequest; nActual++)
5080 frames[nActual] = 0;
5081}
5082
5083static
sewardj23f12002009-07-24 08:45:08 +00005084ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00005085{
5086 Thread* thr;
5087 ThreadId tid;
5088 ExeContext* ec;
5089 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005090 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005091 tl_assert(thr);
5092 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00005093 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00005094 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00005095 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00005096}
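/* Contract sketch (hypothetical caller, compiled out). The zero-fill
   loop in for_libhb__get_stacktrace matters because, as far as can be
   told from this side of the interface, libhb treats the buffer as a
   fixed-width snapshot with no separate length field. */
#if 0
static void demo_take_snapshot ( Thr* hbt )
{
   Addr frames[8];
   for_libhb__get_stacktrace( hbt, frames, 8 );
   /* frames[0..7] are now all valid; unused trailing slots are 0 */
}
#endif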
5097
5098
sewardjc1fb9d22011-02-28 09:03:44 +00005099static void hg_post_clo_init ( void )
sewardjb4112022007-11-09 22:49:28 +00005100{
sewardjf98e1c02008-10-25 16:22:41 +00005101 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00005102
sewardjc1fb9d22011-02-28 09:03:44 +00005103 /////////////////////////////////////////////
5104 hbthr_root = libhb_init( for_libhb__get_stacktrace,
5105 for_libhb__get_EC );
5106 /////////////////////////////////////////////
5107
5108
5109 if (HG_(clo_track_lockorders))
5110 laog__init();
5111
5112 initialise_data_structures(hbthr_root);
5113}
5114
5115static void hg_pre_clo_init ( void )
5116{
sewardjb4112022007-11-09 22:49:28 +00005117 VG_(details_name) ("Helgrind");
5118 VG_(details_version) (NULL);
5119 VG_(details_description) ("a thread error detector");
5120 VG_(details_copyright_author)(
sewardjec062e82011-10-23 07:32:08 +00005121 "Copyright (C) 2007-2011, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00005122 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9c08c0f2011-03-10 15:01:14 +00005123 VG_(details_avg_translation_sizeB) ( 320 );
sewardjb4112022007-11-09 22:49:28 +00005124
5125 VG_(basic_tool_funcs) (hg_post_clo_init,
5126 hg_instrument,
5127 hg_fini);
5128
5129 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00005130 VG_(needs_tool_errors) (HG_(eq_Error),
sewardj24118492009-07-15 14:50:02 +00005131 HG_(before_pp_Error),
sewardjf98e1c02008-10-25 16:22:41 +00005132 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00005133 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00005134 HG_(update_extra),
5135 HG_(recognised_suppression),
5136 HG_(read_extra_suppression_info),
5137 HG_(error_matches_suppression),
5138 HG_(get_error_name),
sewardj588adef2009-08-15 22:41:51 +00005139 HG_(get_extra_suppression_info));
sewardjb4112022007-11-09 22:49:28 +00005140
sewardj24118492009-07-15 14:50:02 +00005141 VG_(needs_xml_output) ();
5142
sewardjb4112022007-11-09 22:49:28 +00005143 VG_(needs_command_line_options)(hg_process_cmd_line_option,
5144 hg_print_usage,
5145 hg_print_debug_usage);
5146 VG_(needs_client_requests) (hg_handle_client_request);
5147
5148 // FIXME?
5149 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
5150 // hg_expensive_sanity_check);
5151
5152 VG_(needs_malloc_replacement) (hg_cli__malloc,
5153 hg_cli____builtin_new,
5154 hg_cli____builtin_vec_new,
5155 hg_cli__memalign,
5156 hg_cli__calloc,
5157 hg_cli__free,
5158 hg_cli____builtin_delete,
5159 hg_cli____builtin_vec_delete,
5160 hg_cli__realloc,
njn8b140de2009-02-17 04:31:18 +00005161 hg_cli_malloc_usable_size,
philipped99c26a2012-07-31 22:17:28 +00005162 HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
sewardjb4112022007-11-09 22:49:28 +00005163
sewardj849b0ed2008-12-21 10:43:10 +00005164 /* 21 Dec 08: disabled this; it mostly causes H to start more
5165 slowly and use significantly more memory, without very often
5166 providing useful results. The user can request to load this
5167 information manually with --read-var-info=yes. */
5168 if (0) VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00005169
5170 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00005171 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
5172 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00005173 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
sewardj1f77fec2010-04-12 19:51:04 +00005174 VG_(track_new_mem_stack) ( evh__new_mem_stack );
sewardjb4112022007-11-09 22:49:28 +00005175
5176 // FIXME: surely this isn't thread-aware
sewardj23f12002009-07-24 08:45:08 +00005177 VG_(track_copy_mem_remap) ( evh__copy_mem );
sewardjb4112022007-11-09 22:49:28 +00005178
5179 VG_(track_change_mem_mprotect) ( evh__set_perms );
5180
5181 VG_(track_die_mem_stack_signal)( evh__die_mem );
sewardjfd35d492011-03-17 19:39:55 +00005182 VG_(track_die_mem_brk) ( evh__die_mem_munmap );
5183 VG_(track_die_mem_munmap) ( evh__die_mem_munmap );
sewardjb4112022007-11-09 22:49:28 +00005184 VG_(track_die_mem_stack) ( evh__die_mem );
5185
5186 // FIXME: what is this for?
5187 VG_(track_ban_mem_stack) (NULL);
5188
5189 VG_(track_pre_mem_read) ( evh__pre_mem_read );
5190 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
5191 VG_(track_pre_mem_write) ( evh__pre_mem_write );
5192 VG_(track_post_mem_write) (NULL);
5193
5194 /////////////////
5195
5196 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
5197 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
5198
5199 VG_(track_start_client_code)( evh__start_client_code );
5200 VG_(track_stop_client_code)( evh__stop_client_code );
5201
sewardjb4112022007-11-09 22:49:28 +00005202 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
5203 as described in comments at the top of pub_tool_hashtable.h, are
5204 met. Blargh. */
5205 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
5206 tl_assert( sizeof(UWord) == sizeof(Addr) );
5207 hg_mallocmeta_table
5208 = VG_(HT_construct)( "hg_malloc_metadata_table" );
5209
sewardj61bc2c52011-02-09 10:34:00 +00005210 // add a callback to clean up on (threaded) fork.
5211 VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
sewardjb4112022007-11-09 22:49:28 +00005212}
5213
5214VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5215
5216/*--------------------------------------------------------------------*/
5217/*--- end hg_main.c ---*/
5218/*--------------------------------------------------------------------*/