blob: d1b72d0d0e42bf3073b82542666ecad873664bfe [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj0f157dd2013-10-18 14:27:36 +000011 Copyright (C) 2007-2013 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
sewardj0f157dd2013-10-18 14:27:36 +000014 Copyright (C) 2007-2013 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
philippef5774342014-05-03 11:12:50 +000040#include "pub_tool_gdbserver.h"
sewardjb4112022007-11-09 22:49:28 +000041#include "pub_tool_libcassert.h"
42#include "pub_tool_libcbase.h"
43#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000044#include "pub_tool_threadstate.h"
45#include "pub_tool_tooliface.h"
46#include "pub_tool_hashtable.h"
47#include "pub_tool_replacemalloc.h"
48#include "pub_tool_machine.h"
49#include "pub_tool_options.h"
50#include "pub_tool_xarray.h"
51#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000052#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000053#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
54#include "pub_tool_redir.h" // sonames for the dynamic linkers
55#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj61bc2c52011-02-09 10:34:00 +000056#include "pub_tool_libcproc.h" // VG_(atfork)
sewardj234e5582011-02-09 12:47:23 +000057#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
philippe5fbc9762013-12-01 19:28:48 +000058#include "pub_tool_poolalloc.h"
philippe07c08522014-05-14 20:39:27 +000059#include "pub_tool_addrinfo.h"
sewardjb4112022007-11-09 22:49:28 +000060
sewardjf98e1c02008-10-25 16:22:41 +000061#include "hg_basics.h"
62#include "hg_wordset.h"
philippef5774342014-05-03 11:12:50 +000063#include "hg_addrdescr.h"
sewardjf98e1c02008-10-25 16:22:41 +000064#include "hg_lock_n_thread.h"
65#include "hg_errors.h"
66
67#include "libhb.h"
68
sewardjb4112022007-11-09 22:49:28 +000069#include "helgrind.h"
70
sewardjf98e1c02008-10-25 16:22:41 +000071
72// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
73
74// FIXME: when client destroys a lock or a CV, remove these
75// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000076
77/*----------------------------------------------------------------*/
78/*--- ---*/
79/*----------------------------------------------------------------*/
80
sewardj11e352f2007-11-30 11:11:02 +000081/* Note this needs to be compiled with -fno-strict-aliasing, since it
82 contains a whole bunch of calls to lookupFM etc which cast between
83 Word and pointer types. gcc rightly complains this breaks ANSI C
84 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
85 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000086*/
sewardjb4112022007-11-09 22:49:28 +000087
88// FIXME what is supposed to happen to locks in memory which
89// is relocated as a result of client realloc?
90
sewardjb4112022007-11-09 22:49:28 +000091// FIXME put referencing ThreadId into Thread and get
92// rid of the slow reverse mapping function.
93
94// FIXME accesses to NoAccess areas: change state to Excl?
95
96// FIXME report errors for accesses of NoAccess memory?
97
98// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
99// the thread still holds the lock.
100
101/* ------------ Debug/trace options ------------ */
102
sewardjb4112022007-11-09 22:49:28 +0000103// 0 for silent, 1 for some stuff, 2 for lots of stuff
104#define SHOW_EVENTS 0
105
sewardjb4112022007-11-09 22:49:28 +0000106
florian6bf37262012-10-21 03:23:36 +0000107static void all__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000108
philipped99c26a2012-07-31 22:17:28 +0000109#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
sewardjb4112022007-11-09 22:49:28 +0000110
111// 0 for none, 1 for dump at end of run
112#define SHOW_DATA_STRUCTURES 0
113
114
sewardjb4112022007-11-09 22:49:28 +0000115/* ------------ Misc comments ------------ */
116
117// FIXME: don't hardwire initial entries for root thread.
118// Instead, let the pre_thread_ll_create handler do this.
119
sewardjb4112022007-11-09 22:49:28 +0000120
121/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000122/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000123/*----------------------------------------------------------------*/
124
sewardjb4112022007-11-09 22:49:28 +0000125/* Admin linked list of Threads */
126static Thread* admin_threads = NULL;
sewardjffce8152011-06-24 10:09:41 +0000127Thread* get_admin_threads ( void ) { return admin_threads; }
sewardjb4112022007-11-09 22:49:28 +0000128
sewardj1d7c3322011-02-28 09:22:51 +0000129/* Admin double linked list of Locks */
130/* We need a double linked list to properly and efficiently
131 handle del_LockN. */
sewardjb4112022007-11-09 22:49:28 +0000132static Lock* admin_locks = NULL;
133
sewardjb4112022007-11-09 22:49:28 +0000134/* Mapping table for core ThreadIds to Thread* */
135static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
136
sewardjb4112022007-11-09 22:49:28 +0000137/* Mapping table for lock guest addresses to Lock* */
138static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
139
sewardj0f64c9e2011-03-10 17:40:22 +0000140/* The word-set universes for lock sets. */
sewardjb4112022007-11-09 22:49:28 +0000141static WordSetU* univ_lsets = NULL; /* sets of Lock* */
142static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
sewardj866c80c2011-10-22 19:29:51 +0000143static Int next_gc_univ_laog = 1;
144/* univ_laog will be garbaged collected when the nr of element in univ_laog is
145 >= next_gc_univ_laog. */
sewardjb4112022007-11-09 22:49:28 +0000146
sewardjffce8152011-06-24 10:09:41 +0000147/* Allow libhb to get at the universe of locksets stored
148 here. Sigh. */
149WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
150
151/* Allow libhb to get at the list of locks stored here. Ditto
152 sigh. */
153Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
154
sewardjb4112022007-11-09 22:49:28 +0000155
156/*----------------------------------------------------------------*/
157/*--- Simple helpers for the data structures ---*/
158/*----------------------------------------------------------------*/
159
160static UWord stats__lockN_acquires = 0;
161static UWord stats__lockN_releases = 0;
162
sewardjf98e1c02008-10-25 16:22:41 +0000163static
164ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000165
166/* --------- Constructors --------- */
167
/* Allocate and initialise a new Thread shadow object bound to the
   libhb thread 'hbthr'.  The Thread starts with empty lock sets, no
   core ThreadId (the caller binds that later), and is pushed onto the
   front of the admin_threads singly linked list.  Never returns NULL
   (HG_(zalloc) aborts on OOM). */
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;   /* monotonically increasing index used in error messages */
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );   /* all locks held */
   thread->locksetW = HG_(emptyWS)( univ_lsets );   /* write-held subset */
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;  /* not yet bound to a core tid */
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   /* push on front of the admin list */
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}
sewardjf98e1c02008-10-25 16:22:41 +0000183
sewardjb4112022007-11-09 22:49:28 +0000184// Make a new lock which is unlocked (hence ownerless)
sewardj1d7c3322011-02-28 09:22:51 +0000185// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;   /* per-process unique lock id, for reporting */
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list (push on front of admin_locks) */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;   /* filled in by caller if wanted */
   lock->acquired_at = NULL;   /* set on first acquisition */
   lock->hbso = libhb_so_alloc();   /* happens-before sync object for this lock */
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   /* created unheld, hence ownerless */
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}
sewardjb4112022007-11-09 22:49:28 +0000208
209/* Release storage for a Lock. Also release storage in .heldBy, if
sewardj1d7c3322011-02-28 09:22:51 +0000210 any. Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);   /* release the happens-before SO first */
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      /* lk is the list head */
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      /* lk is in the interior or at the tail */
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   /* poison the storage to catch use-after-free of this Lock */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
235
236/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
237 it. This is done strictly: only combinations resulting from
238 correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:   /* also the target of gotos from LK_mbRec/LK_rdwr below */
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         /* first locking of a maybe-recursive lock behaves like nonRec */
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
  }
   tl_assert(HG_(is_sane_LockN)(lk));
}
289
/* Update 'lk' to reflect that 'thr' now has a read-acquisition of it.
   Like lockN_acquire_writer, done strictly: only rdwr locks may be
   r-held, and only when free or already r-held. */
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      /* already r-held: just bump thr's count in the holder bag */
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      /* first reader: create the holder bag */
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
326
327/* Update 'lk' to reflect a release of it by 'thr'. This is done
328 strictly: only combinations resulting from correct program and
329 libpthread behaviour are allowed. */
330
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise: if nobody holds it any more, revert to the canonical
      unheld state (heldBy == NULL, !heldW, no acquired_at context) */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy = NULL;
      lk->heldW = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
353
/* Remove 'lk' from the locksetA (and, if write-held, locksetW) of
   every Thread currently holding it.  No-op when the lock is unheld.
   Does not modify lk->heldBy itself. */
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
379
sewardjb4112022007-11-09 22:49:28 +0000380
381/*----------------------------------------------------------------*/
382/*--- Print out the primary data structures ---*/
383/*----------------------------------------------------------------*/
384
sewardjb4112022007-11-09 22:49:28 +0000385#define PP_THREADS (1<<1)
386#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000387#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000388
389
390static const Int sHOW_ADMIN = 0;
391
392static void space ( Int n )
393{
394 Int i;
florian6bf37262012-10-21 03:23:36 +0000395 HChar spaces[128+1];
sewardjb4112022007-11-09 22:49:28 +0000396 tl_assert(n >= 0 && n < 128);
397 if (n == 0)
398 return;
399 for (i = 0; i < n; i++)
400 spaces[i] = ' ';
401 spaces[i] = 0;
402 tl_assert(i < 128+1);
403 VG_(printf)("%s", spaces);
404}
405
/* Dump one Thread record, indented by 'd' spaces.  Admin fields are
   only shown when sHOW_ADMIN is nonzero. */
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n", t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   /* locksets are printed as their WordSetU ids, not expanded */
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
417
/* Dump the whole admin_threads list, indented by 'd' spaces.  First
   pass counts the records (for the header line), second pass prints. */
static void pp_admin_threads ( Int d )
{
   Int i, n;
   Thread* t;
   for (n = 0, t = admin_threads; t; n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads; t; i++, t = t->admin) {
      if (0) {   /* disabled per-record banner */
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
435
436static void pp_map_threads ( Int d )
437{
njn4c245e52009-03-15 23:25:38 +0000438 Int i, n = 0;
sewardjb4112022007-11-09 22:49:28 +0000439 space(d); VG_(printf)("map_threads ");
sewardjb4112022007-11-09 22:49:28 +0000440 for (i = 0; i < VG_N_THREADS; i++) {
441 if (map_threads[i] != NULL)
442 n++;
443 }
444 VG_(printf)("(%d entries) {\n", n);
445 for (i = 0; i < VG_N_THREADS; i++) {
446 if (map_threads[i] == NULL)
447 continue;
448 space(d+3);
449 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
450 }
451 space(d); VG_(printf)("}\n");
452}
453
454static const HChar* show_LockKind ( LockKind lkk ) {
455 switch (lkk) {
456 case LK_mbRec: return "mbRec";
457 case LK_nonRec: return "nonRec";
458 case LK_rdwr: return "rdwr";
459 default: tl_assert(0);
460 }
461}
462
philippef5774342014-05-03 11:12:50 +0000463/* Pretty Print lock lk.
464 if show_lock_addrdescr, describes the (guest) lock address.
465 (this description will be more complete with --read-var-info=yes).
466 if show_internal_data, shows also helgrind internal information.
467 d is the level at which output is indented. */
468static void pp_Lock ( Int d, Lock* lk,
469 Bool show_lock_addrdescr,
470 Bool show_internal_data)
sewardjb4112022007-11-09 22:49:28 +0000471{
philippef5774342014-05-03 11:12:50 +0000472 space(d+0);
473 if (show_internal_data)
philippe07c08522014-05-14 20:39:27 +0000474 VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
philippef5774342014-05-03 11:12:50 +0000475 else
philippe07c08522014-05-14 20:39:27 +0000476 VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
philippef5774342014-05-03 11:12:50 +0000477 if (!show_lock_addrdescr
philippe07c08522014-05-14 20:39:27 +0000478 || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
philippef5774342014-05-03 11:12:50 +0000479 VG_(printf)("\n");
480
sewardjb4112022007-11-09 22:49:28 +0000481 if (sHOW_ADMIN) {
sewardj1d7c3322011-02-28 09:22:51 +0000482 space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
483 space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
484 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
sewardjb4112022007-11-09 22:49:28 +0000485 }
philippef5774342014-05-03 11:12:50 +0000486 if (show_internal_data) {
487 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
488 }
sewardjb4112022007-11-09 22:49:28 +0000489 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
philippef5774342014-05-03 11:12:50 +0000490 if (show_internal_data) {
491 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
492 }
493 if (show_internal_data) {
494 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
495 }
sewardjb4112022007-11-09 22:49:28 +0000496 if (lk->heldBy) {
497 Thread* thr;
florian6bf37262012-10-21 03:23:36 +0000498 UWord count;
sewardjb4112022007-11-09 22:49:28 +0000499 VG_(printf)(" { ");
sewardj896f6f92008-08-19 08:38:52 +0000500 VG_(initIterBag)( lk->heldBy );
philippef5774342014-05-03 11:12:50 +0000501 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
502 if (show_internal_data)
503 VG_(printf)("%lu:%p ", count, thr);
504 else {
505 VG_(printf)("%c%lu:thread #%d ",
506 lk->heldW ? 'W' : 'R',
507 count, thr->errmsg_index);
508 if (thr->coretid == VG_INVALID_THREADID)
509 VG_(printf)("tid (exited) ");
510 else
511 VG_(printf)("tid %d ", thr->coretid);
512
513 }
514 }
sewardj896f6f92008-08-19 08:38:52 +0000515 VG_(doneIterBag)( lk->heldBy );
philippef5774342014-05-03 11:12:50 +0000516 VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000517 }
sewardjb4112022007-11-09 22:49:28 +0000518 space(d+0); VG_(printf)("}\n");
519}
520
/* Dump the whole admin_locks list, indented by 'd' spaces.  First
   pass counts records, second pass prints each lock with internal
   data shown and no address description. */
static void pp_admin_locks ( Int d )
{
   Int i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
      if (0) {   /* disabled per-record banner */
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}
540
/* Dump the map_locks finite map (guest lock address -> Lock*),
   indented by 'd' spaces. */
static void pp_map_locks ( Int d)
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
556
florian6bf37262012-10-21 03:23:36 +0000557static void pp_everything ( Int flags, const HChar* caller )
sewardjb4112022007-11-09 22:49:28 +0000558{
559 Int d = 0;
560 VG_(printf)("\n");
561 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
562 if (flags & PP_THREADS) {
563 VG_(printf)("\n");
564 pp_admin_threads(d+3);
565 VG_(printf)("\n");
566 pp_map_threads(d+3);
567 }
568 if (flags & PP_LOCKS) {
569 VG_(printf)("\n");
570 pp_admin_locks(d+3);
571 VG_(printf)("\n");
572 pp_map_locks(d+3);
573 }
sewardjb4112022007-11-09 22:49:28 +0000574
575 VG_(printf)("\n");
576 VG_(printf)("}\n");
577 VG_(printf)("\n");
578}
579
580#undef SHOW_ADMIN
581
582
583/*----------------------------------------------------------------*/
584/*--- Initialise the primary data structures ---*/
585/*----------------------------------------------------------------*/
586
/* One-time setup of all primary data structures: the thread map, the
   lock map, the lockset universes, and a Thread record for the root
   thread (bound to libhb's root Thr 'hbthr_root').  Asserts that
   everything is still in its pristine NULL state on entry. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );

   /* map_locks keys are guest addresses stored as UWords */
   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   /* the lock-order-acquisition-graph universe is only needed when
      lock order tracking is enabled */
   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}
641
642
643/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000644/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000645/*----------------------------------------------------------------*/
646
647/* Doesn't assert if the relevant map_threads entry is NULL. */
648static Thread* map_threads_maybe_lookup ( ThreadId coretid )
649{
650 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000651 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000652 thr = map_threads[coretid];
653 return thr;
654}
655
656/* Asserts if the relevant map_threads entry is NULL. */
657static inline Thread* map_threads_lookup ( ThreadId coretid )
658{
659 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000660 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000661 thr = map_threads[coretid];
662 tl_assert(thr);
663 return thr;
664}
665
sewardjf98e1c02008-10-25 16:22:41 +0000666/* Do a reverse lookup. Does not assert if 'thr' is not found in
667 map_threads. */
sewardjb4112022007-11-09 22:49:28 +0000668static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
669{
sewardjf98e1c02008-10-25 16:22:41 +0000670 ThreadId tid;
671 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000672 /* Check nobody used the invalid-threadid slot */
673 tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
674 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000675 tid = thr->coretid;
676 tl_assert(HG_(is_sane_ThreadId)(tid));
677 return tid;
sewardjb4112022007-11-09 22:49:28 +0000678}
679
680/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
681 is not found in map_threads. */
682static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
683{
684 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
685 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000686 tl_assert(map_threads[tid]);
687 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000688 return tid;
689}
690
691static void map_threads_delete ( ThreadId coretid )
692{
693 Thread* thr;
694 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000695 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000696 thr = map_threads[coretid];
697 tl_assert(thr);
698 map_threads[coretid] = NULL;
699}
700
701
702/*----------------------------------------------------------------*/
703/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
704/*----------------------------------------------------------------*/
705
706/* Make sure there is a lock table entry for the given (lock) guest
707 address. If not, create one of the stated 'kind' in unheld state.
708 In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      /* no Lock for this guest address yet: make one of kind 'lkk',
         record where it first appeared, and enter it in the map */
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      /* already known: sanity-check and return the existing Lock */
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
731
732static Lock* map_locks_maybe_lookup ( Addr ga )
733{
734 Bool found;
735 Lock* lk = NULL;
florian6bf37262012-10-21 03:23:36 +0000736 found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000737 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000738 return lk;
739}
740
741static void map_locks_delete ( Addr ga )
742{
743 Addr ga2 = 0;
744 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000745 VG_(delFromFM)( map_locks,
florian6bf37262012-10-21 03:23:36 +0000746 (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000747 /* delFromFM produces the val which is being deleted, if it is
748 found. So assert it is non-null; that in effect asserts that we
749 are deleting a (ga, Lock) pair which actually exists. */
750 tl_assert(lk != NULL);
751 tl_assert(ga2 == ga);
752}
753
754
sewardjb4112022007-11-09 22:49:28 +0000755
756/*----------------------------------------------------------------*/
757/*--- Sanity checking the data structures ---*/
758/*----------------------------------------------------------------*/
759
/* Number of times all_except_Locks__sanity_check has run; reported
   with the other tool statistics. */
static UWord stats__sanity_checks = 0;

/* Defined later in this file. */
static void laog__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000763
764/* REQUIRED INVARIANTS:
765
766 Thread vs Segment/Lock/SecMaps
767
768 for each t in Threads {
769
770 // Thread.lockset: each element is really a valid Lock
771
772 // Thread.lockset: each Lock in set is actually held by that thread
773 for lk in Thread.lockset
774 lk == LockedBy(t)
775
776 // Thread.csegid is a valid SegmentID
777 // and the associated Segment has .thr == t
778
779 }
780
781 all thread Locksets are pairwise empty under intersection
782 (that is, no lock is claimed to be held by more than one thread)
783 -- this is guaranteed if all locks in locksets point back to their
784 owner threads
785
786 Lock vs Thread/Segment/SecMaps
787
788 for each entry (gla, la) in map_locks
789 gla == la->guest_addr
790
791 for each lk in Locks {
792
793 lk->tag is valid
794 lk->guest_addr does not have shadow state NoAccess
795 if lk == LockedBy(t), then t->lockset contains lk
796 if lk == UnlockedBy(segid) then segid is valid SegmentID
797 and can be mapped to a valid Segment(seg)
798 and seg->thr->lockset does not contain lk
799 if lk == UnlockedNew then (no lockset contains lk)
800
801 secmaps for lk has .mbHasLocks == True
802
803 }
804
805 Segment vs Thread/Lock/SecMaps
806
807 the Segment graph is a dag (no cycles)
808 all of the Segment graph must be reachable from the segids
809 mentioned in the Threads
810
811 for seg in Segments {
812
813 seg->thr is a sane Thread
814
815 }
816
817 SecMaps vs Segment/Thread/Lock
818
819 for sm in SecMaps {
820
821 sm properly aligned
822 if any shadow word is ShR or ShM then .mbHasShared == True
823
824 for each Excl(segid) state
825 map_segments_lookup maps to a sane Segment(seg)
826 for each ShM/ShR(tsetid,lsetid) state
827 each lk in lset is a valid Lock
828 each thr in tset is a valid thread, which is non-dead
829
830 }
831*/
832
833
834/* Return True iff 'thr' holds 'lk' in some mode. */
835static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
836{
837 if (lk->heldBy)
florian6bf37262012-10-21 03:23:36 +0000838 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000839 else
840 return False;
841}
842
/* Sanity check Threads, as far as possible: walk the admin list of
   all Threads and verify each one's locksets against the locks'
   holder records.  Aborts (tl_assert(0)) with a short code naming
   the first violated invariant. */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
/* Record which invariant failed and jump to the reporting code. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   UWord ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
876
877
/* Sanity check Locks, as far as possible: verify that admin_locks and
   map_locks agree, that every Lock is internally sane, and that each
   lock's holder threads mention the lock in their locksets (and in
   locksetW exactly when the lock is write-held).  Aborts with a short
   code naming the first violated invariant. */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
/* Record which invariant failed and jump to the reporting code. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
942
943
/* Run all sanity checks except the (more expensive) lock checks.
   'who' is a tag naming the call site, printed on failure. */
static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   /* The lock-order graph only exists when lock-order tracking is
      enabled. */
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
/* Run every available sanity check, including the lock checks. */
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
955
956
957/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +0000958/*--- Shadow value and address range handlers ---*/
959/*----------------------------------------------------------------*/
960
/* Forward declarations for routines defined later in this file. */
static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000966
sewardjb4112022007-11-09 22:49:28 +0000967
968/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +0000969/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
970 Is that a problem? (hence 'scopy' rather than 'ccopy') */
971static void shadow_mem_scopy_range ( Thread* thr,
972 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +0000973{
974 Thr* hbthr = thr->hbthr;
975 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000976 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +0000977}
978
sewardj23f12002009-07-24 08:45:08 +0000979static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
980{
sewardjf98e1c02008-10-25 16:22:41 +0000981 Thr* hbthr = thr->hbthr;
982 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000983 LIBHB_CREAD_N(hbthr, a, len);
984}
985
986static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
987 Thr* hbthr = thr->hbthr;
988 tl_assert(hbthr);
989 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +0000990}
991
/* Initialise shadow state for a newly-created range [a, a+len). */
static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}
996
sewardjfd35d492011-03-17 19:39:55 +0000997static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
sewardjb4112022007-11-09 22:49:28 +0000998{
sewardjb4112022007-11-09 22:49:28 +0000999 if (0 && len > 500)
sewardjfd35d492011-03-17 19:39:55 +00001000 VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
1001 // has no effect (NoFX)
1002 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1003}
1004
1005static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
1006{
1007 if (0 && len > 500)
1008 VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
1009 // Actually Has An Effect (AHAE)
1010 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001011}
1012
sewardj406bac82010-03-03 23:03:40 +00001013static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1014{
1015 if (0 && len > 500)
1016 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
1017 libhb_srange_untrack( thr->hbthr, aIN, len );
1018}
1019
sewardjb4112022007-11-09 22:49:28 +00001020
1021/*----------------------------------------------------------------*/
1022/*--- Event handlers (evh__* functions) ---*/
1023/*--- plus helpers (evhH__* functions) ---*/
1024/*----------------------------------------------------------------*/
1025
1026/*--------- Event handler helpers (evhH__* functions) ---------*/
1027
1028/* Create a new segment for 'thr', making it depend (.prev) on its
1029 existing segment, bind together the SegmentID and Segment, and
1030 return both of them. Also update 'thr' so it references the new
1031 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001032//zz static
1033//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1034//zz /*OUT*/Segment** new_segP,
1035//zz Thread* thr )
1036//zz {
1037//zz Segment* cur_seg;
1038//zz tl_assert(new_segP);
1039//zz tl_assert(new_segidP);
1040//zz tl_assert(HG_(is_sane_Thread)(thr));
1041//zz cur_seg = map_segments_lookup( thr->csegid );
1042//zz tl_assert(cur_seg);
1043//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1044//zz at their owner thread. */
1045//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1046//zz *new_segidP = alloc_SegmentID();
1047//zz map_segments_add( *new_segidP, *new_segP );
1048//zz thr->csegid = *new_segidP;
1049//zz }
sewardjb4112022007-11-09 22:49:28 +00001050
1051
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks.
   thr     - the acquiring thread (must be sane)
   lkk     - the kind of lock to create if none is yet known at lock_ga
   lock_ga - guest address of the lock object */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1147
1148
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks.
   thr     - the acquiring thread (must be sane)
   lkk     - must be LK_rdwr: only reader-writer locks can be rd-locked
   lock_ga - guest address of the lock object */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1222
1223
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks.
   thr     - the releasing thread (must be sane)
   lock_ga - guest address of the lock object
   isRDWR  - True if the wrapper context says this should be a
             reader-writer lock, False for a standard mutex */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* Complain (but continue) if the dynamic lock kind contradicts
      what the wrapper context says it should be. */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1362
1363
sewardj9f569b72008-11-13 13:33:09 +00001364/* ---------------------------------------------------------- */
1365/* -------- Event handlers proper (evh__* functions) -------- */
1366/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001367
1368/* What is the Thread* for the currently running thread? This is
1369 absolutely performance critical. We receive notifications from the
1370 core for client code starts/stops, and cache the looked-up result
1371 in 'current_Thread'. Hence, for the vast majority of requests,
1372 finding the current thread reduces to a read of a global variable,
1373 provided get_current_Thread_in_C_C is inlined.
1374
1375 Outside of client code, current_Thread is NULL, and presumably
1376 any uses of it will cause a segfault. Hence:
1377
1378 - for uses definitely within client code, use
1379 get_current_Thread_in_C_C.
1380
1381 - for all other uses, use get_current_Thread.
1382*/
1383
/* Cache of the Thread* for the thread currently running client code;
   NULL when outside client code.  current_Thread_prev remembers the
   last cached value so evh__start_client_code can tell whether the
   running thread actually changed. */
static Thread *current_Thread = NULL,
              *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001386
/* Core notification: client code is about to run on 'tid'.  Cache the
   corresponding Thread* so get_current_Thread_in_C_C is just a
   variable read. */
static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   /* Notify libhb only when the running thread has actually changed
      since the last start notification. */
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
/* Core notification: client code has stopped running on 'tid'.
   Invalidate the cached Thread* and give libhb a chance to GC. */
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
/* Fast path: return the cached current thread.  Only valid while
   inside client code; NULL otherwise. */
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
/* Return the current Thread*, using the cache when inside client
   code and falling back to a core-tid lookup otherwise. */
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread* thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
1423
1424static
1425void evh__new_mem ( Addr a, SizeT len ) {
1426 if (SHOW_EVENTS >= 2)
1427 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1428 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001429 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001430 all__sanity_check("evh__new_mem-post");
1431}
1432
1433static
sewardj1f77fec2010-04-12 19:51:04 +00001434void evh__new_mem_stack ( Addr a, SizeT len ) {
1435 if (SHOW_EVENTS >= 2)
1436 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1437 shadow_mem_make_New( get_current_Thread(),
1438 -VG_STACK_REDZONE_SZB + a, len );
1439 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1440 all__sanity_check("evh__new_mem_stack-post");
1441}
1442
1443static
sewardj7cf4e6b2008-05-01 20:24:26 +00001444void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1445 if (SHOW_EVENTS >= 2)
1446 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1447 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001448 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001449 all__sanity_check("evh__new_mem_w_tid-post");
1450}
1451
1452static
sewardjb4112022007-11-09 22:49:28 +00001453void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001454 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001455 if (SHOW_EVENTS >= 1)
1456 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1457 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1458 if (rr || ww || xx)
1459 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001460 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001461 all__sanity_check("evh__new_mem_w_perms-post");
1462}
1463
1464static
1465void evh__set_perms ( Addr a, SizeT len,
1466 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001467 // This handles mprotect requests. If the memory is being put
1468 // into no-R no-W state, paint it as NoAccess, for the reasons
1469 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001470 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001471 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001472 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1473 /* Hmm. What should we do here, that actually makes any sense?
1474 Let's say: if neither readable nor writable, then declare it
1475 NoAccess, else leave it alone. */
1476 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001477 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001478 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001479 all__sanity_check("evh__set_perms-post");
1480}
1481
1482static
1483void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001484 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001485 if (SHOW_EVENTS >= 2)
1486 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001487 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001488 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001489 all__sanity_check("evh__die_mem-post");
1490}
1491
1492static
sewardjfd35d492011-03-17 19:39:55 +00001493void evh__die_mem_munmap ( Addr a, SizeT len ) {
1494 // It's important that libhb doesn't ignore this. If, as is likely,
1495 // the client is subject to address space layout randomization,
1496 // then unmapped areas may never get remapped over, even in long
1497 // runs. If we just ignore them we wind up with large resource
1498 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1499 // VTS references in the affected area are dropped. Marking memory
1500 // as NoAccess is expensive, but we assume that munmap is sufficiently
1501 // rare that the space gains of doing this are worth the costs.
1502 if (SHOW_EVENTS >= 2)
1503 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1504 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1505}
1506
1507static
sewardj406bac82010-03-03 23:03:40 +00001508void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001509 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001510 if (SHOW_EVENTS >= 2)
1511 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1512 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1513 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1514 all__sanity_check("evh__untrack_mem-post");
1515}
1516
1517static
sewardj23f12002009-07-24 08:45:08 +00001518void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1519 if (SHOW_EVENTS >= 2)
1520 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1521 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1522 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1523 all__sanity_check("evh__copy_mem-post");
1524}
1525
/* Low-level thread creation notification: 'parent' has created
   'child'.  Create the Helgrind-side Thread and libhb Thr records for
   the child, bind them into map_threads, and take a stack snapshot of
   the parent for later error reporting.  Nothing is done when parent
   is VG_INVALID_THREADID (the boot thread has no parent). */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      /* The parent must be known already; the child must not be. */
      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      /* The new libhb Thr inherits a dependence on the parent. */
      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On x86/amd64-linux, this entails a nasty glibc specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
        first_ip_delta = -3;
#       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
        first_ip_delta = -1;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1590
/* Low-level thread exit notification for 'quit_tid'.  Reports any
   locks the exiting thread still holds, tells libhb the thread is
   gone, and frees its map_threads slot for re-use by the core. */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* Message is bounded well below 80 bytes for any Int nHeld. */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1643
sewardj61bc2c52011-02-09 10:34:00 +00001644/* This is called immediately after fork, for the child only. 'tid'
1645 is the only surviving thread (as per POSIX rules on fork() in
1646 threaded programs), so we have to clean up map_threads to remove
1647 entries for any other threads. */
1648static
1649void evh__atfork_child ( ThreadId tid )
1650{
1651 UInt i;
1652 Thread* thr;
1653 /* Slot 0 should never be used. */
1654 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1655 tl_assert(!thr);
1656 /* Clean up all other slots except 'tid'. */
1657 for (i = 1; i < VG_N_THREADS; i++) {
1658 if (i == tid)
1659 continue;
1660 thr = map_threads_maybe_lookup(i);
1661 if (!thr)
1662 continue;
1663 /* Cleanup actions (next 5 lines) copied from end of
1664 evh__pre_thread_ll_exit; keep in sync. */
1665 tl_assert(thr->hbthr);
1666 libhb_async_exit(thr->hbthr);
1667 tl_assert(thr->coretid == i);
1668 thr->coretid = VG_INVALID_THREADID;
1669 map_threads_delete(i);
1670 }
1671}
1672
philipped40aff52014-06-16 20:00:14 +00001673/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
sewardjb4112022007-11-09 22:49:28 +00001674static
philipped40aff52014-06-16 20:00:14 +00001675void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
sewardjb4112022007-11-09 22:49:28 +00001676{
sewardjf98e1c02008-10-25 16:22:41 +00001677 SO* so;
sewardjf98e1c02008-10-25 16:22:41 +00001678 /* Allocate a temporary synchronisation object and use it to send
1679 an imaginary message from the quitter to the stayer, the purpose
1680 being to generate a dependence from the quitter to the
1681 stayer. */
1682 so = libhb_so_alloc();
1683 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001684 /* Send last arg of _so_send as False, since the sending thread
1685 doesn't actually exist any more, so we don't want _so_send to
1686 try taking stack snapshots of it. */
sewardjffce8152011-06-24 10:09:41 +00001687 libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
sewardjf98e1c02008-10-25 16:22:41 +00001688 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1689 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001690
sewardjffce8152011-06-24 10:09:41 +00001691 /* Tell libhb that the quitter has been reaped. Note that we might
1692 have to be cleverer about this, to exclude 2nd and subsequent
1693 notifications for the same hbthr_q, in the case where the app is
1694 buggy (calls pthread_join twice or more on the same thread) AND
1695 where libpthread is also buggy and doesn't return ESRCH on
1696 subsequent calls. (If libpthread isn't thusly buggy, then the
1697 wrapper for pthread_join in hg_intercepts.c will stop us getting
1698 notified here multiple times for the same joinee.) See also
1699 comments in helgrind/tests/jointwice.c. */
1700 libhb_joinedwith_done(hbthr_q);
philipped40aff52014-06-16 20:00:14 +00001701}
1702
1703
/* Called after a successful pthread_join: the thread running
   'stay_tid' has joined with the (already exited) thread 'quit_thr'.
   Establishes the quitter->stayer happens-before dependence. */
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread* thr_s;
   Thread* thr_q;
   Thr*    hbthr_s;
   Thr*    hbthr_q;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   generate_quitter_stayer_dependence (hbthr_q, hbthr_s);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1747
1748static
floriane543f302012-10-21 19:43:43 +00001749void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001750 Addr a, SizeT size) {
1751 if (SHOW_EVENTS >= 2
1752 || (SHOW_EVENTS >= 1 && size != 1))
1753 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1754 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001755 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001756 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001757 all__sanity_check("evh__pre_mem_read-post");
1758}
1759
/* Core notification: the client is about to read the NUL-terminated
   string at 'a' (e.g. a syscall pathname).  Reports a client read of
   the string including its terminator. */
static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                const HChar* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   len = VG_(strlen)( (HChar*) a );
   /* +1 so the terminating NUL is reported as read too. */
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}
1778
1779static
floriane543f302012-10-21 19:43:43 +00001780void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001781 Addr a, SizeT size ) {
1782 if (SHOW_EVENTS >= 1)
1783 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1784 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001785 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001786 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001787 all__sanity_check("evh__pre_mem_write-post");
1788}
1789
1790static
1791void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1792 if (SHOW_EVENTS >= 1)
1793 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1794 (void*)a, len, (Int)is_inited );
sewardj438c4712014-09-05 20:29:10 +00001795 // We ignore the initialisation state (is_inited); that's ok.
1796 shadow_mem_make_New(get_current_Thread(), a, len);
sewardjf98e1c02008-10-25 16:22:41 +00001797 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001798 all__sanity_check("evh__pre_mem_read-post");
1799}
1800
1801static
1802void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001803 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001804 if (SHOW_EVENTS >= 1)
1805 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001806 thr = get_current_Thread();
1807 tl_assert(thr);
1808 if (HG_(clo_free_is_write)) {
1809 /* Treat frees as if the memory was written immediately prior to
1810 the free. This shakes out more races, specifically, cases
1811 where memory is referenced by one thread, and freed by
1812 another, and there's no observable synchronisation event to
1813 guarantee that the reference happens before the free. */
1814 shadow_mem_cwrite_range(thr, a, len);
1815 }
sewardjfd35d492011-03-17 19:39:55 +00001816 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001817 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001818 all__sanity_check("evh__pre_mem_read-post");
1819}
1820
sewardj23f12002009-07-24 08:45:08 +00001821/* --- Event handlers called from generated code --- */
1822
sewardjb4112022007-11-09 22:49:28 +00001823static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001824void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001825 Thread* thr = get_current_Thread_in_C_C();
1826 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001827 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001828}
sewardjf98e1c02008-10-25 16:22:41 +00001829
sewardjb4112022007-11-09 22:49:28 +00001830static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001831void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001832 Thread* thr = get_current_Thread_in_C_C();
1833 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001834 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001835}
sewardjf98e1c02008-10-25 16:22:41 +00001836
sewardjb4112022007-11-09 22:49:28 +00001837static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001838void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001839 Thread* thr = get_current_Thread_in_C_C();
1840 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001841 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001842}
sewardjf98e1c02008-10-25 16:22:41 +00001843
sewardjb4112022007-11-09 22:49:28 +00001844static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001845void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001846 Thread* thr = get_current_Thread_in_C_C();
1847 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001848 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001849}
sewardjf98e1c02008-10-25 16:22:41 +00001850
sewardjb4112022007-11-09 22:49:28 +00001851static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001852void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001853 Thread* thr = get_current_Thread_in_C_C();
1854 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001855 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001856}
1857
1858static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001859void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001860 Thread* thr = get_current_Thread_in_C_C();
1861 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001862 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001863}
sewardjf98e1c02008-10-25 16:22:41 +00001864
sewardjb4112022007-11-09 22:49:28 +00001865static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001866void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001867 Thread* thr = get_current_Thread_in_C_C();
1868 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001869 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001870}
sewardjf98e1c02008-10-25 16:22:41 +00001871
sewardjb4112022007-11-09 22:49:28 +00001872static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001873void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001874 Thread* thr = get_current_Thread_in_C_C();
1875 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001876 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001877}
sewardjf98e1c02008-10-25 16:22:41 +00001878
sewardjb4112022007-11-09 22:49:28 +00001879static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001880void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001881 Thread* thr = get_current_Thread_in_C_C();
1882 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001883 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001884}
sewardjf98e1c02008-10-25 16:22:41 +00001885
sewardjb4112022007-11-09 22:49:28 +00001886static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001887void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001888 Thread* thr = get_current_Thread_in_C_C();
1889 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001890 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001891}
1892
sewardjb4112022007-11-09 22:49:28 +00001893
sewardj9f569b72008-11-13 13:33:09 +00001894/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001895/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001896/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001897
1898/* EXPOSITION only: by intercepting lock init events we can show the
1899 user where the lock was initialised, rather than only being able to
1900 show where it was first locked. Intercepting lock initialisations
1901 is not necessary for the basic operation of the race checker. */
1902static
1903void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1904 void* mutex, Word mbRec )
1905{
1906 if (SHOW_EVENTS >= 1)
1907 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1908 (Int)tid, mbRec, (void*)mutex );
1909 tl_assert(mbRec == 0 || mbRec == 1);
1910 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1911 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001912 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001913 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1914}
1915
/* Called before pthread_mutex_destroy.  Validates the argument,
   reports destruction of an unknown/locked mutex, and removes the
   Lock record (and its lock-order info, if tracked).  mutex_is_init
   is set when the mutex looks like PTHREAD_MUTEX_INITIALIZER, in
   which case an unknown mutex is assumed never-used and ignored. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   /* Unknown lock, or a lock of the wrong kind (e.g. an rwlock):
      complain, but still fall through to clean up any record. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1973
/* Called before pthread_mutex_lock/trylock.  Performs sanity
   diagnostics only (wrong lock type; imminent self-deadlock on a
   non-recursive lock); the actual lock acquisition is recorded in
   the _POST handler. */
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      /* If we know where the lock was first acquired, include that
         as auxiliary context in the report. */
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
2016
2017static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2018{
2019 // only called if the real library call succeeded - so mutex is sane
2020 Thread* thr;
2021 if (SHOW_EVENTS >= 1)
2022 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2023 (Int)tid, (void*)mutex );
2024
2025 thr = map_threads_maybe_lookup( tid );
2026 tl_assert(thr); /* cannot fail - Thread* must already exist */
2027
2028 evhH__post_thread_w_acquires_lock(
2029 thr,
2030 LK_mbRec, /* if not known, create new lock with this LockKind */
2031 (Addr)mutex
2032 );
2033}
2034
2035static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2036{
2037 // 'mutex' may be invalid - not checked by wrapper
2038 Thread* thr;
2039 if (SHOW_EVENTS >= 1)
2040 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2041 (Int)tid, (void*)mutex );
2042
2043 thr = map_threads_maybe_lookup( tid );
2044 tl_assert(thr); /* cannot fail - Thread* must already exist */
2045
2046 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2047}
2048
2049static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2050{
2051 // only called if the real library call succeeded - so mutex is sane
2052 Thread* thr;
2053 if (SHOW_EVENTS >= 1)
2054 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2055 (Int)tid, (void*)mutex );
2056 thr = map_threads_maybe_lookup( tid );
2057 tl_assert(thr); /* cannot fail - Thread* must already exist */
2058
2059 // anything we should do here?
2060}
2061
2062
sewardj5a644da2009-08-11 10:35:58 +00002063/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002064/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002065/* ------------------------------------------------------- */
2066
2067/* All a bit of a kludge. Pretend we're really dealing with ordinary
2068 pthread_mutex_t's instead, for the most part. */
2069
2070static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2071 void* slock )
2072{
2073 Thread* thr;
2074 Lock* lk;
2075 /* In glibc's kludgey world, we're either initialising or unlocking
2076 it. Since this is the pre-routine, if it is locked, unlock it
2077 and take a dependence edge. Otherwise, do nothing. */
2078
2079 if (SHOW_EVENTS >= 1)
2080 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2081 "(ctid=%d, slock=%p)\n",
2082 (Int)tid, (void*)slock );
2083
2084 thr = map_threads_maybe_lookup( tid );
2085 /* cannot fail - Thread* must already exist */;
2086 tl_assert( HG_(is_sane_Thread)(thr) );
2087
2088 lk = map_locks_maybe_lookup( (Addr)slock );
2089 if (lk && lk->heldBy) {
2090 /* it's held. So do the normal pre-unlock actions, as copied
2091 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2092 duplicates the map_locks_maybe_lookup. */
2093 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2094 False/*!isRDWR*/ );
2095 }
2096}
2097
2098static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2099 void* slock )
2100{
2101 Lock* lk;
2102 /* More kludgery. If the lock has never been seen before, do
2103 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2104 nothing. */
2105
2106 if (SHOW_EVENTS >= 1)
2107 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2108 "(ctid=%d, slock=%p)\n",
2109 (Int)tid, (void*)slock );
2110
2111 lk = map_locks_maybe_lookup( (Addr)slock );
2112 if (!lk) {
2113 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2114 }
2115}
2116
/* Pre-handler for pthread_spin_{try}lock: a spinlock acquisition is
   checked exactly as a mutex acquisition would be, so forward
   directly to the mutex pre-lock handler. */
static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid, 
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}
2122
/* Post-handler for pthread_spin_{try}lock: record the successful
   acquisition via the mutex post-lock handler. */
static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid, 
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}
2128
/* Pre-handler for pthread_spin_destroy: same cleanup as destroying a
   mutex.  Passes 0 for isInit, since spinlocks have no
   static-initialiser case. */
static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid, 
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}
2134
2135
sewardj9f569b72008-11-13 13:33:09 +00002136/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002137/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002138/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002139
sewardj02114542009-07-28 20:52:36 +00002140/* A mapping from CV to (the SO associated with it, plus some
2141 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002142 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2143 wait on it completes, we do a 'recv' from the SO. This is believed
2144 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002145 signallings/broadcasts.
2146*/
2147
sewardj02114542009-07-28 20:52:36 +00002148/* .so is the SO for this CV.
2149 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002150
sewardj02114542009-07-28 20:52:36 +00002151 POSIX says effectively that the first pthread_cond_{timed}wait call
2152 causes a dynamic binding between the CV and the mutex, and that
2153 lasts until such time as the waiter count falls to zero. Hence
2154 need to keep track of the number of waiters in order to do
2155 consistency tracking. */
/* Per-CV auxiliary record; see the (CV,MX) binding rules described in
   the comment block above. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;
2163
2164
2165/* pthread_cond_t* -> CVInfo* */
2166static WordFM* map_cond_to_CVInfo = NULL;
2167
2168static void map_cond_to_CVInfo_INIT ( void ) {
2169 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2170 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2171 "hg.mctCI.1", HG_(free), NULL );
sewardjf98e1c02008-10-25 16:22:41 +00002172 }
2173}
2174
sewardj02114542009-07-28 20:52:36 +00002175static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002176 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002177 map_cond_to_CVInfo_INIT();
2178 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002179 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002180 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002181 } else {
sewardj02114542009-07-28 20:52:36 +00002182 SO* so = libhb_so_alloc();
2183 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2184 cvi->so = so;
2185 cvi->mx_ga = 0;
2186 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2187 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002188 }
2189}
2190
philippe8bfc2152012-07-06 23:38:24 +00002191static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2192 UWord key, val;
2193 map_cond_to_CVInfo_INIT();
2194 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2195 tl_assert(key == (UWord)cond);
2196 return (CVInfo*)val;
2197 } else {
2198 return NULL;
2199 }
2200}
2201
/* Handle destruction of 'cond': remove its CVInfo from the map and
   free both the CVInfo and its SO.  If threads are still waiting on
   the CV, report a client error and leave the CV registered
   (mirroring the EBUSY outcome, where the variable is not actually
   destroyed).  Destruction of an unknown CV is reported unless
   'cond_is_init' says it still holds the PTHREAD_COND_INITIALIZER
   bit pattern (it may have been initialised that way and never
   used). */
static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread*   thr;
   UWord keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
                 " destruction of condition variable being waited upon");
         /* Destroying a cond var being waited upon outcome is EBUSY and
            variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0; /* defensive scrub before freeing */
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV.  So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}
2240
/* Pre-handler for pthread_cond_{signal,broadcast}.  Binds the CV to
   an SO if not already bound, sanity-checks the state of the
   associated mutex, and strong-sends on the SO. */
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread*   thr;
   CVInfo*   cvi;
   //Lock*     lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n", 
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call 
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL.  Be careful. */
      if (lk) {
         /* Three independent (non-exclusive) complaints about the
            associated mutex's state. */
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error.  We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr, 
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2319
/* Pre-handler for pthread_cond_{timed}wait.  Validates the mutex
   argument, establishes or checks the (CV,MX) binding, and bumps the
   CV's waiter count.
   Returns True if it reckons 'mutex' is valid and held by this
   thread, else False. */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n", 
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)( 
         thr, 
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      /* The else-if ladder below reports only the FIRST applicable
         problem, in this order: wrong lock kind, lock not held at
         all, lock held by another thread. */
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)( 
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)( 
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}
2388
2389static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
sewardjff427c92013-10-14 12:13:52 +00002390 void* cond, void* mutex,
2391 Bool timeout)
sewardjb4112022007-11-09 22:49:28 +00002392{
sewardjf98e1c02008-10-25 16:22:41 +00002393 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2394 the SO for this cond, and 'recv' from it so as to acquire a
2395 dependency edge back to the signaller/broadcaster. */
2396 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002397 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002398
2399 if (SHOW_EVENTS >= 1)
2400 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
sewardjff427c92013-10-14 12:13:52 +00002401 "(ctid=%d, cond=%p, mutex=%p)\n, timeout=%d",
2402 (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
sewardjb4112022007-11-09 22:49:28 +00002403
sewardjb4112022007-11-09 22:49:28 +00002404 thr = map_threads_maybe_lookup( tid );
2405 tl_assert(thr); /* cannot fail - Thread* must already exist */
2406
2407 // error-if: cond is also associated with a different mutex
2408
philippe8bfc2152012-07-06 23:38:24 +00002409 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2410 if (!cvi) {
2411 /* This could be either a bug in helgrind or the guest application
2412 that did an error (e.g. cond var was destroyed by another thread.
2413 Let's assume helgrind is perfect ...
2414 Note that this is similar to drd behaviour. */
2415 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2416 " being waited upon");
2417 return;
2418 }
2419
sewardj02114542009-07-28 20:52:36 +00002420 tl_assert(cvi);
2421 tl_assert(cvi->so);
2422 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002423
sewardjff427c92013-10-14 12:13:52 +00002424 if (!timeout && !libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002425 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2426 it? If this happened it would surely be a bug in the threads
2427 library. Or one of those fabled "spurious wakeups". */
2428 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002429 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002430 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002431 }
sewardjf98e1c02008-10-25 16:22:41 +00002432
2433 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002434 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2435
2436 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002437}
2438
philippe19dfe032013-03-24 20:10:23 +00002439static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2440 void* cond, void* cond_attr )
2441{
2442 CVInfo* cvi;
2443
2444 if (SHOW_EVENTS >= 1)
2445 VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2446 "(ctid=%d, cond=%p, cond_attr=%p)\n",
2447 (Int)tid, (void*)cond, (void*) cond_attr );
2448
2449 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2450 tl_assert (cvi);
2451 tl_assert (cvi->so);
2452}
2453
2454
sewardjf98e1c02008-10-25 16:22:41 +00002455static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
sewardjc02f6c42013-10-14 13:51:25 +00002456 void* cond, Bool cond_is_init )
sewardjf98e1c02008-10-25 16:22:41 +00002457{
2458 /* Deal with destroy events. The only purpose is to free storage
2459 associated with the CV, so as to avoid any possible resource
2460 leaks. */
2461 if (SHOW_EVENTS >= 1)
2462 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
sewardjc02f6c42013-10-14 13:51:25 +00002463 "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2464 (Int)tid, (void*)cond, (Int)cond_is_init );
sewardjf98e1c02008-10-25 16:22:41 +00002465
sewardjc02f6c42013-10-14 13:51:25 +00002466 map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
sewardjb4112022007-11-09 22:49:28 +00002467}
2468
2469
sewardj9f569b72008-11-13 13:33:09 +00002470/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002471/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002472/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002473
2474/* EXPOSITION only */
2475static
2476void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2477{
2478 if (SHOW_EVENTS >= 1)
2479 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2480 (Int)tid, (void*)rwl );
2481 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002482 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002483 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2484}
2485
/* Pre-handler for pthread_rwlock_destroy.  Complains if the argument
   is not a known rwlock; if the lock is still held, reports that and
   forcibly releases it from all owners, then removes the lock from
   all tracking structures. */
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n", 
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   /* Even if the kind was wrong, clean up whatever lock record we
      have at this address. */
   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock.
            NOTE(review): the message says "locked mutex" although this
            is an rwlock; left unchanged as regression outputs may
            match this exact text - confirm before rewording. */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop the lock from the lock-order graph (if tracked), the
         address map, and finally free it. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2532
2533static
sewardj789c3c52008-02-25 12:10:07 +00002534void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2535 void* rwl,
2536 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002537{
2538 /* Just check the rwl is sane; nothing else to do. */
2539 // 'rwl' may be invalid - not checked by wrapper
2540 Thread* thr;
2541 Lock* lk;
2542 if (SHOW_EVENTS >= 1)
2543 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2544 (Int)tid, (Int)isW, (void*)rwl );
2545
2546 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002547 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002548 thr = map_threads_maybe_lookup( tid );
2549 tl_assert(thr); /* cannot fail - Thread* must already exist */
2550
2551 lk = map_locks_maybe_lookup( (Addr)rwl );
2552 if ( lk
2553 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2554 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002555 HG_(record_error_Misc)(
2556 thr, "pthread_rwlock_{rd,rw}lock with a "
2557 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002558 }
2559}
2560
2561static
2562void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2563{
2564 // only called if the real library call succeeded - so mutex is sane
2565 Thread* thr;
2566 if (SHOW_EVENTS >= 1)
2567 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2568 (Int)tid, (Int)isW, (void*)rwl );
2569
2570 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2571 thr = map_threads_maybe_lookup( tid );
2572 tl_assert(thr); /* cannot fail - Thread* must already exist */
2573
2574 (isW ? evhH__post_thread_w_acquires_lock
2575 : evhH__post_thread_r_acquires_lock)(
2576 thr,
2577 LK_rdwr, /* if not known, create new lock with this LockKind */
2578 (Addr)rwl
2579 );
2580}
2581
2582static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2583{
2584 // 'rwl' may be invalid - not checked by wrapper
2585 Thread* thr;
2586 if (SHOW_EVENTS >= 1)
2587 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2588 (Int)tid, (void*)rwl );
2589
2590 thr = map_threads_maybe_lookup( tid );
2591 tl_assert(thr); /* cannot fail - Thread* must already exist */
2592
2593 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2594}
2595
2596static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2597{
2598 // only called if the real library call succeeded - so mutex is sane
2599 Thread* thr;
2600 if (SHOW_EVENTS >= 1)
2601 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2602 (Int)tid, (void*)rwl );
2603 thr = map_threads_maybe_lookup( tid );
2604 tl_assert(thr); /* cannot fail - Thread* must already exist */
2605
2606 // anything we should do here?
2607}
2608
2609
sewardj9f569b72008-11-13 13:33:09 +00002610/* ---------------------------------------------------------- */
2611/* -------------- events to do with semaphores -------------- */
2612/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002613
sewardj11e352f2007-11-30 11:11:02 +00002614/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002615 variables. */
2616
sewardjf98e1c02008-10-25 16:22:41 +00002617/* For each semaphore, we maintain a stack of SOs. When a 'post'
2618 operation is done on a semaphore (unlocking, essentially), a new SO
2619 is created for the posting thread, the posting thread does a strong
2620 send to it (which merely installs the posting thread's VC in the
2621 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002622
2623 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002624 semaphore, we pop a SO off the semaphore's stack (which should be
2625 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002626 dependencies between posters and waiters of the semaphore.
2627
sewardjf98e1c02008-10-25 16:22:41 +00002628 It may not be necessary to use a stack - perhaps a bag of SOs would
2629 do. But we do need to keep track of how many unused-up posts have
2630 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002631
sewardjf98e1c02008-10-25 16:22:41 +00002632 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002633 twice on S. T3 cannot complete its waits without both T1 and T2
2634 posting. The above mechanism will ensure that T3 acquires
2635 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002636
sewardjf98e1c02008-10-25 16:22:41 +00002637 When a semaphore is initialised with value N, we do as if we'd
2638 posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
2640 semaphore to acquire a dependency on the initialisation point,
2641 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002642
2643 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2644 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002645*/
2646
sewardjf98e1c02008-10-25 16:22:41 +00002647/* sem_t* -> XArray* SO* */
2648static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002649
sewardjf98e1c02008-10-25 16:22:41 +00002650static void map_sem_to_SO_stack_INIT ( void ) {
2651 if (map_sem_to_SO_stack == NULL) {
2652 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2653 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00002654 }
2655}
2656
sewardjf98e1c02008-10-25 16:22:41 +00002657static void push_SO_for_sem ( void* sem, SO* so ) {
2658 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002659 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002660 tl_assert(so);
2661 map_sem_to_SO_stack_INIT();
2662 if (VG_(lookupFM)( map_sem_to_SO_stack,
2663 &keyW, (UWord*)&xa, (UWord)sem )) {
2664 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002665 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002666 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002667 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002668 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2669 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002670 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002671 }
2672}
2673
sewardjf98e1c02008-10-25 16:22:41 +00002674static SO* mb_pop_SO_for_sem ( void* sem ) {
2675 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002676 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002677 SO* so;
2678 map_sem_to_SO_stack_INIT();
2679 if (VG_(lookupFM)( map_sem_to_SO_stack,
2680 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002681 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002682 Word sz;
2683 tl_assert(keyW == (UWord)sem);
2684 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002685 tl_assert(sz >= 0);
2686 if (sz == 0)
2687 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002688 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2689 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002690 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002691 return so;
sewardjb4112022007-11-09 22:49:28 +00002692 } else {
2693 /* hmm, that's odd. No stack for this semaphore. */
2694 return NULL;
2695 }
2696}
2697
sewardj11e352f2007-11-30 11:11:02 +00002698static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002699{
sewardjf98e1c02008-10-25 16:22:41 +00002700 UWord keyW, valW;
2701 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002702
sewardjb4112022007-11-09 22:49:28 +00002703 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002704 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002705 (Int)tid, (void*)sem );
2706
sewardjf98e1c02008-10-25 16:22:41 +00002707 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002708
sewardjf98e1c02008-10-25 16:22:41 +00002709 /* Empty out the semaphore's SO stack. This way of doing it is
2710 stupid, but at least it's easy. */
2711 while (1) {
2712 so = mb_pop_SO_for_sem( sem );
2713 if (!so) break;
2714 libhb_so_dealloc(so);
2715 }
2716
2717 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2718 XArray* xa = (XArray*)valW;
2719 tl_assert(keyW == (UWord)sem);
2720 tl_assert(xa);
2721 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2722 VG_(deleteXA)(xa);
2723 }
sewardjb4112022007-11-09 22:49:28 +00002724}
2725
sewardj11e352f2007-11-30 11:11:02 +00002726static
2727void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2728{
sewardjf98e1c02008-10-25 16:22:41 +00002729 SO* so;
2730 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002731
2732 if (SHOW_EVENTS >= 1)
2733 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2734 (Int)tid, (void*)sem, value );
2735
sewardjf98e1c02008-10-25 16:22:41 +00002736 thr = map_threads_maybe_lookup( tid );
2737 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002738
sewardjf98e1c02008-10-25 16:22:41 +00002739 /* Empty out the semaphore's SO stack. This way of doing it is
2740 stupid, but at least it's easy. */
2741 while (1) {
2742 so = mb_pop_SO_for_sem( sem );
2743 if (!so) break;
2744 libhb_so_dealloc(so);
2745 }
sewardj11e352f2007-11-30 11:11:02 +00002746
sewardjf98e1c02008-10-25 16:22:41 +00002747 /* If we don't do this check, the following while loop runs us out
2748 of memory for stupid initial values of 'value'. */
2749 if (value > 10000) {
2750 HG_(record_error_Misc)(
2751 thr, "sem_init: initial value exceeds 10000; using 10000" );
2752 value = 10000;
2753 }
sewardj11e352f2007-11-30 11:11:02 +00002754
sewardjf98e1c02008-10-25 16:22:41 +00002755 /* Now create 'valid' new SOs for the thread, do a strong send to
2756 each of them, and push them all on the stack. */
2757 for (; value > 0; value--) {
2758 Thr* hbthr = thr->hbthr;
2759 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002760
sewardjf98e1c02008-10-25 16:22:41 +00002761 so = libhb_so_alloc();
2762 libhb_so_send( hbthr, so, True/*strong send*/ );
2763 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002764 }
2765}
2766
2767static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002768{
sewardjf98e1c02008-10-25 16:22:41 +00002769 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2770 it (iow, write our VC into it, then tick ours), and push the SO
2771 on on a stack of SOs associated with 'sem'. This is later used
2772 by other thread(s) which successfully exit from a sem_wait on
2773 the same sem; by doing a strong recv from SOs popped of the
2774 stack, they acquire dependencies on the posting thread
2775 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002776
sewardjf98e1c02008-10-25 16:22:41 +00002777 Thread* thr;
2778 SO* so;
2779 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002780
2781 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002782 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002783 (Int)tid, (void*)sem );
2784
2785 thr = map_threads_maybe_lookup( tid );
2786 tl_assert(thr); /* cannot fail - Thread* must already exist */
2787
2788 // error-if: sem is bogus
2789
sewardjf98e1c02008-10-25 16:22:41 +00002790 hbthr = thr->hbthr;
2791 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002792
sewardjf98e1c02008-10-25 16:22:41 +00002793 so = libhb_so_alloc();
2794 libhb_so_send( hbthr, so, True/*strong send*/ );
2795 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002796}
2797
sewardj11e352f2007-11-30 11:11:02 +00002798static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002799{
sewardjf98e1c02008-10-25 16:22:41 +00002800 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2801 the 'sem' from this semaphore's SO-stack, and do a strong recv
2802 from it. This creates a dependency back to one of the post-ers
2803 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002804
sewardjf98e1c02008-10-25 16:22:41 +00002805 Thread* thr;
2806 SO* so;
2807 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002808
2809 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002810 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002811 (Int)tid, (void*)sem );
2812
2813 thr = map_threads_maybe_lookup( tid );
2814 tl_assert(thr); /* cannot fail - Thread* must already exist */
2815
2816 // error-if: sem is bogus
2817
sewardjf98e1c02008-10-25 16:22:41 +00002818 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002819
sewardjf98e1c02008-10-25 16:22:41 +00002820 if (so) {
2821 hbthr = thr->hbthr;
2822 tl_assert(hbthr);
2823
2824 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2825 libhb_so_dealloc(so);
2826 } else {
2827 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2828 If this happened it would surely be a bug in the threads
2829 library. */
2830 HG_(record_error_Misc)(
2831 thr, "Bug in libpthread: sem_wait succeeded on"
2832 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002833 }
2834}
2835
2836
sewardj9f569b72008-11-13 13:33:09 +00002837/* -------------------------------------------------------- */
2838/* -------------- events to do with barriers -------------- */
2839/* -------------------------------------------------------- */
2840
/* Per-barrier auxiliary state tracked by Helgrind for each
   pthread_barrier_t the guest uses. */
typedef
   struct {
      Bool initted; /* has it yet been initted by guest? */
      Bool resizable; /* is resizing allowed? */
      UWord size; /* declared size */
      XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;
2849
2850static Bar* new_Bar ( void ) {
2851 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
sewardj9f569b72008-11-13 13:33:09 +00002852 /* all fields are zero */
2853 tl_assert(bar->initted == False);
2854 return bar;
2855}
2856
2857static void delete_Bar ( Bar* bar ) {
2858 tl_assert(bar);
2859 if (bar->waiting)
2860 VG_(deleteXA)(bar->waiting);
2861 HG_(free)(bar);
2862}
2863
/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;

/* Lazily create the barrier map on first use. */
static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      /* NULL cmp fn: keys (guest addresses) are compared unboxed */
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
   }
}
2875
2876static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2877 UWord key, val;
2878 map_barrier_to_Bar_INIT();
2879 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2880 tl_assert(key == (UWord)barrier);
2881 return (Bar*)val;
2882 } else {
2883 Bar* bar = new_Bar();
2884 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2885 return bar;
2886 }
2887}
2888
2889static void map_barrier_to_Bar_delete ( void* barrier ) {
2890 UWord keyW, valW;
2891 map_barrier_to_Bar_INIT();
2892 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2893 Bar* bar = (Bar*)valW;
2894 tl_assert(keyW == (UWord)barrier);
2895 delete_Bar(bar);
2896 }
2897}
2898
2899
2900static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2901 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002902 UWord count,
2903 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002904{
2905 Thread* thr;
2906 Bar* bar;
2907
2908 if (SHOW_EVENTS >= 1)
2909 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002910 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2911 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002912
2913 thr = map_threads_maybe_lookup( tid );
2914 tl_assert(thr); /* cannot fail - Thread* must already exist */
2915
2916 if (count == 0) {
2917 HG_(record_error_Misc)(
2918 thr, "pthread_barrier_init: 'count' argument is zero"
2919 );
2920 }
2921
sewardj406bac82010-03-03 23:03:40 +00002922 if (resizable != 0 && resizable != 1) {
2923 HG_(record_error_Misc)(
2924 thr, "pthread_barrier_init: invalid 'resizable' argument"
2925 );
2926 }
2927
sewardj9f569b72008-11-13 13:33:09 +00002928 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2929 tl_assert(bar);
2930
2931 if (bar->initted) {
2932 HG_(record_error_Misc)(
2933 thr, "pthread_barrier_init: barrier is already initialised"
2934 );
2935 }
2936
2937 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2938 tl_assert(bar->initted);
2939 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002940 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002941 );
2942 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2943 }
2944 if (!bar->waiting) {
2945 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2946 sizeof(Thread*) );
2947 }
2948
sewardj9f569b72008-11-13 13:33:09 +00002949 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002950 bar->initted = True;
2951 bar->resizable = resizable == 1 ? True : False;
2952 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002953}
2954
2955
2956static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2957 void* barrier )
2958{
sewardj553655c2008-11-14 19:41:19 +00002959 Thread* thr;
2960 Bar* bar;
2961
sewardj9f569b72008-11-13 13:33:09 +00002962 /* Deal with destroy events. The only purpose is to free storage
2963 associated with the barrier, so as to avoid any possible
2964 resource leaks. */
2965 if (SHOW_EVENTS >= 1)
2966 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2967 "(tid=%d, barrier=%p)\n",
2968 (Int)tid, (void*)barrier );
2969
sewardj553655c2008-11-14 19:41:19 +00002970 thr = map_threads_maybe_lookup( tid );
2971 tl_assert(thr); /* cannot fail - Thread* must already exist */
2972
2973 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2974 tl_assert(bar);
2975
2976 if (!bar->initted) {
2977 HG_(record_error_Misc)(
2978 thr, "pthread_barrier_destroy: barrier was never initialised"
2979 );
2980 }
2981
2982 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2983 HG_(record_error_Misc)(
2984 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2985 );
2986 }
2987
sewardj9f569b72008-11-13 13:33:09 +00002988 /* Maybe we shouldn't do this; just let it persist, so that when it
2989 is reinitialised we don't need to do any dynamic memory
2990 allocation? The downside is a potentially unlimited space leak,
2991 if the client creates (in turn) a large number of barriers all
2992 at different locations. Note that if we do later move to the
2993 don't-delete-it scheme, we need to mark the barrier as
2994 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002995 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002996 map_barrier_to_Bar_delete( barrier );
2997}
2998
2999
sewardj406bac82010-03-03 23:03:40 +00003000/* All the threads have arrived. Now do the Interesting Bit. Get a
3001 new synchronisation object and do a weak send to it from all the
3002 participating threads. This makes its vector clocks be the join of
3003 all the individual threads' vector clocks. Then do a strong
3004 receive from it back to all threads, so that their VCs are a copy
3005 of it (hence are all equal to the join of their original VCs.) */
3006static void do_barrier_cross_sync_and_empty ( Bar* bar )
3007{
3008 /* XXX check bar->waiting has no duplicates */
3009 UWord i;
3010 SO* so = libhb_so_alloc();
3011
3012 tl_assert(bar->waiting);
3013 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3014
3015 /* compute the join ... */
3016 for (i = 0; i < bar->size; i++) {
3017 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3018 Thr* hbthr = t->hbthr;
3019 libhb_so_send( hbthr, so, False/*weak send*/ );
3020 }
3021 /* ... and distribute to all threads */
3022 for (i = 0; i < bar->size; i++) {
3023 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3024 Thr* hbthr = t->hbthr;
3025 libhb_so_recv( hbthr, so, True/*strong recv*/ );
3026 }
3027
3028 /* finally, we must empty out the waiting vector */
3029 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3030
3031 /* and we don't need this any more. Perhaps a stack-allocated
3032 SO would be better? */
3033 libhb_so_dealloc(so);
3034}
3035
3036
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar* bar;
   UWord present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   /* Record this thread's arrival at the barrier. */
   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   /* Not the last arrival: nothing more to do yet. */
   if (present < bar->size)
      return;

   /* Last arrival: cross-sync all waiters and empty the list. */
   do_barrier_cross_sync_and_empty(bar);
}
sewardj9f569b72008-11-13 13:33:09 +00003117
sewardj9f569b72008-11-13 13:33:09 +00003118
sewardj406bac82010-03-03 23:03:40 +00003119static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3120 void* barrier,
3121 UWord newcount )
3122{
3123 Thread* thr;
3124 Bar* bar;
3125 UWord present;
3126
3127 if (SHOW_EVENTS >= 1)
3128 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3129 "(tid=%d, barrier=%p, newcount=%lu)\n",
3130 (Int)tid, (void*)barrier, newcount );
3131
3132 thr = map_threads_maybe_lookup( tid );
3133 tl_assert(thr); /* cannot fail - Thread* must already exist */
3134
3135 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3136 tl_assert(bar);
3137
3138 if (!bar->initted) {
3139 HG_(record_error_Misc)(
3140 thr, "pthread_barrier_resize: barrier is uninitialised"
3141 );
3142 return; /* client is broken .. avoid assertions below */
3143 }
3144
3145 if (!bar->resizable) {
3146 HG_(record_error_Misc)(
3147 thr, "pthread_barrier_resize: barrier is may not be resized"
3148 );
3149 return; /* client is broken .. avoid assertions below */
3150 }
3151
3152 if (newcount == 0) {
3153 HG_(record_error_Misc)(
3154 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3155 );
3156 return; /* client is broken .. avoid assertions below */
3157 }
3158
3159 /* guaranteed by _INIT_PRE above */
3160 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003161 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003162 /* Guaranteed by this fn */
3163 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003164
sewardj406bac82010-03-03 23:03:40 +00003165 if (newcount >= bar->size) {
3166 /* Increasing the capacity. There's no possibility of threads
3167 moving on from the barrier in this situation, so just note
3168 the fact and do nothing more. */
3169 bar->size = newcount;
3170 } else {
3171 /* Decreasing the capacity. If we decrease it to be equal or
3172 below the number of waiting threads, they will now move past
3173 the barrier, so need to mess with dep edges in the same way
3174 as if the barrier had filled up normally. */
3175 present = VG_(sizeXA)(bar->waiting);
3176 tl_assert(present >= 0 && present <= bar->size);
3177 if (newcount <= present) {
3178 bar->size = present; /* keep the cross_sync call happy */
3179 do_barrier_cross_sync_and_empty(bar);
3180 }
3181 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003182 }
sewardj9f569b72008-11-13 13:33:09 +00003183}
3184
3185
sewardjed2e72e2009-08-14 11:08:24 +00003186/* ----------------------------------------------------- */
3187/* ----- events to do with user-specified HB edges ----- */
3188/* ----------------------------------------------------- */
3189
/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */



/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

/* Lazily create the usertag-to-SO map on first use. */
static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      /* NULL cmp fn: usertags are compared as unboxed UWords */
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
   }
}
3205
3206static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3207 UWord key, val;
3208 map_usertag_to_SO_INIT();
3209 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3210 tl_assert(key == (UWord)usertag);
3211 return (SO*)val;
3212 } else {
3213 SO* so = libhb_so_alloc();
3214 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3215 return so;
3216 }
3217}
3218
sewardj6015d0e2011-03-11 19:10:48 +00003219static void map_usertag_to_SO_delete ( UWord usertag ) {
3220 UWord keyW, valW;
3221 map_usertag_to_SO_INIT();
3222 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3223 SO* so = (SO*)valW;
3224 tl_assert(keyW == usertag);
3225 tl_assert(so);
3226 libhb_so_dealloc(so);
3227 }
3228}
sewardjed2e72e2009-08-14 11:08:24 +00003229
3230
3231static
3232void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3233{
3234 /* TID is just about to notionally sent a message on a notional
3235 abstract synchronisation object whose identity is given by
3236 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003237 bound, and do a 'weak send' on the SO. This joins the vector
3238 clocks from this thread into any vector clocks already present
3239 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003240 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003241 thereby acquiring a dependency on all the events that have
3242 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003243 Thread* thr;
3244 SO* so;
3245
3246 if (SHOW_EVENTS >= 1)
3247 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3248 (Int)tid, usertag );
3249
3250 thr = map_threads_maybe_lookup( tid );
3251 tl_assert(thr); /* cannot fail - Thread* must already exist */
3252
3253 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3254 tl_assert(so);
3255
sewardj8c50d3c2011-03-11 18:38:12 +00003256 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003257}
3258
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
3286
sewardj6015d0e2011-03-11 19:10:48 +00003287static
3288void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3289{
3290 /* TID declares that any happens-before edges notionally stored in
3291 USERTAG can be deleted. If (as would normally be the case) a
3292 SO is associated with USERTAG, then the assocation is removed
3293 and all resources associated with SO are freed. Importantly,
3294 that frees up any VTSs stored in SO. */
3295 if (SHOW_EVENTS >= 1)
3296 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3297 (Int)tid, usertag );
3298
3299 map_usertag_to_SO_delete( usertag );
3300}
3301
sewardjed2e72e2009-08-14 11:08:24 +00003302
sewardjb4112022007-11-09 22:49:28 +00003303/*--------------------------------------------------------------*/
3304/*--- Lock acquisition order monitoring ---*/
3305/*--------------------------------------------------------------*/
3306
3307/* FIXME: here are some optimisations still to do in
3308 laog__pre_thread_acquires_lock.
3309
3310 The graph is structured so that if L1 --*--> L2 then L1 must be
3311 acquired before L2.
3312
3313 The common case is that some thread T holds (eg) L1 L2 and L3 and
3314 is repeatedly acquiring and releasing Ln, and there is no ordering
3315 error in what it is doing. Hence it repeatly:
3316
3317 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3318 produces the answer No (because there is no error).
3319
3320 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3321 (because they already got added the first time T acquired Ln).
3322
3323 Hence cache these two events:
3324
3325 (1) Cache result of the query from last time. Invalidate the cache
3326 any time any edges are added to or deleted from laog.
3327
3328 (2) Cache these add-edge requests and ignore them if said edges
3329 have already been added to laog. Invalidate the cache any time
3330 any edges are deleted from laog.
3331*/
3332
/* Per-lock node of the lock acquisition order graph: the set of
   locks observed to be acquired before this one (inns) and after it
   (outs).  Both are WordSets in the univ_laog universe. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3342
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to.  Keyed (by cmp_LAOGLinkExposition) on the guest
   address pair only. */
typedef
   struct {
      Addr src_ga; /* Lock guest addresses for */
      Addr dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
3354
sewardj250ec2e2008-02-15 22:02:30 +00003355static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003356 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3357 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3358 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3359 if (llx1->src_ga < llx2->src_ga) return -1;
3360 if (llx1->src_ga > llx2->src_ga) return 1;
3361 if (llx1->dst_ga < llx2->dst_ga) return -1;
3362 if (llx1->dst_ga > llx2->dst_ga) return 1;
3363 return 0;
3364}
3365
/* Used as a set (values are always NULL), ordered by
   cmp_LAOGLinkExposition on the boxed keys. */
static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
3368
3369
/* Create the (initially empty) lock acquisition order graph and its
   companion exposition map.  Only legal when lock-order tracking is
   enabled and neither structure has been created yet. */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   /* keys are boxed LAOGLinkExposition*s, so a real cmp fn is needed */
   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
}
3383
florian6bf37262012-10-21 03:23:36 +00003384static void laog__show ( const HChar* who ) {
3385 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003386 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003387 Lock* me;
3388 LAOGLinks* links;
3389 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003390 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003391 me = NULL;
3392 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003393 while (VG_(nextIterFM)( laog, (UWord*)&me,
3394 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003395 tl_assert(me);
3396 tl_assert(links);
3397 VG_(printf)(" node %p:\n", me);
3398 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3399 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003400 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003401 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3402 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003403 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003404 me = NULL;
3405 links = NULL;
3406 }
sewardj896f6f92008-08-19 08:38:52 +00003407 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003408 VG_(printf)("}\n");
3409}
3410
/* Garbage-collect the univ_laog WordSet universe: mark every WordSet
   still referenced from some node of 'laog' (via its inns/outs
   fields), kill all the rest, and then pick the cardinality at which
   the next GC will run (see the long discussion below). */
static void univ_laog_do_GC ( void ) {
   Word i;
   LAOGLinks* links;
   Word seen = 0;
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);

   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                        * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   if (VG_(clo_stats))
      VG_(message)(Vg_DebugMsg,
                   "univ_laog_do_GC enter cardinality %'10d\n",
                   (Int)univ_laog_cardinality);

   /* Mark phase: walk every laog node and flag its in/out sets. */
   VG_(initIterFM)( laog );
   links = NULL;
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links);
      tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
      links = NULL;
   }
   VG_(doneIterFM)( laog );

   /* Sweep phase: kill every unreferenced WordSet. */
   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next_gc.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep long enough laog WV that will become
   //   useful again very soon.
   // Sol 2: garbage collect at a percentage increase of the current cardinality
   //         (with a min increase of 1)
   //   Trials on a small test program with 1%, 5% and 10% increase was done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each gc becomes a lot, when many gc
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to cost introduced by gc: on the t2t perf test (doing only
   // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection. With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // difference performance is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.

   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or fixed increased),
   // we should do it here, eg.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code the solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
          (Int)seen, next_gc_univ_laog);
}
3488
3489
/* Record in the graph that 'src' is acquired before 'dst': add the
   src->dst edge to both src's forward links and dst's backward
   links, creating the nodes if necessary.  For edges newly entering
   the graph with known acquisition points, also record those points
   in laog_exposition so that a later ordering violation can cite
   where the src-dst order was first established.  May trigger a GC
   of the WordSet universe. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   UWord keyW;
   LAOGLinks* links;
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      /* addToWS returns the same set id iff dst was already present */
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   /* forward and backward views of the edge must agree */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      /* stack-local key for the lookup; only the _ga fields matter
         to cmp_LAOGLinkExposition */
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}
3572
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   /* Remove the laog edge src -> dst: take dst out of src's out-set,
      take src out of dst's in-set, and drop the (src,dst) entry from
      the exposition map if present.  A lock with no LAOGLinks entry
      is silently ignored.  May trigger a univ_laog GC at the end. */
   UWord      keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      /* Only src_ga/dst_ga matter for the lookup; the EC fields are
         presumably ignored by the map's comparison function — they
         are left NULL here. */
      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* Deleting edges can increase the number of WSs, so check whether
      a univ_laog garbage collection is now due. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}
3616
3617__attribute__((noinline))
3618static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003619 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003620 LAOGLinks* links;
3621 keyW = 0;
3622 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003623 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003624 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003625 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003626 return links->outs;
3627 } else {
3628 return HG_(emptyWS)( univ_laog );
3629 }
3630}
3631
3632__attribute__((noinline))
3633static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003634 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003635 LAOGLinks* links;
3636 keyW = 0;
3637 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003638 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003639 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003640 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003641 return links->inns;
3642 } else {
3643 return HG_(emptyWS)( univ_laog );
3644 }
3645}
3646
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   /* Check that laog's edge sets are mutually consistent: for every
      lock 'me' in the graph, each predecessor of 'me' must list 'me'
      among its successors, and each successor of 'me' must list 'me'
      among its predecessors.  'who' names the caller for the failure
      message.  Aborts via tl_assert(0) on any inconsistency. */
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* Every in-edge must be matched by the peer's out-edge ... */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      /* ... and every out-edge by the peer's in-edge. */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3686
3687/* If there is a path in laog from 'src' to any of the elements in
3688 'dst', return an arbitrarily chosen element of 'dst' reachable from
3689 'src'. If no path exist from 'src' to any element in 'dst', return
3690 NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   /* Iterative depth-first search over laog's successor edges,
      starting at 'src'.  Returns the first member of 'dsts'
      encountered, or NULL if none is reachable.  NB: 'dsts' lives in
      univ_lsets, not univ_laog. */
   Lock*     ret;
   Word      ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   UWord     succs_size, i;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* Worklist exhausted: no element of 'dsts' is reachable. */
      if (ssz == 0) { ret = NULL; break; }

      /* Pop the top of the stack. */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* Membership test precedes the visited check, so a 'src' that
         is itself in 'dsts' is found immediately. */
      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      /* Push all successors of 'here' for later exploration. */
      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3742
3743
3744/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3745 between 'lk' and the locks already held by 'thr' and issue a
3746 complaint if so. Also, update the ordering graph appropriately.
3747*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   /* Check the acquisition of 'lk' by 'thr' against the lock-order
      graph, reporting a LockOrder error if a reversed ordering is
      detected, then record edges (old, lk) for every lock 'old'
      already held by 'thr'. */
   UWord*   ls_words;
   UWord    ls_size, i;
   Lock*    other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         /* The exposition map knows where the src->dst ordering was
            originally established; show those points to the user. */
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk, other,
            found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks
            between them:

                           C

                       fCA   fBC

                      A   fAB   B

            Let's have the following actions:
                   A takes    fCA,fAB
                   A releases fCA,fAB
                   B takes    fAB,fBC
                   B releases fAB,fBC
                   C takes    fBC,fCA
                   C releases fBC,fCA

            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            sequence is done:
                A takes fCA
                B takes fAB
                C takes fBC

            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            are not given.

            This is because there is no pair (fCA, fBC) in laog exposition :
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */
         HG_(record_error_LockOrder)(
            thr, lk, other,
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3869
sewardj866c80c2011-10-22 19:29:51 +00003870/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3871static UWord* UWordV_dup(UWord* words, Word words_size)
3872{
3873 UInt i;
3874
3875 if (words_size == 0)
3876 return NULL;
3877
3878 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3879
3880 for (i = 0; i < words_size; i++)
3881 dup[i] = words[i];
3882
3883 return dup;
3884}
sewardjb4112022007-11-09 22:49:28 +00003885
3886/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3887
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   /* 'lk' is going away.  Remove every laog edge touching it, but
      preserve transitive ordering information by adding a direct
      edge pred -> succ for every (pred, succ) pair whose path
      previously went through lk. */
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* Re-link each predecessor directly to each (distinct) successor. */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   /* UWordV_dup returns NULL for empty vectors, hence the guards. */
   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
3941
sewardj1cbc12f2008-11-10 16:16:46 +00003942//__attribute__((noinline))
3943//static void laog__handle_lock_deletions (
3944// WordSetID /* in univ_laog */ locksToDelete
3945// )
3946//{
3947// Word i, ws_size;
3948// UWord* ws_words;
3949//
sewardj1cbc12f2008-11-10 16:16:46 +00003950//
3951// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003952// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003953// for (i = 0; i < ws_size; i++)
3954// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3955//
3956// if (HG_(clo_sanity_flags) & SCE_LAOG)
3957// all__sanity_check("laog__handle_lock_deletions-post");
3958//}
sewardjb4112022007-11-09 22:49:28 +00003959
3960
3961/*--------------------------------------------------------------*/
3962/*--- Malloc/free replacements ---*/
3963/*--------------------------------------------------------------*/
3964
/* Per-heap-block metadata, so Helgrind can attach allocation context
   (stack, thread) to addresses appearing in error reports. */
typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously).  Keyed by payload address. */
static VgHashTable hg_mallocmeta_table = NULL;

/* MallocMeta are small elements. We use a pool to avoid
   the overhead of malloc for each MallocMeta. */
static PoolAlloc *MallocMeta_poolalloc = NULL;
sewardjb4112022007-11-09 22:49:28 +00003982
/* Get a zero-filled MallocMeta from the pool allocator. */
static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
   VG_(memset)(md, 0, sizeof(MallocMeta));
   return md;
}
/* Return a MallocMeta to the pool allocator. */
static void delete_MallocMeta ( MallocMeta* md ) {
   VG_(freeEltPA)(MallocMeta_poolalloc, md);
}
3991
3992
3993/* Allocate a client block and set up the metadata for it. */
3994
static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   /* Allocate a client block of szB bytes aligned to alignB, optionally
      zeroed; record its MallocMeta and notify the lower memory layers.
      Returns NULL if the client allocator fails. */
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
4026
4027/* Re the checks for less-than-zero (also in hg_cli__realloc below):
4028 Cast to a signed type to catch any unexpectedly negative args.
4029 We're assuming here that the size asked for is not greater than
4030 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4031 platforms). */
4032static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4033 if (((SSizeT)n) < 0) return NULL;
4034 return handle_alloc ( tid, n, VG_(clo_alignment),
4035 /*is_zeroed*/False );
4036}
4037static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4038 if (((SSizeT)n) < 0) return NULL;
4039 return handle_alloc ( tid, n, VG_(clo_alignment),
4040 /*is_zeroed*/False );
4041}
4042static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4043 if (((SSizeT)n) < 0) return NULL;
4044 return handle_alloc ( tid, n, VG_(clo_alignment),
4045 /*is_zeroed*/False );
4046}
4047static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4048 if (((SSizeT)n) < 0) return NULL;
4049 return handle_alloc ( tid, n, align,
4050 /*is_zeroed*/False );
4051}
4052static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4053 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4054 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4055 /*is_zeroed*/True );
4056}
4057
4058
4059/* Free a client block, including getting rid of the relevant
4060 metadata. */
4061
static void handle_free ( ThreadId tid, void* p )
{
   /* Free the client block at 'p': remove and release its MallocMeta,
      free the underlying storage, and notify the lower memory layers.
      Frees of unknown addresses are silently ignored. */
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}
4088
/* free(), operator delete and operator delete[] all funnel through
   handle_free; three entry points exist only to match the core's
   malloc-replacement API. */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
4098
4099
4100static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4101{
4102 MallocMeta *md, *md_new, *md_tmp;
4103 SizeT i;
4104
4105 Addr payload = (Addr)payloadV;
4106
4107 if (((SSizeT)new_size) < 0) return NULL;
4108
4109 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4110 if (!md)
4111 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4112
4113 tl_assert(md->payload == payload);
4114
4115 if (md->szB == new_size) {
4116 /* size unchanged */
4117 md->where = VG_(record_ExeContext)(tid, 0);
4118 return payloadV;
4119 }
4120
4121 if (md->szB > new_size) {
4122 /* new size is smaller */
4123 md->szB = new_size;
4124 md->where = VG_(record_ExeContext)(tid, 0);
4125 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4126 return payloadV;
4127 }
4128
4129 /* else */ {
4130 /* new size is bigger */
4131 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4132
4133 /* First half kept and copied, second half new */
4134 // FIXME: shouldn't we use a copier which implements the
4135 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004136 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004137 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004138 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004139 /* FIXME: can anything funny happen here? specifically, if the
4140 old range contained a lock, then die_mem_heap will complain.
4141 Is that the correct behaviour? Not sure. */
4142 evh__die_mem_heap( payload, md->szB );
4143
4144 /* Copy from old to new */
4145 for (i = 0; i < md->szB; i++)
4146 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4147
4148 /* Because the metadata hash table is index by payload address,
4149 we have to get rid of the old hash table entry and make a new
4150 one. We can't just modify the existing metadata in place,
4151 because then it would (almost certainly) be in the wrong hash
4152 chain. */
4153 md_new = new_MallocMeta();
4154 *md_new = *md;
4155
4156 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4157 tl_assert(md_tmp);
4158 tl_assert(md_tmp == md);
4159
4160 VG_(cli_free)((void*)md->payload);
4161 delete_MallocMeta(md);
4162
4163 /* Update fields */
4164 md_new->where = VG_(record_ExeContext)( tid, 0 );
4165 md_new->szB = new_size;
4166 md_new->payload = p_new;
4167 md_new->thr = map_threads_lookup( tid );
4168
4169 /* and add */
4170 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4171
4172 return (void*)p_new;
4173 }
4174}
4175
njn8b140de2009-02-17 04:31:18 +00004176static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4177{
4178 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4179
4180 // There may be slop, but pretend there isn't because only the asked-for
4181 // area will have been shadowed properly.
4182 return ( md ? md->szB : 0 );
4183}
4184
sewardjb4112022007-11-09 22:49:28 +00004185
sewardj095d61e2010-03-11 13:43:18 +00004186/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004187 Slow linear search. With a bit of hash table help if 'data_addr'
4188 is either the start of a block or up to 15 word-sized steps along
4189 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004190
4191static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4192{
sewardjc8028ad2010-05-05 09:34:42 +00004193 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4194 right at it. */
4195 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4196 return True;
4197 /* else normal interval rules apply */
4198 if (LIKELY(a < mm->payload)) return False;
4199 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4200 return True;
sewardj095d61e2010-03-11 13:43:18 +00004201}
4202
sewardjc8028ad2010-05-05 09:34:42 +00004203Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
philippe0c9ac8d2014-07-18 00:03:58 +00004204 /*OUT*/UInt* tnr,
sewardj095d61e2010-03-11 13:43:18 +00004205 /*OUT*/Addr* payload,
4206 /*OUT*/SizeT* szB,
4207 Addr data_addr )
4208{
4209 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004210 Int i;
4211 const Int n_fast_check_words = 16;
4212
4213 /* First, do a few fast searches on the basis that data_addr might
4214 be exactly the start of a block or up to 15 words inside. This
4215 can happen commonly via the creq
4216 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4217 for (i = 0; i < n_fast_check_words; i++) {
4218 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4219 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4220 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4221 goto found;
4222 }
4223
sewardj095d61e2010-03-11 13:43:18 +00004224 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004225 some such, it's hard to see how to do better. We have to check
4226 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004227 VG_(HT_ResetIter)(hg_mallocmeta_table);
4228 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004229 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4230 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004231 }
sewardjc8028ad2010-05-05 09:34:42 +00004232
4233 /* Not found. Bah. */
4234 return False;
4235 /*NOTREACHED*/
4236
4237 found:
4238 tl_assert(mm);
4239 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4240 if (where) *where = mm->where;
philippe0c9ac8d2014-07-18 00:03:58 +00004241 if (tnr) *tnr = mm->thr->errmsg_index;
sewardjc8028ad2010-05-05 09:34:42 +00004242 if (payload) *payload = mm->payload;
4243 if (szB) *szB = mm->szB;
4244 return True;
sewardj095d61e2010-03-11 13:43:18 +00004245}
4246
4247
sewardjb4112022007-11-09 22:49:28 +00004248/*--------------------------------------------------------------*/
4249/*--- Instrumentation ---*/
4250/*--------------------------------------------------------------*/
4251
sewardjcafe5052013-01-17 14:24:35 +00004252#define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
sewardjffce8152011-06-24 10:09:41 +00004253#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4254#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4255#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4256#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4257#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4258
sewardjcafe5052013-01-17 14:24:35 +00004259/* This takes and returns atoms, of course. Not full IRExprs. */
4260static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4261{
4262 tl_assert(arg1 && arg2);
4263 tl_assert(isIRAtom(arg1));
4264 tl_assert(isIRAtom(arg2));
4265 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4266 code, I know. */
4267 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4268 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4269 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4270 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4271 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4272 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4273 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4274 mkexpr(wide2))));
4275 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4276 return mkexpr(res);
4277}
4278
sewardjffce8152011-06-24 10:09:41 +00004279static void instrument_mem_access ( IRSB* sbOut,
sewardjb4112022007-11-09 22:49:28 +00004280 IRExpr* addr,
4281 Int szB,
4282 Bool isStore,
sewardjffce8152011-06-24 10:09:41 +00004283 Int hWordTy_szB,
sewardjcafe5052013-01-17 14:24:35 +00004284 Int goff_sp,
4285 IRExpr* guard ) /* NULL => True */
sewardjb4112022007-11-09 22:49:28 +00004286{
4287 IRType tyAddr = Ity_INVALID;
florian6bf37262012-10-21 03:23:36 +00004288 const HChar* hName = NULL;
sewardjb4112022007-11-09 22:49:28 +00004289 void* hAddr = NULL;
4290 Int regparms = 0;
4291 IRExpr** argv = NULL;
4292 IRDirty* di = NULL;
4293
sewardjffce8152011-06-24 10:09:41 +00004294 // THRESH is the size of the window above SP (well,
4295 // mostly above) that we assume implies a stack reference.
4296 const Int THRESH = 4096 * 4; // somewhat arbitrary
4297 const Int rz_szB = VG_STACK_REDZONE_SZB;
4298
sewardjb4112022007-11-09 22:49:28 +00004299 tl_assert(isIRAtom(addr));
4300 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4301
sewardjffce8152011-06-24 10:09:41 +00004302 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
sewardjb4112022007-11-09 22:49:28 +00004303 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4304
4305 /* So the effective address is in 'addr' now. */
4306 regparms = 1; // unless stated otherwise
4307 if (isStore) {
4308 switch (szB) {
4309 case 1:
sewardj23f12002009-07-24 08:45:08 +00004310 hName = "evh__mem_help_cwrite_1";
4311 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004312 argv = mkIRExprVec_1( addr );
4313 break;
4314 case 2:
sewardj23f12002009-07-24 08:45:08 +00004315 hName = "evh__mem_help_cwrite_2";
4316 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004317 argv = mkIRExprVec_1( addr );
4318 break;
4319 case 4:
sewardj23f12002009-07-24 08:45:08 +00004320 hName = "evh__mem_help_cwrite_4";
4321 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004322 argv = mkIRExprVec_1( addr );
4323 break;
4324 case 8:
sewardj23f12002009-07-24 08:45:08 +00004325 hName = "evh__mem_help_cwrite_8";
4326 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004327 argv = mkIRExprVec_1( addr );
4328 break;
4329 default:
4330 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4331 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004332 hName = "evh__mem_help_cwrite_N";
4333 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004334 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4335 break;
4336 }
4337 } else {
4338 switch (szB) {
4339 case 1:
sewardj23f12002009-07-24 08:45:08 +00004340 hName = "evh__mem_help_cread_1";
4341 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004342 argv = mkIRExprVec_1( addr );
4343 break;
4344 case 2:
sewardj23f12002009-07-24 08:45:08 +00004345 hName = "evh__mem_help_cread_2";
4346 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004347 argv = mkIRExprVec_1( addr );
4348 break;
4349 case 4:
sewardj23f12002009-07-24 08:45:08 +00004350 hName = "evh__mem_help_cread_4";
4351 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004352 argv = mkIRExprVec_1( addr );
4353 break;
4354 case 8:
sewardj23f12002009-07-24 08:45:08 +00004355 hName = "evh__mem_help_cread_8";
4356 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004357 argv = mkIRExprVec_1( addr );
4358 break;
4359 default:
4360 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4361 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004362 hName = "evh__mem_help_cread_N";
4363 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004364 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4365 break;
4366 }
4367 }
4368
sewardjffce8152011-06-24 10:09:41 +00004369 /* Create the helper. */
sewardjb4112022007-11-09 22:49:28 +00004370 tl_assert(hName);
4371 tl_assert(hAddr);
4372 tl_assert(argv);
4373 di = unsafeIRDirty_0_N( regparms,
4374 hName, VG_(fnptr_to_fnentry)( hAddr ),
4375 argv );
sewardjffce8152011-06-24 10:09:41 +00004376
4377 if (! HG_(clo_check_stack_refs)) {
4378 /* We're ignoring memory references which are (obviously) to the
4379 stack. In fact just skip stack refs that are within 4 pages
4380 of SP (SP - the redzone, really), as that's simple, easy, and
4381 filters out most stack references. */
4382 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4383 some arbitrary N. If that is true then addr is outside the
4384 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4385 pages) then we can say addr is within a few pages of SP and
4386 so can't possibly be a heap access, and so can be skipped.
4387
4388 Note that the condition simplifies to
4389 (addr - SP + RZ) >u N
4390 which generates better code in x86/amd64 backends, but it does
4391 not unfortunately simplify to
4392 (addr - SP) >u (N - RZ)
4393 (would be beneficial because N - RZ is a constant) because
4394 wraparound arithmetic messes up the comparison. eg.
4395 20 >u 10 == True,
4396 but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4397 */
4398 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4399 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4400
4401 /* "addr - SP" */
4402 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4403 addStmtToIRSB(
4404 sbOut,
4405 assign(addr_minus_sp,
4406 tyAddr == Ity_I32
4407 ? binop(Iop_Sub32, addr, mkexpr(sp))
4408 : binop(Iop_Sub64, addr, mkexpr(sp)))
4409 );
4410
4411 /* "addr - SP + RZ" */
4412 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4413 addStmtToIRSB(
4414 sbOut,
4415 assign(diff,
4416 tyAddr == Ity_I32
4417 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4418 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4419 );
4420
sewardjcafe5052013-01-17 14:24:35 +00004421 /* guardA == "guard on the address" */
4422 IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
sewardjffce8152011-06-24 10:09:41 +00004423 addStmtToIRSB(
4424 sbOut,
sewardjcafe5052013-01-17 14:24:35 +00004425 assign(guardA,
sewardjffce8152011-06-24 10:09:41 +00004426 tyAddr == Ity_I32
4427 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4428 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4429 );
sewardjcafe5052013-01-17 14:24:35 +00004430 di->guard = mkexpr(guardA);
4431 }
4432
4433 /* If there's a guard on the access itself (as supplied by the
4434 caller of this routine), we need to AND that in to any guard we
4435 might already have. */
4436 if (guard) {
4437 di->guard = mk_And1(sbOut, di->guard, guard);
sewardjffce8152011-06-24 10:09:41 +00004438 }
4439
4440 /* Add the helper. */
4441 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
sewardjb4112022007-11-09 22:49:28 +00004442}
4443
4444
sewardja0eee322009-07-31 08:46:35 +00004445/* Figure out if GA is a guest code address in the dynamic linker, and
4446 if so return True. Otherwise (and in case of any doubt) return
4447 False. (sidedly safe w/ False as the safe value) */
4448static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4449{
4450 DebugInfo* dinfo;
florian19f91bb2012-11-10 22:29:54 +00004451 const HChar* soname;
sewardja0eee322009-07-31 08:46:35 +00004452 if (0) return False;
4453
sewardje3f1e592009-07-31 09:41:29 +00004454 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004455 if (!dinfo) return False;
4456
sewardje3f1e592009-07-31 09:41:29 +00004457 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004458 tl_assert(soname);
4459 if (0) VG_(printf)("%s\n", soname);
4460
4461# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004462 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004463 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4464 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4465 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
carll582d5822014-08-07 23:35:54 +00004466 if (VG_STREQ(soname, VG_U_LD64_SO_2)) return True;
sewardja0eee322009-07-31 08:46:35 +00004467 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
sewardjdcd90512014-08-30 19:21:48 +00004468 if (VG_STREQ(soname, VG_U_LD_LINUX_AARCH64_SO_1)) return True;
mjw4fa71082014-09-01 15:29:55 +00004469 if (VG_STREQ(soname, VG_U_LD_LINUX_ARMHF_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004470# elif defined(VGO_darwin)
4471 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4472# else
4473# error "Unsupported OS"
4474# endif
4475 return False;
4476}
4477
sewardjb4112022007-11-09 22:49:28 +00004478static
4479IRSB* hg_instrument ( VgCallbackClosure* closure,
4480 IRSB* bbIn,
florian3c0c9472014-09-24 12:06:55 +00004481 const VexGuestLayout* layout,
4482 const VexGuestExtents* vge,
4483 const VexArchInfo* archinfo_host,
sewardjb4112022007-11-09 22:49:28 +00004484 IRType gWordTy, IRType hWordTy )
4485{
sewardj1c0ce7a2009-07-01 08:10:49 +00004486 Int i;
4487 IRSB* bbOut;
4488 Addr64 cia; /* address of current insn */
4489 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004490 Bool inLDSO = False;
4491 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004492
sewardjffce8152011-06-24 10:09:41 +00004493 const Int goff_sp = layout->offset_SP;
4494
sewardjb4112022007-11-09 22:49:28 +00004495 if (gWordTy != hWordTy) {
4496 /* We don't currently support this case. */
4497 VG_(tool_panic)("host/guest word size mismatch");
4498 }
4499
sewardja0eee322009-07-31 08:46:35 +00004500 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4501 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4502 }
4503
sewardjb4112022007-11-09 22:49:28 +00004504 /* Set up BB */
4505 bbOut = emptyIRSB();
4506 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4507 bbOut->next = deepCopyIRExpr(bbIn->next);
4508 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004509 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004510
4511 // Copy verbatim any IR preamble preceding the first IMark
4512 i = 0;
4513 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4514 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4515 i++;
4516 }
4517
sewardj1c0ce7a2009-07-01 08:10:49 +00004518 // Get the first statement, and initial cia from it
4519 tl_assert(bbIn->stmts_used > 0);
4520 tl_assert(i < bbIn->stmts_used);
4521 st = bbIn->stmts[i];
4522 tl_assert(Ist_IMark == st->tag);
4523 cia = st->Ist.IMark.addr;
4524 st = NULL;
4525
sewardjb4112022007-11-09 22:49:28 +00004526 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004527 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004528 tl_assert(st);
4529 tl_assert(isFlatIRStmt(st));
4530 switch (st->tag) {
4531 case Ist_NoOp:
4532 case Ist_AbiHint:
4533 case Ist_Put:
4534 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004535 case Ist_Exit:
4536 /* None of these can contain any memory references. */
4537 break;
4538
sewardj1c0ce7a2009-07-01 08:10:49 +00004539 case Ist_IMark:
4540 /* no mem refs, but note the insn address. */
4541 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004542 /* Don't instrument the dynamic linker. It generates a
4543 lot of races which we just expensively suppress, so
4544 it's pointless.
4545
4546 Avoid flooding is_in_dynamic_linker_shared_object with
4547 requests by only checking at transitions between 4K
4548 pages. */
4549 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4550 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4551 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4552 inLDSO = is_in_dynamic_linker_shared_object(cia);
4553 } else {
4554 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4555 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004556 break;
4557
sewardjb4112022007-11-09 22:49:28 +00004558 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004559 switch (st->Ist.MBE.event) {
4560 case Imbe_Fence:
sewardj2b9232a2014-10-11 13:54:52 +00004561 case Imbe_CancelReservation:
sewardjf98e1c02008-10-25 16:22:41 +00004562 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004563 default:
4564 goto unhandled;
4565 }
sewardjb4112022007-11-09 22:49:28 +00004566 break;
4567
sewardj1c0ce7a2009-07-01 08:10:49 +00004568 case Ist_CAS: {
4569 /* Atomic read-modify-write cycle. Just pretend it's a
4570 read. */
4571 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004572 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4573 if (isDCAS) {
4574 tl_assert(cas->expdHi);
4575 tl_assert(cas->dataHi);
4576 } else {
4577 tl_assert(!cas->expdHi);
4578 tl_assert(!cas->dataHi);
4579 }
4580 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004581 if (!inLDSO) {
4582 instrument_mem_access(
4583 bbOut,
4584 cas->addr,
4585 (isDCAS ? 2 : 1)
4586 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4587 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004588 sizeofIRType(hWordTy), goff_sp,
4589 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004590 );
4591 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004592 break;
4593 }
4594
sewardjdb5907d2009-11-26 17:20:21 +00004595 case Ist_LLSC: {
4596 /* We pretend store-conditionals don't exist, viz, ignore
4597 them. Whereas load-linked's are treated the same as
4598 normal loads. */
4599 IRType dataTy;
4600 if (st->Ist.LLSC.storedata == NULL) {
4601 /* LL */
4602 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004603 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004604 instrument_mem_access(
4605 bbOut,
4606 st->Ist.LLSC.addr,
4607 sizeofIRType(dataTy),
4608 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004609 sizeofIRType(hWordTy), goff_sp,
4610 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004611 );
4612 }
sewardjdb5907d2009-11-26 17:20:21 +00004613 } else {
4614 /* SC */
4615 /*ignore */
4616 }
4617 break;
4618 }
4619
4620 case Ist_Store:
sewardjdb5907d2009-11-26 17:20:21 +00004621 if (!inLDSO) {
4622 instrument_mem_access(
4623 bbOut,
4624 st->Ist.Store.addr,
4625 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4626 True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004627 sizeofIRType(hWordTy), goff_sp,
4628 NULL/*no-guard*/
sewardjdb5907d2009-11-26 17:20:21 +00004629 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004630 }
njnb83caf22009-05-25 01:47:56 +00004631 break;
sewardjb4112022007-11-09 22:49:28 +00004632
sewardjcafe5052013-01-17 14:24:35 +00004633 case Ist_StoreG: {
4634 IRStoreG* sg = st->Ist.StoreG.details;
4635 IRExpr* data = sg->data;
4636 IRExpr* addr = sg->addr;
4637 IRType type = typeOfIRExpr(bbIn->tyenv, data);
4638 tl_assert(type != Ity_INVALID);
4639 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4640 True/*isStore*/,
4641 sizeofIRType(hWordTy),
4642 goff_sp, sg->guard );
4643 break;
4644 }
4645
4646 case Ist_LoadG: {
4647 IRLoadG* lg = st->Ist.LoadG.details;
4648 IRType type = Ity_INVALID; /* loaded type */
4649 IRType typeWide = Ity_INVALID; /* after implicit widening */
4650 IRExpr* addr = lg->addr;
4651 typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4652 tl_assert(type != Ity_INVALID);
4653 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4654 False/*!isStore*/,
4655 sizeofIRType(hWordTy),
4656 goff_sp, lg->guard );
4657 break;
4658 }
4659
sewardjb4112022007-11-09 22:49:28 +00004660 case Ist_WrTmp: {
4661 IRExpr* data = st->Ist.WrTmp.data;
4662 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004663 if (!inLDSO) {
4664 instrument_mem_access(
4665 bbOut,
4666 data->Iex.Load.addr,
4667 sizeofIRType(data->Iex.Load.ty),
4668 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004669 sizeofIRType(hWordTy), goff_sp,
4670 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004671 );
4672 }
sewardjb4112022007-11-09 22:49:28 +00004673 }
4674 break;
4675 }
4676
4677 case Ist_Dirty: {
4678 Int dataSize;
4679 IRDirty* d = st->Ist.Dirty.details;
4680 if (d->mFx != Ifx_None) {
4681 /* This dirty helper accesses memory. Collect the
4682 details. */
4683 tl_assert(d->mAddr != NULL);
4684 tl_assert(d->mSize != 0);
4685 dataSize = d->mSize;
4686 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004687 if (!inLDSO) {
4688 instrument_mem_access(
4689 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004690 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004691 );
4692 }
sewardjb4112022007-11-09 22:49:28 +00004693 }
4694 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004695 if (!inLDSO) {
4696 instrument_mem_access(
4697 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004698 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004699 );
4700 }
sewardjb4112022007-11-09 22:49:28 +00004701 }
4702 } else {
4703 tl_assert(d->mAddr == NULL);
4704 tl_assert(d->mSize == 0);
4705 }
4706 break;
4707 }
4708
4709 default:
sewardjf98e1c02008-10-25 16:22:41 +00004710 unhandled:
4711 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004712 tl_assert(0);
4713
4714 } /* switch (st->tag) */
4715
4716 addStmtToIRSB( bbOut, st );
4717 } /* iterate over bbIn->stmts */
4718
4719 return bbOut;
4720}
4721
sewardjffce8152011-06-24 10:09:41 +00004722#undef binop
4723#undef mkexpr
4724#undef mkU32
4725#undef mkU64
4726#undef assign
4727
sewardjb4112022007-11-09 22:49:28 +00004728
4729/*----------------------------------------------------------------*/
4730/*--- Client requests ---*/
4731/*----------------------------------------------------------------*/
4732
4733/* Sheesh. Yet another goddam finite map. */
4734static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4735
4736static void map_pthread_t_to_Thread_INIT ( void ) {
4737 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004738 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4739 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004740 }
4741}
4742
philipped40aff52014-06-16 20:00:14 +00004743/* A list of Ada dependent tasks and their masters. Used for implementing
4744 the Ada task termination semantic as implemented by the
4745 gcc gnat Ada runtime. */
4746typedef
4747 struct {
4748 void* dependent; // Ada Task Control Block of the Dependent
4749 void* master; // ATCB of the master
4750 Word master_level; // level of dependency between master and dependent
4751 Thread* hg_dependent; // helgrind Thread* for dependent task.
4752 }
4753 GNAT_dmml;
4754static XArray* gnat_dmmls; /* of GNAT_dmml */
4755static void gnat_dmmls_INIT (void)
4756{
4757 if (UNLIKELY(gnat_dmmls == NULL)) {
4758 gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
4759 HG_(free),
4760 sizeof(GNAT_dmml) );
4761 }
4762}
/* Print the list of monitor commands understood by
   handle_gdb_monitor_command, in response to the gdbserver
   'help' command. */
static void print_monitor_help ( void )
{
   VG_(gdb_printf)
      (
"\n"
"helgrind monitor commands:\n"
" info locks : show list of locks and their status\n"
"\n");
}
4772
4773/* return True if request recognised, False otherwise */
4774static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4775{
philippef5774342014-05-03 11:12:50 +00004776 HChar* wcmd;
4777 HChar s[VG_(strlen(req))]; /* copy for strtok_r */
4778 HChar *ssaveptr;
4779 Int kwdid;
4780
4781 VG_(strcpy) (s, req);
4782
4783 wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4784 /* NB: if possible, avoid introducing a new command below which
4785 starts with the same first letter(s) as an already existing
4786 command. This ensures a shorter abbreviation for the user. */
4787 switch (VG_(keyword_id)
philippe07c08522014-05-14 20:39:27 +00004788 ("help info",
philippef5774342014-05-03 11:12:50 +00004789 wcmd, kwd_report_duplicated_matches)) {
4790 case -2: /* multiple matches */
4791 return True;
4792 case -1: /* not found */
4793 return False;
4794 case 0: /* help */
4795 print_monitor_help();
4796 return True;
4797 case 1: /* info */
philippef5774342014-05-03 11:12:50 +00004798 wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4799 switch (kwdid = VG_(keyword_id)
4800 ("locks",
4801 wcmd, kwd_report_all)) {
4802 case -2:
4803 case -1:
4804 break;
4805 case 0: // locks
4806 {
4807 Int i;
4808 Lock* lk;
4809 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
4810 pp_Lock(0, lk,
4811 True /* show_lock_addrdescr */,
4812 False /* show_internal_data */);
4813 }
4814 if (i == 0)
4815 VG_(gdb_printf) ("no locks\n");
4816 }
4817 break;
4818 default:
4819 tl_assert(0);
4820 }
4821 return True;
philippef5774342014-05-03 11:12:50 +00004822 default:
4823 tl_assert(0);
4824 return False;
4825 }
4826}
sewardjb4112022007-11-09 22:49:28 +00004827
/* Dispatch a Helgrind client request (or a gdbserver monitor
   command).  args[0] is the request code; args[1..] are
   request-specific.  Returns False for requests that are not ours;
   True (with *ret set as appropriate) for requests we handled.
   Unknown 'HG' requests are fatal (tl_assert2). */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   /* Reject anything that is neither a 'HG' tool request nor a
      gdbserver monitor command. */
   if (!VG_IS_TOOL_USERREQ('H','G',args[0])
       && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* args[1] is an address inside a heap block; clean the whole
         containing block.  *ret is the block size, or -1 if no
         containing block was found. */
      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, NULL,
                                           &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
         break;
      }

      /* Thread-library API misuse: args[1]=function name,
         args[2]=error code, args[3]=error string. */
      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (UWord*)&thr_q, (UWord)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* This thread (tid) is informing us of its master. */
      case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
         GNAT_dmml dmml;
         dmml.dependent = (void*)args[1];
         dmml.master = (void*)args[2];
         dmml.master_level = (Word)args[3];
         dmml.hg_dependent = map_threads_maybe_lookup( tid );
         tl_assert(dmml.hg_dependent);

         if (0)
            VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
                        "dependent = %p master = %p master_level = %ld"
                        " dependent Thread* = %p\n",
                        (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
                        dmml.hg_dependent);
         gnat_dmmls_INIT();
         VG_(addToXA) (gnat_dmmls, &dmml);
         break;
      }

      /* This thread (tid) is informing us that it has completed a
         master. */
      case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
         Word n;
         const Thread *stayer = map_threads_maybe_lookup( tid );
         const void *master = (void*)args[1];
         const Word master_level = (Word) args[2];
         tl_assert(stayer);

         if (0)
            VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
                        "self_id = %p master_level = %ld Thread* = %p\n",
                        (Int)tid, master, master_level, stayer);

         gnat_dmmls_INIT();
         /* Reverse loop on the array, simulating a pthread_join for
            the Dependent tasks of the completed master, and removing
            them from the array. */
         for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
            GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
            if (dmml->master == master
                && dmml->master_level == master_level) {
               if (0)
                  VG_(printf)("quitter %p dependency to stayer %p\n",
                              dmml->hg_dependent->hbthr, stayer->hbthr);
               tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
               generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
                                                   stayer->hbthr);
               VG_(removeIndexXA) (gnat_dmmls, n);
            }
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      /* mutex=arg[1], mutex_is_init=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* Thread successfully completed pthread_cond_init:
         cond=arg[1], cond_attr=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
         evh__HG_PTHREAD_COND_INIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      /* cond=arg[1], cond_is_init=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2],
                                         (Bool)args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* A helgrind.h macro the client used is not implemented; report
         it as a Misc error rather than crashing. */
      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* HChar* who */
         HChar* who = (HChar*)args[1];
         HChar buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;

      /* gdbserver monitor command: *ret is 1 if recognised, 0 if not,
         and the recognition status is also the return value. */
      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
         if (handled)
            *ret = 1;
         else
            *ret = 0;
         return handled;
      }

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
5218
5219
5220/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00005221/*--- Setup ---*/
5222/*----------------------------------------------------------------*/
5223
florian19f91bb2012-11-10 22:29:54 +00005224static Bool hg_process_cmd_line_option ( const HChar* arg )
sewardjb4112022007-11-09 22:49:28 +00005225{
florian19f91bb2012-11-10 22:29:54 +00005226 const HChar* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00005227
njn83df0b62009-02-25 01:01:05 +00005228 if VG_BOOL_CLO(arg, "--track-lockorders",
5229 HG_(clo_track_lockorders)) {}
5230 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5231 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00005232
5233 else if VG_XACT_CLO(arg, "--history-level=none",
5234 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00005235 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00005236 HG_(clo_history_level), 1);
5237 else if VG_XACT_CLO(arg, "--history-level=full",
5238 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00005239
sewardjf585e482009-08-16 22:52:29 +00005240 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00005241 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00005242 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00005243 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00005244
sewardj11e352f2007-11-30 11:11:02 +00005245 /* "stuvwx" --> stuvwx (binary) */
njn83df0b62009-02-25 01:01:05 +00005246 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00005247 Int j;
sewardjb4112022007-11-09 22:49:28 +00005248
njn83df0b62009-02-25 01:01:05 +00005249 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00005250 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00005251 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00005252 return False;
5253 }
sewardj11e352f2007-11-30 11:11:02 +00005254 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00005255 if ('0' == tmp_str[j]) { /* do nothing */ }
5256 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00005257 else {
sewardj11e352f2007-11-30 11:11:02 +00005258 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00005259 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00005260 return False;
5261 }
5262 }
sewardjf98e1c02008-10-25 16:22:41 +00005263 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00005264 }
5265
sewardj622fe492011-03-11 21:06:59 +00005266 else if VG_BOOL_CLO(arg, "--free-is-write",
5267 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00005268
5269 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5270 HG_(clo_vts_pruning), 0);
5271 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5272 HG_(clo_vts_pruning), 1);
5273 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5274 HG_(clo_vts_pruning), 2);
5275
5276 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5277 HG_(clo_check_stack_refs)) {}
5278
sewardjb4112022007-11-09 22:49:28 +00005279 else
5280 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5281
5282 return True;
5283}
5284
5285static void hg_print_usage ( void )
5286{
5287 VG_(printf)(
sewardj622fe492011-03-11 21:06:59 +00005288" --free-is-write=no|yes treat heap frees as writes [no]\n"
sewardj849b0ed2008-12-21 10:43:10 +00005289" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00005290" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00005291" full: show both stack traces for a data race (can be very slow)\n"
5292" approx: full trace for one thread, approx for the other (faster)\n"
5293" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00005294" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjffce8152011-06-24 10:09:41 +00005295" --check-stack-refs=no|yes race-check reads and writes on the\n"
5296" main stack and thread stacks? [yes]\n"
sewardjb4112022007-11-09 22:49:28 +00005297 );
sewardjb4112022007-11-09 22:49:28 +00005298}
5299
5300static void hg_print_debug_usage ( void )
5301{
sewardjb4112022007-11-09 22:49:28 +00005302 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
5303 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00005304 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00005305 " at events (X = 0|1) [000000]\n");
5306 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00005307 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00005308 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00005309 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
5310 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00005311 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00005312 VG_(printf)(" 000010 at lock/unlock events\n");
5313 VG_(printf)(" 000001 at thread create/join events\n");
sewardjffce8152011-06-24 10:09:41 +00005314 VG_(printf)(
5315" --vts-pruning=never|auto|always [auto]\n"
5316" never: is never done (may cause big space leaks in Helgrind)\n"
5317" auto: done just often enough to keep space usage under control\n"
5318" always: done after every VTS GC (mostly just a big time waster)\n"
5319 );
sewardjb4112022007-11-09 22:49:28 +00005320}
5321
philippe8587b542013-12-15 20:24:43 +00005322static void hg_print_stats (void)
5323{
5324
5325 if (1) {
5326 VG_(printf)("\n");
5327 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
5328 if (HG_(clo_track_lockorders)) {
5329 VG_(printf)("\n");
5330 HG_(ppWSUstats)( univ_laog, "univ_laog" );
5331 }
5332 }
5333
5334 //zz VG_(printf)("\n");
5335 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
5336 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
5337 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
5338 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
5339 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
5340 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
5341 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
5342 //zz stats__hbefore_stk_hwm);
5343 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
5344 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
5345
5346 VG_(printf)("\n");
5347 VG_(printf)(" locksets: %'8d unique lock sets\n",
5348 (Int)HG_(cardinalityWSU)( univ_lsets ));
5349 if (HG_(clo_track_lockorders)) {
5350 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
5351 (Int)HG_(cardinalityWSU)( univ_laog ));
5352 }
5353
5354 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
5355 // stats__ga_LL_adds,
5356 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
5357
5358 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
5359 HG_(stats__LockN_to_P_queries),
5360 HG_(stats__LockN_to_P_get_map_size)() );
5361
5362 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
5363 HG_(stats__string_table_queries),
5364 HG_(stats__string_table_get_map_size)() );
5365 if (HG_(clo_track_lockorders)) {
5366 VG_(printf)(" LAOG: %'8d map size\n",
5367 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
5368 VG_(printf)(" LAOG exposition: %'8d map size\n",
5369 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
5370 }
5371
5372 VG_(printf)(" locks: %'8lu acquires, "
5373 "%'lu releases\n",
5374 stats__lockN_acquires,
5375 stats__lockN_releases
5376 );
5377 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
5378
5379 VG_(printf)("\n");
5380 libhb_shutdown(True); // This in fact only print stats.
5381}
5382
sewardjb4112022007-11-09 22:49:28 +00005383static void hg_fini ( Int exitcode )
5384{
sewardj2d9e8742009-08-07 15:46:56 +00005385 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
5386 VG_(message)(Vg_UserMsg,
5387 "For counts of detected and suppressed errors, "
5388 "rerun with: -v\n");
5389 }
5390
5391 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
5392 && HG_(clo_history_level) >= 2) {
5393 VG_(umsg)(
5394 "Use --history-level=approx or =none to gain increased speed, at\n" );
5395 VG_(umsg)(
5396 "the cost of reduced accuracy of conflicting-access information\n");
5397 }
5398
sewardjb4112022007-11-09 22:49:28 +00005399 if (SHOW_DATA_STRUCTURES)
5400 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00005401 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00005402 all__sanity_check("SK_(fini)");
5403
philippe8587b542013-12-15 20:24:43 +00005404 if (VG_(clo_stats))
5405 hg_print_stats();
sewardjb4112022007-11-09 22:49:28 +00005406}
5407
sewardjf98e1c02008-10-25 16:22:41 +00005408/* FIXME: move these somewhere sane */
5409
5410static
5411void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5412{
5413 Thread* thr;
5414 ThreadId tid;
5415 UWord nActual;
5416 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005417 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005418 tl_assert(thr);
5419 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5420 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
5421 NULL, NULL, 0 );
5422 tl_assert(nActual <= nRequest);
5423 for (; nActual < nRequest; nActual++)
5424 frames[nActual] = 0;
5425}
5426
5427static
sewardj23f12002009-07-24 08:45:08 +00005428ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00005429{
5430 Thread* thr;
5431 ThreadId tid;
5432 ExeContext* ec;
5433 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005434 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005435 tl_assert(thr);
5436 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00005437 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00005438 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00005439 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00005440}
5441
5442
sewardjc1fb9d22011-02-28 09:03:44 +00005443static void hg_post_clo_init ( void )
sewardjb4112022007-11-09 22:49:28 +00005444{
sewardjf98e1c02008-10-25 16:22:41 +00005445 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00005446
sewardjc1fb9d22011-02-28 09:03:44 +00005447 /////////////////////////////////////////////
5448 hbthr_root = libhb_init( for_libhb__get_stacktrace,
5449 for_libhb__get_EC );
5450 /////////////////////////////////////////////
5451
5452
5453 if (HG_(clo_track_lockorders))
5454 laog__init();
5455
5456 initialise_data_structures(hbthr_root);
5457}
5458
philippe07c08522014-05-14 20:39:27 +00005459static void hg_info_location (Addr a)
5460{
5461 (void) HG_(get_and_pp_addrdescr) (a);
5462}
5463
sewardjc1fb9d22011-02-28 09:03:44 +00005464static void hg_pre_clo_init ( void )
5465{
sewardjb4112022007-11-09 22:49:28 +00005466 VG_(details_name) ("Helgrind");
5467 VG_(details_version) (NULL);
5468 VG_(details_description) ("a thread error detector");
5469 VG_(details_copyright_author)(
sewardj0f157dd2013-10-18 14:27:36 +00005470 "Copyright (C) 2007-2013, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00005471 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9c08c0f2011-03-10 15:01:14 +00005472 VG_(details_avg_translation_sizeB) ( 320 );
sewardjb4112022007-11-09 22:49:28 +00005473
5474 VG_(basic_tool_funcs) (hg_post_clo_init,
5475 hg_instrument,
5476 hg_fini);
5477
5478 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00005479 VG_(needs_tool_errors) (HG_(eq_Error),
sewardj24118492009-07-15 14:50:02 +00005480 HG_(before_pp_Error),
sewardjf98e1c02008-10-25 16:22:41 +00005481 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00005482 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00005483 HG_(update_extra),
5484 HG_(recognised_suppression),
5485 HG_(read_extra_suppression_info),
5486 HG_(error_matches_suppression),
5487 HG_(get_error_name),
philippe4e32d672013-10-17 22:10:41 +00005488 HG_(get_extra_suppression_info),
5489 HG_(print_extra_suppression_use),
5490 HG_(update_extra_suppression_use));
sewardjb4112022007-11-09 22:49:28 +00005491
sewardj24118492009-07-15 14:50:02 +00005492 VG_(needs_xml_output) ();
5493
sewardjb4112022007-11-09 22:49:28 +00005494 VG_(needs_command_line_options)(hg_process_cmd_line_option,
5495 hg_print_usage,
5496 hg_print_debug_usage);
5497 VG_(needs_client_requests) (hg_handle_client_request);
5498
5499 // FIXME?
5500 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
5501 // hg_expensive_sanity_check);
5502
philippe8587b542013-12-15 20:24:43 +00005503 VG_(needs_print_stats) (hg_print_stats);
philippe07c08522014-05-14 20:39:27 +00005504 VG_(needs_info_location) (hg_info_location);
philippe8587b542013-12-15 20:24:43 +00005505
sewardjb4112022007-11-09 22:49:28 +00005506 VG_(needs_malloc_replacement) (hg_cli__malloc,
5507 hg_cli____builtin_new,
5508 hg_cli____builtin_vec_new,
5509 hg_cli__memalign,
5510 hg_cli__calloc,
5511 hg_cli__free,
5512 hg_cli____builtin_delete,
5513 hg_cli____builtin_vec_delete,
5514 hg_cli__realloc,
njn8b140de2009-02-17 04:31:18 +00005515 hg_cli_malloc_usable_size,
philipped99c26a2012-07-31 22:17:28 +00005516 HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
sewardjb4112022007-11-09 22:49:28 +00005517
sewardj849b0ed2008-12-21 10:43:10 +00005518 /* 21 Dec 08: disabled this; it mostly causes H to start more
5519 slowly and use significantly more memory, without very often
5520 providing useful results. The user can request to load this
5521 information manually with --read-var-info=yes. */
5522 if (0) VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00005523
5524 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00005525 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
5526 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00005527 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
sewardj1f77fec2010-04-12 19:51:04 +00005528 VG_(track_new_mem_stack) ( evh__new_mem_stack );
sewardjb4112022007-11-09 22:49:28 +00005529
5530 // FIXME: surely this isn't thread-aware
sewardj23f12002009-07-24 08:45:08 +00005531 VG_(track_copy_mem_remap) ( evh__copy_mem );
sewardjb4112022007-11-09 22:49:28 +00005532
5533 VG_(track_change_mem_mprotect) ( evh__set_perms );
5534
5535 VG_(track_die_mem_stack_signal)( evh__die_mem );
sewardjfd35d492011-03-17 19:39:55 +00005536 VG_(track_die_mem_brk) ( evh__die_mem_munmap );
5537 VG_(track_die_mem_munmap) ( evh__die_mem_munmap );
sewardjb4112022007-11-09 22:49:28 +00005538 VG_(track_die_mem_stack) ( evh__die_mem );
5539
5540 // FIXME: what is this for?
5541 VG_(track_ban_mem_stack) (NULL);
5542
5543 VG_(track_pre_mem_read) ( evh__pre_mem_read );
5544 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
5545 VG_(track_pre_mem_write) ( evh__pre_mem_write );
5546 VG_(track_post_mem_write) (NULL);
5547
5548 /////////////////
5549
5550 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
5551 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
5552
5553 VG_(track_start_client_code)( evh__start_client_code );
5554 VG_(track_stop_client_code)( evh__stop_client_code );
5555
sewardjb4112022007-11-09 22:49:28 +00005556 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
5557 as described in comments at the top of pub_tool_hashtable.h, are
5558 met. Blargh. */
5559 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
5560 tl_assert( sizeof(UWord) == sizeof(Addr) );
5561 hg_mallocmeta_table
5562 = VG_(HT_construct)( "hg_malloc_metadata_table" );
5563
philippe5fbc9762013-12-01 19:28:48 +00005564 MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
5565 1000,
5566 HG_(zalloc),
5567 "hg_malloc_metadata_pool",
5568 HG_(free));
5569
sewardj61bc2c52011-02-09 10:34:00 +00005570 // add a callback to clean up on (threaded) fork.
5571 VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
sewardjb4112022007-11-09 22:49:28 +00005572}
5573
5574VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5575
5576/*--------------------------------------------------------------------*/
5577/*--- end hg_main.c ---*/
5578/*--------------------------------------------------------------------*/