/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2013 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
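
/* Purely illustrative sketch (not used by the tool): the kind of
   Word/pointer cast the note above refers to.  VG_(lookupFM) traffics
   in UWords, so a Lock* out-parameter has to be punned through a
   UWord*, which is what breaks strict-aliasing assumptions at -O2. */
#if 0
static Lock* example_lookup_lock ( Addr ga )
{
   Lock* lk = NULL;
   /* &lk has type Lock**, but the map reads/writes it as a UWord*. */
   if (VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga ))
      return lk;
   return NULL;
}
#endif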

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle del_LockN. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */
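/* A minimal sketch of that trigger (assumed shape only: the actual
   collector and its call sites live further down in this file;
   'univ_laog_do_GC' is used here as a placeholder name for it, and
   HG_(cardinalityWSU) is assumed to give the number of word-sets
   currently in a universe): */
#if 0
   if (HG_(cardinalityWSU)( univ_laog ) >= next_gc_univ_laog)
      univ_laog_do_GC();
#endif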

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int   i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

/* Pretty-print lock lk.
   If show_lock_addrdescr, describe the (guest) lock address
   (this description is more complete with --read-var-info=yes).
   If show_internal_data, also show Helgrind-internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data)
{
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %d ", thr->coretid);
         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d)
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
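
/* Hedged usage sketch (hypothetical caller and names; the real
   realloc handling lives in the malloc/free interception code later
   in this file): a handler that moves a client block from
   'payload_old' to 'payload_new' would carry the shadow state of the
   surviving prefix across like so: */
#if 0
   SizeT surviving = new_szB < old_szB ? new_szB : old_szB;
   shadow_mem_scopy_range( thr, payload_old, payload_new, surviving );
#endif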

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}
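
/* Editor's note, inferred from the NoFX/AHAE naming above rather than
   from the call sites (which appear later in this file): the NoFX
   variant is the advisory form whose marking leaves shadow state
   unchanged, while the AHAE variant really retires the range, e.g.
   when a heap block is freed.  A hedged sketch of the distinction: */
#if 0
   shadow_mem_make_NoAccess_NoFX ( thr, a, len ); /* bookkeeping only */
   shadow_mem_make_NoAccess_AHAE ( thr, a, len ); /* really marks no-access */
#endif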

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
1328 have been shared by us and whoever else is holding it;
1329 which in turn implies it must be r-held, since a lock
1330 can't be w-held by more than one thread. */
1331 /* The lock is now R-held by somebody else: */
1332 tl_assert(lock->heldW == False);
1333 } else {
1334 /* Normal case. It's either not a rwlock, or it's a rwlock
1335 that we used to hold in w-mode (which is pretty much the
1336 same thing as a non-rwlock.) Since this transaction is
1337 atomic (V does not allow multiple threads to run
1338 simultaneously), it must mean the lock is now not held by
1339 anybody. Hence assert for it. */
1340 /* The lock is now not held by anybody: */
1341 tl_assert(!lock->heldBy);
1342 tl_assert(lock->heldW == False);
1343 }
sewardjf98e1c02008-10-25 16:22:41 +00001344 //if (lock->heldBy) {
florian6bf37262012-10-21 03:23:36 +00001345 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjf98e1c02008-10-25 16:22:41 +00001346 //}
sewardjb4112022007-11-09 22:49:28 +00001347 /* update this thread's lockset accordingly. */
1348 thr->locksetA
florian6bf37262012-10-21 03:23:36 +00001349 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +00001350 thr->locksetW
florian6bf37262012-10-21 03:23:36 +00001351 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001352 /* push our VC into the lock */
1353 tl_assert(thr->hbthr);
1354 tl_assert(lock->hbso);
1355 /* If the lock was previously W-held, then we want to do a
1356 strong send, and if previously R-held, then a weak send. */
1357 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001358 }
1359 /* fall through */
1360
1361 error:
sewardjf98e1c02008-10-25 16:22:41 +00001362 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001363}
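
/* Editor's sketch (not from the original sources): the UnlockForeign
   case above corresponds to a client pattern like

      // Thread A:                   // Thread B:
      pthread_mutex_lock(&mx);
                                     pthread_mutex_unlock(&mx); // foreign

   For error-checking mutexes POSIX makes B's unlock fail with EPERM;
   the handler just reports it and leaves the lock state unchanged. */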
1364
1365
sewardj9f569b72008-11-13 13:33:09 +00001366/* ---------------------------------------------------------- */
1367/* -------- Event handlers proper (evh__* functions) -------- */
1368/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001369
1370/* What is the Thread* for the currently running thread? This is
1371 absolutely performance critical. We receive notifications from the
1372 core for client code starts/stops, and cache the looked-up result
1373 in 'current_Thread'. Hence, for the vast majority of requests,
1374 finding the current thread reduces to a read of a global variable,
1375 provided get_current_Thread_in_C_C is inlined.
1376
1377 Outside of client code, current_Thread is NULL, and presumably
1378 any uses of it will cause a segfault. Hence:
1379
1380 - for uses definitely within client code, use
1381 get_current_Thread_in_C_C.
1382
1383 - for all other uses, use get_current_Thread.
1384*/
1385
sewardj23f12002009-07-24 08:45:08 +00001386static Thread *current_Thread = NULL,
1387 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001388
1389static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1390 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1391 tl_assert(current_Thread == NULL);
1392 current_Thread = map_threads_lookup( tid );
1393 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001394 if (current_Thread != current_Thread_prev) {
1395 libhb_Thr_resumes( current_Thread->hbthr );
1396 current_Thread_prev = current_Thread;
1397 }
sewardjb4112022007-11-09 22:49:28 +00001398}
1399static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1400 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1401 tl_assert(current_Thread != NULL);
1402 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001403 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001404}
1405static inline Thread* get_current_Thread_in_C_C ( void ) {
1406 return current_Thread;
1407}
1408static inline Thread* get_current_Thread ( void ) {
1409 ThreadId coretid;
1410 Thread* thr;
1411 thr = get_current_Thread_in_C_C();
1412 if (LIKELY(thr))
1413 return thr;
1414 /* evidently not in client code. Do it the slow way. */
1415 coretid = VG_(get_running_tid)();
1416 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001417 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001418 of initial memory layout) and VG_(get_running_tid)() returns
1419 VG_INVALID_THREADID at that point. */
1420 if (coretid == VG_INVALID_THREADID)
1421 coretid = 1; /* KLUDGE */
1422 thr = map_threads_lookup( coretid );
1423 return thr;
1424}
1425
1426static
1427void evh__new_mem ( Addr a, SizeT len ) {
1428 if (SHOW_EVENTS >= 2)
1429 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1430 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001431 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001432 all__sanity_check("evh__new_mem-post");
1433}
1434
1435static
sewardj1f77fec2010-04-12 19:51:04 +00001436void evh__new_mem_stack ( Addr a, SizeT len ) {
1437 if (SHOW_EVENTS >= 2)
1438 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1439 shadow_mem_make_New( get_current_Thread(),
1440 -VG_STACK_REDZONE_SZB + a, len );
1441 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1442 all__sanity_check("evh__new_mem_stack-post");
1443}
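
/* Editor's note (an inference from the offset used above): painting
   from -VG_STACK_REDZONE_SZB below the notified address extends the
   new range downwards over the guest's stack redzone, so
   compiler-generated accesses just below the stack pointer are
   covered too. */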
1444
1445static
sewardj7cf4e6b2008-05-01 20:24:26 +00001446void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1447 if (SHOW_EVENTS >= 2)
1448 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1449 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001450 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001451 all__sanity_check("evh__new_mem_w_tid-post");
1452}
1453
1454static
sewardjb4112022007-11-09 22:49:28 +00001455void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001456 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001457 if (SHOW_EVENTS >= 1)
1458 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1459 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1460 if (rr || ww || xx)
1461 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001462 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001463 all__sanity_check("evh__new_mem_w_perms-post");
1464}
1465
1466static
1467void evh__set_perms ( Addr a, SizeT len,
1468 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001469 // This handles mprotect requests. If the memory is being put
1470 // into no-R no-W state, paint it as NoAccess, for the reasons
1471 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001472 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001473 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001474 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1475 /* Hmm. What should we do here, that actually makes any sense?
1476 Let's say: if neither readable nor writable, then declare it
1477 NoAccess, else leave it alone. */
1478 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001479 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001480 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001481 all__sanity_check("evh__set_perms-post");
1482}
1483
1484static
1485void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001486 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001487 if (SHOW_EVENTS >= 2)
1488 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001489 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001490 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001491 all__sanity_check("evh__die_mem-post");
1492}
1493
1494static
sewardjfd35d492011-03-17 19:39:55 +00001495void evh__die_mem_munmap ( Addr a, SizeT len ) {
1496 // It's important that libhb doesn't ignore this. If, as is likely,
1497 // the client is subject to address space layout randomization,
1498 // then unmapped areas may never get remapped over, even in long
1499 // runs. If we just ignore them we wind up with large resource
1500 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1501 // VTS references in the affected area are dropped. Marking memory
1502 // as NoAccess is expensive, but we assume that munmap is sufficiently
1503 // rare that the space gains of doing this are worth the costs.
1504 if (SHOW_EVENTS >= 2)
1505 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1506 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1507}
1508
1509static
sewardj406bac82010-03-03 23:03:40 +00001510void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001511 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001512 if (SHOW_EVENTS >= 2)
1513 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1514 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1515 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1516 all__sanity_check("evh__untrack_mem-post");
1517}
1518
1519static
sewardj23f12002009-07-24 08:45:08 +00001520void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1521 if (SHOW_EVENTS >= 2)
1522 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1523 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1524 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1525 all__sanity_check("evh__copy_mem-post");
1526}
1527
1528static
sewardjb4112022007-11-09 22:49:28 +00001529void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1530{
1531 if (SHOW_EVENTS >= 1)
1532 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1533 (Int)parent, (Int)child );
1534
1535 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001536 Thread* thr_p;
1537 Thread* thr_c;
1538 Thr* hbthr_p;
1539 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001540
sewardjf98e1c02008-10-25 16:22:41 +00001541 tl_assert(HG_(is_sane_ThreadId)(parent));
1542 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001543 tl_assert(parent != child);
1544
1545 thr_p = map_threads_maybe_lookup( parent );
1546 thr_c = map_threads_maybe_lookup( child );
1547
1548 tl_assert(thr_p != NULL);
1549 tl_assert(thr_c == NULL);
1550
sewardjf98e1c02008-10-25 16:22:41 +00001551 hbthr_p = thr_p->hbthr;
1552 tl_assert(hbthr_p != NULL);
sewardj60626642011-03-10 15:14:37 +00001553 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001554
sewardjf98e1c02008-10-25 16:22:41 +00001555 hbthr_c = libhb_create ( hbthr_p );
1556
1557 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001558 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001559 thr_c = mk_Thread( hbthr_c );
sewardj60626642011-03-10 15:14:37 +00001560 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1561 libhb_set_Thr_hgthread(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001562
1563 /* and bind it in the thread-map table */
1564 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001565 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1566 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001567
1568 /* Record where the parent is so we can later refer to this in
1569 error messages.
1570
1571 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1572 The stack snapshot is taken immediately after the parent has
1573 returned from its sys_clone call. Unfortunately there is no
1574 unwind info for the insn following "syscall" - reading the
1575 glibc sources confirms this. So we ask for a snapshot to be
1576 taken as if RIP was 3 bytes earlier, in a place where there
1577 is unwind info. Sigh.
1578 */
1579 { Word first_ip_delta = 0;
1580# if defined(VGP_amd64_linux)
1581 first_ip_delta = -3;
1582# endif
1583 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1584 }
sewardjb4112022007-11-09 22:49:28 +00001585 }
1586
sewardjf98e1c02008-10-25 16:22:41 +00001587 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001588 all__sanity_check("evh__pre_thread_create-post");
1589}
1590
1591static
1592void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1593{
1594 Int nHeld;
1595 Thread* thr_q;
1596 if (SHOW_EVENTS >= 1)
1597 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1598 (Int)quit_tid );
1599
1600 /* quit_tid has disappeared without joining to any other thread.
1601 Therefore there is no synchronisation event associated with its
1602 exit and so we have to pretty much treat it as if it was still
1603 alive but mysteriously making no progress. That is because, if
1604 we don't know when it really exited, then we can never say there
1605 is a point in time when we're sure the thread really has
1606 finished, and so we need to consider the possibility that it
1607 lingers indefinitely and continues to interact with other
1608 threads. */
1609 /* However, it might have rendezvous'd with a thread that called
1610 pthread_join with this one as arg, prior to this point (that's
1611 how NPTL works). In which case there has already been a prior
1612 sync event. So in any case, just let the thread exit. On NPTL,
1613 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001614 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001615 thr_q = map_threads_maybe_lookup( quit_tid );
1616 tl_assert(thr_q != NULL);
1617
1618 /* Complain if this thread holds any locks. */
1619 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1620 tl_assert(nHeld >= 0);
1621 if (nHeld > 0) {
1622 HChar buf[80];
1623 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1624 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001625 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001626 }
1627
sewardj23f12002009-07-24 08:45:08 +00001628 /* Not much to do here:
1629 - tell libhb the thread is gone
1630 - clear the map_threads entry, in order that the Valgrind core
1631 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001632 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1633 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001634 tl_assert(thr_q->hbthr);
1635 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001636 tl_assert(thr_q->coretid == quit_tid);
1637 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001638 map_threads_delete( quit_tid );
1639
sewardjf98e1c02008-10-25 16:22:41 +00001640 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001641 all__sanity_check("evh__pre_thread_ll_exit-post");
1642}
1643
sewardj61bc2c52011-02-09 10:34:00 +00001644/* This is called immediately after fork, for the child only. 'tid'
1645 is the only surviving thread (as per POSIX rules on fork() in
1646 threaded programs), so we have to clean up map_threads to remove
1647 entries for any other threads. */
1648static
1649void evh__atfork_child ( ThreadId tid )
1650{
1651 UInt i;
1652 Thread* thr;
1653 /* Slot 0 should never be used. */
1654 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1655 tl_assert(!thr);
1656 /* Clean up all other slots except 'tid'. */
1657 for (i = 1; i < VG_N_THREADS; i++) {
1658 if (i == tid)
1659 continue;
1660 thr = map_threads_maybe_lookup(i);
1661 if (!thr)
1662 continue;
1663 /* Cleanup actions (next 5 lines) copied from end of
1664 evh__pre_thread_ll_exit; keep in sync. */
1665 tl_assert(thr->hbthr);
1666 libhb_async_exit(thr->hbthr);
1667 tl_assert(thr->coretid == i);
1668 thr->coretid = VG_INVALID_THREADID;
1669 map_threads_delete(i);
1670 }
1671}
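
/* Editor's note (assumption): this handler is presumably registered
   with the core during tool initialisation, along the lines of

      VG_(atfork)( NULL/*pre*/, NULL/*parent*/, evh__atfork_child );

   so that in the child only the forking thread survives in
   map_threads. */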
1672
philipped40aff52014-06-16 20:00:14 +00001673/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
sewardjb4112022007-11-09 22:49:28 +00001674static
philipped40aff52014-06-16 20:00:14 +00001675void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
sewardjb4112022007-11-09 22:49:28 +00001676{
sewardjf98e1c02008-10-25 16:22:41 +00001677 SO* so;
sewardjf98e1c02008-10-25 16:22:41 +00001678 /* Allocate a temporary synchronisation object and use it to send
1679 an imaginary message from the quitter to the stayer, the purpose
1680 being to generate a dependence from the quitter to the
1681 stayer. */
1682 so = libhb_so_alloc();
1683 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001684 /* Arguably the last arg of _so_send should be False, since the sending
1685 thread doesn't actually exist any more and we don't want _so_send to
1686 take stack snapshots of it; note that the call below does a strong send anyway. */
sewardjffce8152011-06-24 10:09:41 +00001687 libhb_so_send(hbthr_q, so, True/*strong_send*/);
sewardjf98e1c02008-10-25 16:22:41 +00001688 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1689 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001690
sewardjffce8152011-06-24 10:09:41 +00001691 /* Tell libhb that the quitter has been reaped. Note that we might
1692 have to be cleverer about this, to exclude 2nd and subsequent
1693 notifications for the same hbthr_q, in the case where the app is
1694 buggy (calls pthread_join twice or more on the same thread) AND
1695 where libpthread is also buggy and doesn't return ESRCH on
1696 subsequent calls. (If libpthread isn't thusly buggy, then the
1697 wrapper for pthread_join in hg_intercepts.c will stop us getting
1698 notified here multiple times for the same joinee.) See also
1699 comments in helgrind/tests/jointwice.c. */
1700 libhb_joinedwith_done(hbthr_q);
philipped40aff52014-06-16 20:00:14 +00001701}
1702
1703
1704static
1705void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1706{
1707 Thread* thr_s;
1708 Thread* thr_q;
1709 Thr* hbthr_s;
1710 Thr* hbthr_q;
1711
1712 if (SHOW_EVENTS >= 1)
1713 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1714 (Int)stay_tid, quit_thr );
1715
1716 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
1717
1718 thr_s = map_threads_maybe_lookup( stay_tid );
1719 thr_q = quit_thr;
1720 tl_assert(thr_s != NULL);
1721 tl_assert(thr_q != NULL);
1722 tl_assert(thr_s != thr_q);
1723
1724 hbthr_s = thr_s->hbthr;
1725 hbthr_q = thr_q->hbthr;
1726 tl_assert(hbthr_s != hbthr_q);
1727 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1728 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
1729
1730 generate_quitter_stayer_dependence (hbthr_q, hbthr_s);
sewardjffce8152011-06-24 10:09:41 +00001731
sewardjf98e1c02008-10-25 16:22:41 +00001732 /* evh__pre_thread_ll_exit issues an error message if the exiting
1733 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001734
1735 /* This holds because, at least when using NPTL as the thread
1736 library, we should be notified the low level thread exit before
1737 we hear of any join event on it. The low level exit
1738 notification feeds through into evh__pre_thread_ll_exit,
1739 which should clear the map_threads entry for it. Hence we
1740 expect there to be no map_threads entry at this point. */
1741 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1742 == VG_INVALID_THREADID);
1743
sewardjf98e1c02008-10-25 16:22:41 +00001744 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001745 all__sanity_check("evh__post_thread_join-post");
1746}
1747
1748static
floriane543f302012-10-21 19:43:43 +00001749void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001750 Addr a, SizeT size) {
1751 if (SHOW_EVENTS >= 2
1752 || (SHOW_EVENTS >= 1 && size != 1))
1753 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1754 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001755 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001756 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001757 all__sanity_check("evh__pre_mem_read-post");
1758}
1759
1760static
1761void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
floriane543f302012-10-21 19:43:43 +00001762 const HChar* s, Addr a ) {
sewardjb4112022007-11-09 22:49:28 +00001763 Int len;
1764 if (SHOW_EVENTS >= 1)
1765 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1766 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001767 // Don't segfault if the string starts in an obviously stupid
1768 // place. Actually we should check the whole string, not just
1769 // the start address, but that's too much trouble. At least
1770 // checking the first byte is better than nothing. See #255009.
1771 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1772 return;
florian19f91bb2012-11-10 22:29:54 +00001773 len = VG_(strlen)( (HChar*) a );
sewardj23f12002009-07-24 08:45:08 +00001774 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001775 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001776 all__sanity_check("evh__pre_mem_read_asciiz-post");
1777}
1778
1779static
floriane543f302012-10-21 19:43:43 +00001780void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001781 Addr a, SizeT size ) {
1782 if (SHOW_EVENTS >= 1)
1783 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1784 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001785 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001786 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001787 all__sanity_check("evh__pre_mem_write-post");
1788}
1789
1790static
1791void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1792 if (SHOW_EVENTS >= 1)
1793 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1794 (void*)a, len, (Int)is_inited );
1795 // FIXME: this is kinda stupid -- both branches are identical
1796 if (is_inited) {
1797 shadow_mem_make_New(get_current_Thread(), a, len);
1798 } else {
1799 shadow_mem_make_New(get_current_Thread(), a, len);
1800 }
sewardjf98e1c02008-10-25 16:22:41 +00001801 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001802 all__sanity_check("evh__pre_mem_read-post");
1803}
1804
1805static
1806void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001807 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001808 if (SHOW_EVENTS >= 1)
1809 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001810 thr = get_current_Thread();
1811 tl_assert(thr);
1812 if (HG_(clo_free_is_write)) {
1813 /* Treat frees as if the memory was written immediately prior to
1814 the free. This shakes out more races, specifically, cases
1815 where memory is referenced by one thread, and freed by
1816 another, and there's no observable synchronisation event to
1817 guarantee that the reference happens before the free. */
1818 shadow_mem_cwrite_range(thr, a, len);
1819 }
sewardjfd35d492011-03-17 19:39:55 +00001820 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001821 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001822 all__sanity_check("evh__pre_mem_read-post");
1823}
1824
sewardj23f12002009-07-24 08:45:08 +00001825/* --- Event handlers called from generated code --- */
1826
sewardjb4112022007-11-09 22:49:28 +00001827static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001828void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001829 Thread* thr = get_current_Thread_in_C_C();
1830 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001831 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001832}
sewardjf98e1c02008-10-25 16:22:41 +00001833
sewardjb4112022007-11-09 22:49:28 +00001834static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001835void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001836 Thread* thr = get_current_Thread_in_C_C();
1837 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001838 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001839}
sewardjf98e1c02008-10-25 16:22:41 +00001840
sewardjb4112022007-11-09 22:49:28 +00001841static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001842void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001843 Thread* thr = get_current_Thread_in_C_C();
1844 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001845 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001846}
sewardjf98e1c02008-10-25 16:22:41 +00001847
sewardjb4112022007-11-09 22:49:28 +00001848static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001849void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001850 Thread* thr = get_current_Thread_in_C_C();
1851 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001852 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001853}
sewardjf98e1c02008-10-25 16:22:41 +00001854
sewardjb4112022007-11-09 22:49:28 +00001855static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001856void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001857 Thread* thr = get_current_Thread_in_C_C();
1858 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001859 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001860}
1861
1862static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001863void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001864 Thread* thr = get_current_Thread_in_C_C();
1865 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001866 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001867}
sewardjf98e1c02008-10-25 16:22:41 +00001868
sewardjb4112022007-11-09 22:49:28 +00001869static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001870void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001871 Thread* thr = get_current_Thread_in_C_C();
1872 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001873 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001874}
sewardjf98e1c02008-10-25 16:22:41 +00001875
sewardjb4112022007-11-09 22:49:28 +00001876static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001877void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001878 Thread* thr = get_current_Thread_in_C_C();
1879 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001880 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001881}
sewardjf98e1c02008-10-25 16:22:41 +00001882
sewardjb4112022007-11-09 22:49:28 +00001883static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001884void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001885 Thread* thr = get_current_Thread_in_C_C();
1886 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001887 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001888}
sewardjf98e1c02008-10-25 16:22:41 +00001889
sewardjb4112022007-11-09 22:49:28 +00001890static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001891void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001892 Thread* thr = get_current_Thread_in_C_C();
1893 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001894 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001895}
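
/* Editor's note (assumption): these helpers are not called from C;
   the instrumenter later in this file emits calls to them from the
   translated client code, so that e.g. a 4-byte client load from
   address 'a' becomes, in effect,

      evh__mem_help_cread_4(a);   // feeds LIBHB_CREAD_4(hbthr, a)

   hence they must stay cheap and lean on the cached current_Thread. */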
1896
sewardjb4112022007-11-09 22:49:28 +00001897
sewardj9f569b72008-11-13 13:33:09 +00001898/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001899/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001900/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001901
1902/* EXPOSITION only: by intercepting lock init events we can show the
1903 user where the lock was initialised, rather than only being able to
1904 show where it was first locked. Intercepting lock initialisations
1905 is not necessary for the basic operation of the race checker. */
1906static
1907void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1908 void* mutex, Word mbRec )
1909{
1910 if (SHOW_EVENTS >= 1)
1911 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1912 (Int)tid, mbRec, (void*)mutex );
1913 tl_assert(mbRec == 0 || mbRec == 1);
1914 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1915 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001916 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001917 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1918}
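
/* Flow sketch (editor's assumption about the wrapper side): the
   pthread_mutex_init interceptor in hg_intercepts.c fires a client
   request only after the real init succeeds, roughly

      ret = real_pthread_mutex_init(mutex, attr);
      if (ret == 0)
         DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,
                      pthread_mutex_t*, mutex, long, mbRec);

   which the client-request dispatcher routes to the handler above. */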
1919
1920static
sewardjc02f6c42013-10-14 13:51:25 +00001921void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
1922 Bool mutex_is_init )
sewardjb4112022007-11-09 22:49:28 +00001923{
1924 Thread* thr;
1925 Lock* lk;
1926 if (SHOW_EVENTS >= 1)
sewardjc02f6c42013-10-14 13:51:25 +00001927 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
1928 "(ctid=%d, %p, isInit=%d)\n",
1929 (Int)tid, (void*)mutex, (Int)mutex_is_init );
sewardjb4112022007-11-09 22:49:28 +00001930
1931 thr = map_threads_maybe_lookup( tid );
1932 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001933 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001934
1935 lk = map_locks_maybe_lookup( (Addr)mutex );
1936
sewardjc02f6c42013-10-14 13:51:25 +00001937 if (lk == NULL && mutex_is_init) {
1938 /* We're destroying a mutex which we don't have any record of,
1939 and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
1940 Assume it never got used, and so we don't need to do anything
1941 more. */
1942 goto out;
1943 }
1944
sewardjb4112022007-11-09 22:49:28 +00001945 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001946 HG_(record_error_Misc)(
1947 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001948 }
1949
1950 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001951 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001952 tl_assert( lk->guestaddr == (Addr)mutex );
1953 if (lk->heldBy) {
1954 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001955 HG_(record_error_Misc)(
1956 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001957 /* remove lock from locksets of all owning threads */
1958 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001959 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001960 lk->heldBy = NULL;
1961 lk->heldW = False;
1962 lk->acquired_at = NULL;
1963 }
1964 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001965 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001966
1967 if (HG_(clo_track_lockorders))
1968 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001969 map_locks_delete( lk->guestaddr );
1970 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001971 }
1972
sewardjc02f6c42013-10-14 13:51:25 +00001973 out:
sewardjf98e1c02008-10-25 16:22:41 +00001974 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001975 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1976}
1977
1978static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1979 void* mutex, Word isTryLock )
1980{
1981 /* Just check the mutex is sane; nothing else to do. */
1982 // 'mutex' may be invalid - not checked by wrapper
1983 Thread* thr;
1984 Lock* lk;
1985 if (SHOW_EVENTS >= 1)
1986 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1987 (Int)tid, (void*)mutex );
1988
1989 tl_assert(isTryLock == 0 || isTryLock == 1);
1990 thr = map_threads_maybe_lookup( tid );
1991 tl_assert(thr); /* cannot fail - Thread* must already exist */
1992
1993 lk = map_locks_maybe_lookup( (Addr)mutex );
1994
1995 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001996 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1997 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001998 }
1999
2000 if ( lk
2001 && isTryLock == 0
2002 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2003 && lk->heldBy
2004 && lk->heldW
florian6bf37262012-10-21 03:23:36 +00002005 && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00002006 /* uh, it's a non-recursive lock and we already w-hold it, and
2007 this is a real lock operation (not a speculative "tryLock"
2008 kind of thing). Duh. Deadlock coming up; but at least
2009 produce an error message. */
florian6bd9dc12012-11-23 16:17:43 +00002010 const HChar* errstr = "Attempt to re-lock a "
2011 "non-recursive lock I already hold";
2012 const HChar* auxstr = "Lock was previously acquired";
sewardj8fef6252010-07-29 05:28:02 +00002013 if (lk->acquired_at) {
2014 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2015 } else {
2016 HG_(record_error_Misc)( thr, errstr );
2017 }
sewardjb4112022007-11-09 22:49:28 +00002018 }
2019}
2020
2021static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2022{
2023 // only called if the real library call succeeded - so mutex is sane
2024 Thread* thr;
2025 if (SHOW_EVENTS >= 1)
2026 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2027 (Int)tid, (void*)mutex );
2028
2029 thr = map_threads_maybe_lookup( tid );
2030 tl_assert(thr); /* cannot fail - Thread* must already exist */
2031
2032 evhH__post_thread_w_acquires_lock(
2033 thr,
2034 LK_mbRec, /* if not known, create new lock with this LockKind */
2035 (Addr)mutex
2036 );
2037}
2038
2039static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2040{
2041 // 'mutex' may be invalid - not checked by wrapper
2042 Thread* thr;
2043 if (SHOW_EVENTS >= 1)
2044 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2045 (Int)tid, (void*)mutex );
2046
2047 thr = map_threads_maybe_lookup( tid );
2048 tl_assert(thr); /* cannot fail - Thread* must already exist */
2049
2050 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2051}
2052
2053static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2054{
2055 // only called if the real library call succeeded - so mutex is sane
2056 Thread* thr;
2057 if (SHOW_EVENTS >= 1)
2058 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2059 (Int)tid, (void*)mutex );
2060 thr = map_threads_maybe_lookup( tid );
2061 tl_assert(thr); /* cannot fail - Thread* must already exist */
2062
2063 // anything we should do here?
2064}
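
/* Putting the four mutex handlers together (editor's illustration):

      pthread_mutex_lock(&mx);    // -> ..._LOCK_PRE (sanity checks),
                                  //    then on success ..._LOCK_POST
      pthread_mutex_unlock(&mx);  // -> ..._UNLOCK_PRE (checks + release),
                                  //    then ..._UNLOCK_POST (a no-op here)
*/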
2065
2066
sewardj5a644da2009-08-11 10:35:58 +00002067/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002068/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002069/* ------------------------------------------------------- */
2070
2071/* All a bit of a kludge. Pretend we're really dealing with ordinary
2072 pthread_mutex_t's instead, for the most part. */
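
/* Editor's note (assuming glibc's implementation): pthread_spin_init
   and pthread_spin_unlock both boil down to storing zero into the lock
   word, roughly

      *slock = 0;   // init and unlock look identical at this level

   so a single wrapper sees both, and the INIT_OR_UNLOCK pre/post pair
   below disambiguates by consulting what is already known about the
   lock at that address. */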
2073
2074static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2075 void* slock )
2076{
2077 Thread* thr;
2078 Lock* lk;
2079 /* In glibc's kludgey world, we're either initialising or unlocking
2080 it. Since this is the pre-routine, if it is locked, unlock it
2081 and take a dependence edge. Otherwise, do nothing. */
2082
2083 if (SHOW_EVENTS >= 1)
2084 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2085 "(ctid=%d, slock=%p)\n",
2086 (Int)tid, (void*)slock );
2087
2088 thr = map_threads_maybe_lookup( tid );
2089 /* cannot fail - Thread* must already exist */
2090 tl_assert( HG_(is_sane_Thread)(thr) );
2091
2092 lk = map_locks_maybe_lookup( (Addr)slock );
2093 if (lk && lk->heldBy) {
2094 /* it's held. So do the normal pre-unlock actions, as copied
2095 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2096 duplicates the map_locks_maybe_lookup. */
2097 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2098 False/*!isRDWR*/ );
2099 }
2100}
2101
2102static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2103 void* slock )
2104{
2105 Lock* lk;
2106 /* More kludgery. If the lock has never been seen before, do
2107 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2108 nothing. */
2109
2110 if (SHOW_EVENTS >= 1)
2111 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2112 "(ctid=%d, slock=%p)\n",
2113 (Int)tid, (void*)slock );
2114
2115 lk = map_locks_maybe_lookup( (Addr)slock );
2116 if (!lk) {
2117 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2118 }
2119}
2120
2121static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2122 void* slock, Word isTryLock )
2123{
2124 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2125}
2126
2127static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2128 void* slock )
2129{
2130 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2131}
2132
2133static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2134 void* slock )
2135{
sewardjc02f6c42013-10-14 13:51:25 +00002136 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
sewardj5a644da2009-08-11 10:35:58 +00002137}
2138
2139
sewardj9f569b72008-11-13 13:33:09 +00002140/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002141/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002142/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002143
sewardj02114542009-07-28 20:52:36 +00002144/* A mapping from CV to (the SO associated with it, plus some
2145 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002146 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2147 wait on it completes, we do a 'recv' from the SO. This is believed
2148 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002149 signallings/broadcasts.
2150*/
2151
sewardj02114542009-07-28 20:52:36 +00002152/* .so is the SO for this CV.
2153 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002154
sewardj02114542009-07-28 20:52:36 +00002155 POSIX says effectively that the first pthread_cond_{timed}wait call
2156 causes a dynamic binding between the CV and the mutex, and that
2157 lasts until such time as the waiter count falls to zero. Hence
2158 need to keep track of the number of waiters in order to do
2159 consistency tracking. */
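
/* Worked timeline for the send/recv pairing (editor's sketch;
   'data'/'ready' are hypothetical client variables):

      // T2 (signaller):               // T1 (waiter):
      pthread_mutex_lock(&mx);         pthread_mutex_lock(&mx);
      data = 42;                       while (!ready)
      ready = 1;                          pthread_cond_wait(&cv, &mx);
      pthread_cond_signal(&cv);        use(data);
      pthread_mutex_unlock(&mx);       pthread_mutex_unlock(&mx);

   The signal does a 'send' on cv's SO and the completed wait a 'recv',
   so T2's write to 'data' happens-before T1's read of it. */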
2160typedef
2161 struct {
2162 SO* so; /* libhb-allocated SO */
2163 void* mx_ga; /* addr of associated mutex, if any */
2164 UWord nWaiters; /* # threads waiting on the CV */
2165 }
2166 CVInfo;
2167
2168
2169/* pthread_cond_t* -> CVInfo* */
2170static WordFM* map_cond_to_CVInfo = NULL;
2171
2172static void map_cond_to_CVInfo_INIT ( void ) {
2173 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2174 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2175 "hg.mctCI.1", HG_(free), NULL );
2176 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002177 }
2178}
2179
sewardj02114542009-07-28 20:52:36 +00002180static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002181 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002182 map_cond_to_CVInfo_INIT();
2183 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002184 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002185 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002186 } else {
sewardj02114542009-07-28 20:52:36 +00002187 SO* so = libhb_so_alloc();
2188 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2189 cvi->so = so;
2190 cvi->mx_ga = 0;
2191 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2192 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002193 }
2194}
2195
philippe8bfc2152012-07-06 23:38:24 +00002196static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2197 UWord key, val;
2198 map_cond_to_CVInfo_INIT();
2199 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2200 tl_assert(key == (UWord)cond);
2201 return (CVInfo*)val;
2202 } else {
2203 return NULL;
2204 }
2205}
2206
sewardjc02f6c42013-10-14 13:51:25 +00002207static void map_cond_to_CVInfo_delete ( ThreadId tid,
2208 void* cond, Bool cond_is_init ) {
philippe8bfc2152012-07-06 23:38:24 +00002209 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +00002210 UWord keyW, valW;
philippe8bfc2152012-07-06 23:38:24 +00002211
2212 thr = map_threads_maybe_lookup( tid );
2213 tl_assert(thr); /* cannot fail - Thread* must already exist */
2214
sewardj02114542009-07-28 20:52:36 +00002215 map_cond_to_CVInfo_INIT();
philippe24111972013-03-18 22:48:22 +00002216 if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
sewardj02114542009-07-28 20:52:36 +00002217 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002218 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002219 tl_assert(cvi);
2220 tl_assert(cvi->so);
philippe8bfc2152012-07-06 23:38:24 +00002221 if (cvi->nWaiters > 0) {
sewardjc02f6c42013-10-14 13:51:25 +00002222 HG_(record_error_Misc)(
2223 thr, "pthread_cond_destroy:"
2224 " destruction of condition variable being waited upon");
philippe24111972013-03-18 22:48:22 +00002225 /* The outcome of destroying a cond var that is being waited upon
2226 is EBUSY, and the variable is not destroyed. */
2227 return;
philippe8bfc2152012-07-06 23:38:24 +00002228 }
philippe24111972013-03-18 22:48:22 +00002229 if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
2230 tl_assert(0); // cond var found above, and not here ???
sewardj02114542009-07-28 20:52:36 +00002231 libhb_so_dealloc(cvi->so);
2232 cvi->mx_ga = 0;
2233 HG_(free)(cvi);
philippe8bfc2152012-07-06 23:38:24 +00002234 } else {
sewardjc02f6c42013-10-14 13:51:25 +00002235 /* We have no record of this CV. So complain about it
2236 .. except, don't bother to complain if it has exactly the
2237 value PTHREAD_COND_INITIALIZER, since it might be that the CV
2238 was initialised like that but never used. */
2239 if (!cond_is_init) {
2240 HG_(record_error_Misc)(
2241 thr, "pthread_cond_destroy: destruction of unknown cond var");
2242 }
sewardjb4112022007-11-09 22:49:28 +00002243 }
2244}
2245
2246static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2247{
sewardjf98e1c02008-10-25 16:22:41 +00002248 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2249 cond to a SO if it is not already so bound, and 'send' on the
2250 SO. This is later used by other thread(s) which successfully
2251 exit from a pthread_cond_wait on the same cv; then they 'recv'
2252 from the SO, thereby acquiring a dependency on this signalling
2253 event. */
sewardjb4112022007-11-09 22:49:28 +00002254 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002255 CVInfo* cvi;
2256 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002257
2258 if (SHOW_EVENTS >= 1)
2259 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2260 (Int)tid, (void*)cond );
2261
sewardjb4112022007-11-09 22:49:28 +00002262 thr = map_threads_maybe_lookup( tid );
2263 tl_assert(thr); /* cannot fail - Thread* must already exist */
2264
sewardj02114542009-07-28 20:52:36 +00002265 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2266 tl_assert(cvi);
2267 tl_assert(cvi->so);
2268
sewardjb4112022007-11-09 22:49:28 +00002269 // error-if: mutex is bogus
2270 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002271 // Hmm. POSIX doesn't actually say that it's an error to call
2272 // pthread_cond_signal with the associated mutex being unlocked.
2273 // Although it does say that the mutex should be held "if consistent
sewardjffce8152011-06-24 10:09:41 +00002274 // scheduling is desired."  For that reason, print "dubious" if the lock isn't
2275 // held by any thread. Skip the "dubious" if it is held by some
2276 // other thread; that sounds straight-out wrong.
sewardj02114542009-07-28 20:52:36 +00002277 //
sewardjffce8152011-06-24 10:09:41 +00002278 // Anybody who writes code that signals on a CV without holding
2279 // the associated MX needs to be shipped off to a lunatic asylum
2280 // ASAP, even though POSIX doesn't actually declare such behaviour
2281 // illegal -- it makes code extremely difficult to understand/
2282 // reason about. In particular it puts the signalling thread in
2283 // a situation where it is racing against the released waiter
2284 // as soon as the signalling is done, and so there needs to be
2285 // some auxiliary synchronisation mechanism in the program that
2286 // makes this safe -- or the race(s) need to be harmless, or
2287 // probably nonexistent.
2288 //
2289 if (1) {
2290 Lock* lk = NULL;
2291 if (cvi->mx_ga != 0) {
2292 lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2293 }
2294 /* note: lk could be NULL. Be careful. */
2295 if (lk) {
2296 if (lk->kind == LK_rdwr) {
2297 HG_(record_error_Misc)(thr,
2298 "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2299 }
2300 if (lk->heldBy == NULL) {
2301 HG_(record_error_Misc)(thr,
2302 "pthread_cond_{signal,broadcast}: dubious: "
2303 "associated lock is not held by any thread");
2304 }
florian6bf37262012-10-21 03:23:36 +00002305 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
sewardjffce8152011-06-24 10:09:41 +00002306 HG_(record_error_Misc)(thr,
2307 "pthread_cond_{signal,broadcast}: "
2308 "associated lock is not held by calling thread");
2309 }
2310 } else {
2311 /* Couldn't even find the damn thing. */
2312 // But actually .. that's not necessarily an error. We don't
2313 // know the (CV,MX) binding until a pthread_cond_wait or bcast
2314 // shows us what it is, and that may not have happened yet.
2315 // So just keep quiet in this circumstance.
2316 //HG_(record_error_Misc)( thr,
2317 // "pthread_cond_{signal,broadcast}: "
2318 // "no or invalid mutex associated with cond");
2319 }
2320 }
sewardjb4112022007-11-09 22:49:28 +00002321
sewardj02114542009-07-28 20:52:36 +00002322 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002323}
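
/* The racy pattern the "dubious" complaint above is aimed at
   (editor's sketch; 'ready' is a hypothetical client flag):

      // signaller (no mutex held):    // waiter:
      ready = 1;                       pthread_mutex_lock(&mx);
      pthread_cond_signal(&cv);        while (!ready)
                                          pthread_cond_wait(&cv, &mx);
                                       pthread_mutex_unlock(&mx);

   With no lock held around 'ready = 1', the write to the flag races
   against the waiter's reads of it. */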
2324
2325/* returns True if it reckons 'mutex' is valid and held by this
2326 thread, else False */
2327static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2328 void* cond, void* mutex )
2329{
2330 Thread* thr;
2331 Lock* lk;
2332 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002333 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002334
2335 if (SHOW_EVENTS >= 1)
2336 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2337 "(ctid=%d, cond=%p, mutex=%p)\n",
2338 (Int)tid, (void*)cond, (void*)mutex );
2339
sewardjb4112022007-11-09 22:49:28 +00002340 thr = map_threads_maybe_lookup( tid );
2341 tl_assert(thr); /* cannot fail - Thread* must already exist */
2342
2343 lk = map_locks_maybe_lookup( (Addr)mutex );
2344
2345 /* Check for stupid mutex arguments. There are various ways to be
2346 a bozo. Only complain once, though, even if more than one thing
2347 is wrong. */
2348 if (lk == NULL) {
2349 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002350 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002351 thr,
2352 "pthread_cond_{timed}wait called with invalid mutex" );
2353 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002354 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002355 if (lk->kind == LK_rdwr) {
2356 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002357 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002358 thr, "pthread_cond_{timed}wait called with mutex "
2359 "of type pthread_rwlock_t*" );
2360 } else
2361 if (lk->heldBy == NULL) {
2362 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002363 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002364 thr, "pthread_cond_{timed}wait called with un-held mutex");
2365 } else
2366 if (lk->heldBy != NULL
florian6bf37262012-10-21 03:23:36 +00002367 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002368 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002369 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002370 thr, "pthread_cond_{timed}wait called with mutex "
2371 "held by a different thread" );
2372 }
2373 }
2374
2375 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002376 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2377 tl_assert(cvi);
2378 tl_assert(cvi->so);
2379 if (cvi->nWaiters == 0) {
2380 /* form initial (CV,MX) binding */
2381 cvi->mx_ga = mutex;
2382 }
2383 else /* check existing (CV,MX) binding */
2384 if (cvi->mx_ga != mutex) {
2385 HG_(record_error_Misc)(
2386 thr, "pthread_cond_{timed}wait: cond is associated "
2387 "with a different mutex");
2388 }
2389 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002390
2391 return lk_valid;
2392}
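
/* Example of the "different mutex" complaint (editor's sketch):

      // T1:                           // T2, while T1 still waits:
      pthread_cond_wait(&cv, &mx1);    pthread_cond_wait(&cv, &mx2);

   T2 tries to rebind cv to a different mutex while the first binding
   is still live (nWaiters > 0), which the check above flags. */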
2393
2394static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
sewardjff427c92013-10-14 12:13:52 +00002395 void* cond, void* mutex,
2396 Bool timeout)
sewardjb4112022007-11-09 22:49:28 +00002397{
sewardjf98e1c02008-10-25 16:22:41 +00002398 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2399 the SO for this cond, and 'recv' from it so as to acquire a
2400 dependency edge back to the signaller/broadcaster. */
2401 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002402 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002403
2404 if (SHOW_EVENTS >= 1)
2405 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
sewardjff427c92013-10-14 12:13:52 +00002406 "(ctid=%d, cond=%p, mutex=%p)\n, timeout=%d",
2407 (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
sewardjb4112022007-11-09 22:49:28 +00002408
sewardjb4112022007-11-09 22:49:28 +00002409 thr = map_threads_maybe_lookup( tid );
2410 tl_assert(thr); /* cannot fail - Thread* must already exist */
2411
2412 // error-if: cond is also associated with a different mutex
2413
philippe8bfc2152012-07-06 23:38:24 +00002414 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2415 if (!cvi) {
2416 /* This could be either a bug in helgrind or an error in the guest
2417 application (e.g. the cond var was destroyed by another thread).
2418 Let's assume helgrind is perfect ...
2419 Note that this is similar to drd's behaviour. */
2420 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2421 " being waited upon");
2422 return;
2423 }
2424
sewardj02114542009-07-28 20:52:36 +00002425 tl_assert(cvi);
2426 tl_assert(cvi->so);
2427 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002428
sewardjff427c92013-10-14 12:13:52 +00002429 if (!timeout && !libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002430 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2431 it? If this happened it would surely be a bug in the threads
2432 library. Or one of those fabled "spurious wakeups". */
2433 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002434 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002435 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002436 }
sewardjf98e1c02008-10-25 16:22:41 +00002437
2438 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002439 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2440
2441 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002442}
2443
philippe19dfe032013-03-24 20:10:23 +00002444static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2445 void* cond, void* cond_attr )
2446{
2447 CVInfo* cvi;
2448
2449 if (SHOW_EVENTS >= 1)
2450 VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2451 "(ctid=%d, cond=%p, cond_attr=%p)\n",
2452 (Int)tid, (void*)cond, (void*) cond_attr );
2453
2454 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2455 tl_assert (cvi);
2456 tl_assert (cvi->so);
2457}
2458
2459
sewardjf98e1c02008-10-25 16:22:41 +00002460static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
sewardjc02f6c42013-10-14 13:51:25 +00002461 void* cond, Bool cond_is_init )
sewardjf98e1c02008-10-25 16:22:41 +00002462{
2463 /* Deal with destroy events. The only purpose is to free storage
2464 associated with the CV, so as to avoid any possible resource
2465 leaks. */
2466 if (SHOW_EVENTS >= 1)
2467 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
sewardjc02f6c42013-10-14 13:51:25 +00002468 "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2469 (Int)tid, (void*)cond, (Int)cond_is_init );
sewardjf98e1c02008-10-25 16:22:41 +00002470
sewardjc02f6c42013-10-14 13:51:25 +00002471 map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
sewardjb4112022007-11-09 22:49:28 +00002472}
2473
2474
sewardj9f569b72008-11-13 13:33:09 +00002475/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002476/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002477/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002478
2479/* EXPOSITION only */
2480static
2481void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2482{
2483 if (SHOW_EVENTS >= 1)
2484 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2485 (Int)tid, (void*)rwl );
2486 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002487 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002488 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2489}
2490
2491static
2492void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2493{
2494 Thread* thr;
2495 Lock* lk;
2496 if (SHOW_EVENTS >= 1)
2497 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2498 (Int)tid, (void*)rwl );
2499
2500 thr = map_threads_maybe_lookup( tid );
2501 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002502 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002503
2504 lk = map_locks_maybe_lookup( (Addr)rwl );
2505
2506 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002507 HG_(record_error_Misc)(
2508 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002509 }
2510
2511 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002512 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002513 tl_assert( lk->guestaddr == (Addr)rwl );
2514 if (lk->heldBy) {
2515 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002516 HG_(record_error_Misc)(
 2517 thr, "pthread_rwlock_destroy of a locked rwlock" );
sewardjb4112022007-11-09 22:49:28 +00002518 /* remove lock from locksets of all owning threads */
2519 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002520 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002521 lk->heldBy = NULL;
2522 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002523 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002524 }
2525 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002526 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002527
2528 if (HG_(clo_track_lockorders))
2529 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002530 map_locks_delete( lk->guestaddr );
2531 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002532 }
2533
sewardjf98e1c02008-10-25 16:22:41 +00002534 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002535 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2536}
2537
2538static
sewardj789c3c52008-02-25 12:10:07 +00002539void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2540 void* rwl,
2541 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002542{
2543 /* Just check the rwl is sane; nothing else to do. */
2544 // 'rwl' may be invalid - not checked by wrapper
2545 Thread* thr;
2546 Lock* lk;
2547 if (SHOW_EVENTS >= 1)
2548 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2549 (Int)tid, (Int)isW, (void*)rwl );
2550
2551 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002552 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002553 thr = map_threads_maybe_lookup( tid );
2554 tl_assert(thr); /* cannot fail - Thread* must already exist */
2555
2556 lk = map_locks_maybe_lookup( (Addr)rwl );
2557 if ( lk
2558 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2559 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002560 HG_(record_error_Misc)(
 2561 thr, "pthread_rwlock_{rd,wr}lock with a "
 2562 "pthread_mutex_t* argument" );
sewardjb4112022007-11-09 22:49:28 +00002563 }
2564}
2565
2566static
2567void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2568{
 2569 // only called if the real library call succeeded - so the rwlock is sane
2570 Thread* thr;
2571 if (SHOW_EVENTS >= 1)
2572 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2573 (Int)tid, (Int)isW, (void*)rwl );
2574
2575 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2576 thr = map_threads_maybe_lookup( tid );
2577 tl_assert(thr); /* cannot fail - Thread* must already exist */
2578
2579 (isW ? evhH__post_thread_w_acquires_lock
2580 : evhH__post_thread_r_acquires_lock)(
2581 thr,
2582 LK_rdwr, /* if not known, create new lock with this LockKind */
2583 (Addr)rwl
2584 );
2585}
2586
2587static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2588{
2589 // 'rwl' may be invalid - not checked by wrapper
2590 Thread* thr;
2591 if (SHOW_EVENTS >= 1)
2592 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2593 (Int)tid, (void*)rwl );
2594
2595 thr = map_threads_maybe_lookup( tid );
2596 tl_assert(thr); /* cannot fail - Thread* must already exist */
2597
2598 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2599}
2600
2601static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2602{
 2603 // only called if the real library call succeeded - so the rwlock is sane
2604 Thread* thr;
2605 if (SHOW_EVENTS >= 1)
2606 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2607 (Int)tid, (void*)rwl );
2608 thr = map_threads_maybe_lookup( tid );
2609 tl_assert(thr); /* cannot fail - Thread* must already exist */
2610
2611 // anything we should do here?
2612}
2613
2614
sewardj9f569b72008-11-13 13:33:09 +00002615/* ---------------------------------------------------------- */
2616/* -------------- events to do with semaphores -------------- */
2617/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002618
sewardj11e352f2007-11-30 11:11:02 +00002619/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002620 variables. */
2621
sewardjf98e1c02008-10-25 16:22:41 +00002622/* For each semaphore, we maintain a stack of SOs. When a 'post'
2623 operation is done on a semaphore (unlocking, essentially), a new SO
2624 is created for the posting thread, the posting thread does a strong
2625 send to it (which merely installs the posting thread's VC in the
2626 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002627
2628 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002629 semaphore, we pop a SO off the semaphore's stack (which should be
2630 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002631 dependencies between posters and waiters of the semaphore.
2632
sewardjf98e1c02008-10-25 16:22:41 +00002633 It may not be necessary to use a stack - perhaps a bag of SOs would
2634 do. But we do need to keep track of how many unused-up posts have
2635 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002636
sewardjf98e1c02008-10-25 16:22:41 +00002637 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002638 twice on S. T3 cannot complete its waits without both T1 and T2
2639 posting. The above mechanism will ensure that T3 acquires
2640 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002641
sewardjf98e1c02008-10-25 16:22:41 +00002642 When a semaphore is initialised with value N, we do as if we'd
2643 posted N times on the semaphore: basically create N SOs and do a
2644 strong send to all of then. This allows up to N waits on the
2645 semaphore to acquire a dependency on the initialisation point,
2646 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002647
2648 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2649 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002650*/
2651
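/* As a concrete (illustrative) client-side sketch of the T1/T2/T3
   scenario described above, using POSIX semaphores:

      sem_t s;   // shared; sem_init(&s, 0, 0)
      void* t1 ( void* v ) { sem_post(&s); return NULL; }
      void* t2 ( void* v ) { sem_post(&s); return NULL; }
      void* t3 ( void* v ) { sem_wait(&s); sem_wait(&s); return NULL; }

   T3 cannot pass its second sem_wait until both posts have happened.
   Each completed wait pops one SO and strong-recvs from it, so T3
   ends up with a dependency on both T1 and T2, whichever order the
   posts were pushed. */
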
sewardjf98e1c02008-10-25 16:22:41 +00002652/* sem_t* -> XArray* SO* */
2653static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002654
sewardjf98e1c02008-10-25 16:22:41 +00002655static void map_sem_to_SO_stack_INIT ( void ) {
2656 if (map_sem_to_SO_stack == NULL) {
2657 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2658 HG_(free), NULL );
2659 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002660 }
2661}
2662
sewardjf98e1c02008-10-25 16:22:41 +00002663static void push_SO_for_sem ( void* sem, SO* so ) {
2664 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002665 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002666 tl_assert(so);
2667 map_sem_to_SO_stack_INIT();
2668 if (VG_(lookupFM)( map_sem_to_SO_stack,
2669 &keyW, (UWord*)&xa, (UWord)sem )) {
2670 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002671 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002672 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002673 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002674 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2675 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002676 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002677 }
2678}
2679
sewardjf98e1c02008-10-25 16:22:41 +00002680static SO* mb_pop_SO_for_sem ( void* sem ) {
2681 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002682 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002683 SO* so;
2684 map_sem_to_SO_stack_INIT();
2685 if (VG_(lookupFM)( map_sem_to_SO_stack,
2686 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002687 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002688 Word sz;
2689 tl_assert(keyW == (UWord)sem);
2690 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002691 tl_assert(sz >= 0);
2692 if (sz == 0)
2693 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002694 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2695 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002696 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002697 return so;
sewardjb4112022007-11-09 22:49:28 +00002698 } else {
2699 /* hmm, that's odd. No stack for this semaphore. */
2700 return NULL;
2701 }
2702}
2703
sewardj11e352f2007-11-30 11:11:02 +00002704static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002705{
sewardjf98e1c02008-10-25 16:22:41 +00002706 UWord keyW, valW;
2707 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002708
sewardjb4112022007-11-09 22:49:28 +00002709 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002710 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002711 (Int)tid, (void*)sem );
2712
sewardjf98e1c02008-10-25 16:22:41 +00002713 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002714
sewardjf98e1c02008-10-25 16:22:41 +00002715 /* Empty out the semaphore's SO stack. This way of doing it is
2716 stupid, but at least it's easy. */
2717 while (1) {
2718 so = mb_pop_SO_for_sem( sem );
2719 if (!so) break;
2720 libhb_so_dealloc(so);
2721 }
2722
2723 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2724 XArray* xa = (XArray*)valW;
2725 tl_assert(keyW == (UWord)sem);
2726 tl_assert(xa);
2727 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2728 VG_(deleteXA)(xa);
2729 }
sewardjb4112022007-11-09 22:49:28 +00002730}
2731
sewardj11e352f2007-11-30 11:11:02 +00002732static
2733void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2734{
sewardjf98e1c02008-10-25 16:22:41 +00002735 SO* so;
2736 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002737
2738 if (SHOW_EVENTS >= 1)
2739 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2740 (Int)tid, (void*)sem, value );
2741
sewardjf98e1c02008-10-25 16:22:41 +00002742 thr = map_threads_maybe_lookup( tid );
2743 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002744
sewardjf98e1c02008-10-25 16:22:41 +00002745 /* Empty out the semaphore's SO stack. This way of doing it is
2746 stupid, but at least it's easy. */
2747 while (1) {
2748 so = mb_pop_SO_for_sem( sem );
2749 if (!so) break;
2750 libhb_so_dealloc(so);
2751 }
sewardj11e352f2007-11-30 11:11:02 +00002752
sewardjf98e1c02008-10-25 16:22:41 +00002753 /* If we don't do this check, the following while loop runs us out
2754 of memory for stupid initial values of 'value'. */
2755 if (value > 10000) {
2756 HG_(record_error_Misc)(
2757 thr, "sem_init: initial value exceeds 10000; using 10000" );
2758 value = 10000;
2759 }
sewardj11e352f2007-11-30 11:11:02 +00002760
sewardjf98e1c02008-10-25 16:22:41 +00002761 /* Now create 'valid' new SOs for the thread, do a strong send to
2762 each of them, and push them all on the stack. */
2763 for (; value > 0; value--) {
2764 Thr* hbthr = thr->hbthr;
2765 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002766
sewardjf98e1c02008-10-25 16:22:41 +00002767 so = libhb_so_alloc();
2768 libhb_so_send( hbthr, so, True/*strong send*/ );
2769 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002770 }
2771}
2772
2773static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002774{
sewardjf98e1c02008-10-25 16:22:41 +00002775 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2776 it (iow, write our VC into it, then tick ours), and push the SO
 2777 on a stack of SOs associated with 'sem'. This is later used
2778 by other thread(s) which successfully exit from a sem_wait on
2779 the same sem; by doing a strong recv from SOs popped of the
2780 stack, they acquire dependencies on the posting thread
2781 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002782
sewardjf98e1c02008-10-25 16:22:41 +00002783 Thread* thr;
2784 SO* so;
2785 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002786
2787 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002788 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002789 (Int)tid, (void*)sem );
2790
2791 thr = map_threads_maybe_lookup( tid );
2792 tl_assert(thr); /* cannot fail - Thread* must already exist */
2793
2794 // error-if: sem is bogus
2795
sewardjf98e1c02008-10-25 16:22:41 +00002796 hbthr = thr->hbthr;
2797 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002798
sewardjf98e1c02008-10-25 16:22:41 +00002799 so = libhb_so_alloc();
2800 libhb_so_send( hbthr, so, True/*strong send*/ );
2801 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002802}
2803
sewardj11e352f2007-11-30 11:11:02 +00002804static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002805{
sewardjf98e1c02008-10-25 16:22:41 +00002806 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2807 the 'sem' from this semaphore's SO-stack, and do a strong recv
2808 from it. This creates a dependency back to one of the post-ers
2809 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002810
sewardjf98e1c02008-10-25 16:22:41 +00002811 Thread* thr;
2812 SO* so;
2813 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002814
2815 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002816 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002817 (Int)tid, (void*)sem );
2818
2819 thr = map_threads_maybe_lookup( tid );
2820 tl_assert(thr); /* cannot fail - Thread* must already exist */
2821
2822 // error-if: sem is bogus
2823
sewardjf98e1c02008-10-25 16:22:41 +00002824 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002825
sewardjf98e1c02008-10-25 16:22:41 +00002826 if (so) {
2827 hbthr = thr->hbthr;
2828 tl_assert(hbthr);
2829
2830 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2831 libhb_so_dealloc(so);
2832 } else {
2833 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2834 If this happened it would surely be a bug in the threads
2835 library. */
2836 HG_(record_error_Misc)(
2837 thr, "Bug in libpthread: sem_wait succeeded on"
2838 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002839 }
2840}
2841
2842
sewardj9f569b72008-11-13 13:33:09 +00002843/* -------------------------------------------------------- */
2844/* -------------- events to do with barriers -------------- */
2845/* -------------------------------------------------------- */
2846
2847typedef
2848 struct {
2849 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002850 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002851 UWord size; /* declared size */
2852 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2853 }
2854 Bar;
2855
2856static Bar* new_Bar ( void ) {
2857 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2858 tl_assert(bar);
2859 /* all fields are zero */
2860 tl_assert(bar->initted == False);
2861 return bar;
2862}
2863
2864static void delete_Bar ( Bar* bar ) {
2865 tl_assert(bar);
2866 if (bar->waiting)
2867 VG_(deleteXA)(bar->waiting);
2868 HG_(free)(bar);
2869}
2870
2871/* A mapping which stores auxiliary data for barriers. */
2872
2873/* pthread_barrier_t* -> Bar* */
2874static WordFM* map_barrier_to_Bar = NULL;
2875
2876static void map_barrier_to_Bar_INIT ( void ) {
2877 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2878 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2879 "hg.mbtBI.1", HG_(free), NULL );
2880 tl_assert(map_barrier_to_Bar != NULL);
2881 }
2882}
2883
2884static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2885 UWord key, val;
2886 map_barrier_to_Bar_INIT();
2887 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2888 tl_assert(key == (UWord)barrier);
2889 return (Bar*)val;
2890 } else {
2891 Bar* bar = new_Bar();
2892 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2893 return bar;
2894 }
2895}
2896
2897static void map_barrier_to_Bar_delete ( void* barrier ) {
2898 UWord keyW, valW;
2899 map_barrier_to_Bar_INIT();
2900 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2901 Bar* bar = (Bar*)valW;
2902 tl_assert(keyW == (UWord)barrier);
2903 delete_Bar(bar);
2904 }
2905}
2906
2907
2908static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2909 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002910 UWord count,
2911 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002912{
2913 Thread* thr;
2914 Bar* bar;
2915
2916 if (SHOW_EVENTS >= 1)
2917 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002918 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2919 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002920
2921 thr = map_threads_maybe_lookup( tid );
2922 tl_assert(thr); /* cannot fail - Thread* must already exist */
2923
2924 if (count == 0) {
2925 HG_(record_error_Misc)(
2926 thr, "pthread_barrier_init: 'count' argument is zero"
2927 );
2928 }
2929
sewardj406bac82010-03-03 23:03:40 +00002930 if (resizable != 0 && resizable != 1) {
2931 HG_(record_error_Misc)(
2932 thr, "pthread_barrier_init: invalid 'resizable' argument"
2933 );
2934 }
2935
sewardj9f569b72008-11-13 13:33:09 +00002936 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2937 tl_assert(bar);
2938
2939 if (bar->initted) {
2940 HG_(record_error_Misc)(
2941 thr, "pthread_barrier_init: barrier is already initialised"
2942 );
2943 }
2944
2945 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2946 tl_assert(bar->initted);
2947 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002948 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002949 );
2950 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2951 }
2952 if (!bar->waiting) {
2953 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2954 sizeof(Thread*) );
2955 }
2956
2957 tl_assert(bar->waiting);
2958 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002959 bar->initted = True;
2960 bar->resizable = resizable == 1 ? True : False;
2961 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002962}
2963
2964
2965static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2966 void* barrier )
2967{
sewardj553655c2008-11-14 19:41:19 +00002968 Thread* thr;
2969 Bar* bar;
2970
sewardj9f569b72008-11-13 13:33:09 +00002971 /* Deal with destroy events. The only purpose is to free storage
2972 associated with the barrier, so as to avoid any possible
2973 resource leaks. */
2974 if (SHOW_EVENTS >= 1)
2975 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2976 "(tid=%d, barrier=%p)\n",
2977 (Int)tid, (void*)barrier );
2978
sewardj553655c2008-11-14 19:41:19 +00002979 thr = map_threads_maybe_lookup( tid );
2980 tl_assert(thr); /* cannot fail - Thread* must already exist */
2981
2982 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2983 tl_assert(bar);
2984
2985 if (!bar->initted) {
2986 HG_(record_error_Misc)(
2987 thr, "pthread_barrier_destroy: barrier was never initialised"
2988 );
2989 }
2990
2991 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2992 HG_(record_error_Misc)(
2993 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2994 );
2995 }
2996
sewardj9f569b72008-11-13 13:33:09 +00002997 /* Maybe we shouldn't do this; just let it persist, so that when it
2998 is reinitialised we don't need to do any dynamic memory
2999 allocation? The downside is a potentially unlimited space leak,
3000 if the client creates (in turn) a large number of barriers all
3001 at different locations. Note that if we do later move to the
3002 don't-delete-it scheme, we need to mark the barrier as
3003 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00003004 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00003005 map_barrier_to_Bar_delete( barrier );
3006}
3007
3008
sewardj406bac82010-03-03 23:03:40 +00003009/* All the threads have arrived. Now do the Interesting Bit. Get a
3010 new synchronisation object and do a weak send to it from all the
3011 participating threads. This makes its vector clocks be the join of
3012 all the individual threads' vector clocks. Then do a strong
3013 receive from it back to all threads, so that their VCs are a copy
3014 of it (hence are all equal to the join of their original VCs.) */
3015static void do_barrier_cross_sync_and_empty ( Bar* bar )
3016{
3017 /* XXX check bar->waiting has no duplicates */
3018 UWord i;
3019 SO* so = libhb_so_alloc();
3020
3021 tl_assert(bar->waiting);
3022 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3023
3024 /* compute the join ... */
3025 for (i = 0; i < bar->size; i++) {
3026 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3027 Thr* hbthr = t->hbthr;
3028 libhb_so_send( hbthr, so, False/*weak send*/ );
3029 }
3030 /* ... and distribute to all threads */
3031 for (i = 0; i < bar->size; i++) {
3032 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3033 Thr* hbthr = t->hbthr;
3034 libhb_so_recv( hbthr, so, True/*strong recv*/ );
3035 }
3036
3037 /* finally, we must empty out the waiting vector */
3038 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3039
3040 /* and we don't need this any more. Perhaps a stack-allocated
3041 SO would be better? */
3042 libhb_so_dealloc(so);
3043}
3044
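/* A worked (illustrative) example of the above: if threads T1..T3
   arrive at the barrier with vector clocks VC1..VC3, the three weak
   sends leave the SO's clock at join(VC1,VC2,VC3), and the three
   strong recvs then copy that join back into every thread.  Hence
   any access made before the barrier in one thread happens-before
   any access made after the barrier in another. */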
3045
sewardj9f569b72008-11-13 13:33:09 +00003046static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3047 void* barrier )
3048{
sewardj1c466b72008-11-19 11:52:14 +00003049 /* This function gets called after a client thread calls
3050 pthread_barrier_wait but before it arrives at the real
3051 pthread_barrier_wait.
3052
3053 Why is the following correct? It's a bit subtle.
3054
3055 If this is not the last thread arriving at the barrier, we simply
3056 note its presence and return. Because valgrind (at least as of
3057 Nov 08) is single threaded, we are guaranteed safe from any race
3058 conditions when in this function -- no other client threads are
3059 running.
3060
3061 If this is the last thread, then we are again the only running
3062 thread. All the other threads will have either arrived at the
3063 real pthread_barrier_wait or are on their way to it, but in any
3064 case are guaranteed not to be able to move past it, because this
3065 thread is currently in this function and so has not yet arrived
3066 at the real pthread_barrier_wait. That means that:
3067
3068 1. While we are in this function, none of the other threads
3069 waiting at the barrier can move past it.
3070
3071 2. When this function returns (and simulated execution resumes),
3072 this thread and all other waiting threads will be able to move
3073 past the real barrier.
3074
3075 Because of this, it is now safe to update the vector clocks of
3076 all threads, to represent the fact that they all arrived at the
3077 barrier and have all moved on. There is no danger of any
3078 complications to do with some threads leaving the barrier and
3079 racing back round to the front, whilst others are still leaving
3080 (which is the primary source of complication in correct handling/
3081 implementation of barriers). That can't happen because we update
3082 here our data structures so as to indicate that the threads have
3083 passed the barrier, even though, as per (2) above, they are
3084 guaranteed not to pass the barrier until we return.
3085
3086 This relies crucially on Valgrind being single threaded. If that
3087 changes, this will need to be reconsidered.
3088 */
sewardj9f569b72008-11-13 13:33:09 +00003089 Thread* thr;
3090 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003091 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003092
3093 if (SHOW_EVENTS >= 1)
3094 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3095 "(tid=%d, barrier=%p)\n",
3096 (Int)tid, (void*)barrier );
3097
3098 thr = map_threads_maybe_lookup( tid );
3099 tl_assert(thr); /* cannot fail - Thread* must already exist */
3100
3101 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3102 tl_assert(bar);
3103
3104 if (!bar->initted) {
3105 HG_(record_error_Misc)(
3106 thr, "pthread_barrier_wait: barrier is uninitialised"
3107 );
3108 return; /* client is broken .. avoid assertions below */
3109 }
3110
3111 /* guaranteed by _INIT_PRE above */
3112 tl_assert(bar->size > 0);
3113 tl_assert(bar->waiting);
3114
3115 VG_(addToXA)( bar->waiting, &thr );
3116
3117 /* guaranteed by this function */
3118 present = VG_(sizeXA)(bar->waiting);
3119 tl_assert(present > 0 && present <= bar->size);
3120
3121 if (present < bar->size)
3122 return;
3123
sewardj406bac82010-03-03 23:03:40 +00003124 do_barrier_cross_sync_and_empty(bar);
3125}
sewardj9f569b72008-11-13 13:33:09 +00003126
sewardj9f569b72008-11-13 13:33:09 +00003127
sewardj406bac82010-03-03 23:03:40 +00003128static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3129 void* barrier,
3130 UWord newcount )
3131{
3132 Thread* thr;
3133 Bar* bar;
3134 UWord present;
3135
3136 if (SHOW_EVENTS >= 1)
3137 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3138 "(tid=%d, barrier=%p, newcount=%lu)\n",
3139 (Int)tid, (void*)barrier, newcount );
3140
3141 thr = map_threads_maybe_lookup( tid );
3142 tl_assert(thr); /* cannot fail - Thread* must already exist */
3143
3144 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3145 tl_assert(bar);
3146
3147 if (!bar->initted) {
3148 HG_(record_error_Misc)(
3149 thr, "pthread_barrier_resize: barrier is uninitialised"
3150 );
3151 return; /* client is broken .. avoid assertions below */
3152 }
3153
3154 if (!bar->resizable) {
3155 HG_(record_error_Misc)(
 3156 thr, "pthread_barrier_resize: barrier may not be resized"
3157 );
3158 return; /* client is broken .. avoid assertions below */
3159 }
3160
3161 if (newcount == 0) {
3162 HG_(record_error_Misc)(
3163 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3164 );
3165 return; /* client is broken .. avoid assertions below */
3166 }
3167
3168 /* guaranteed by _INIT_PRE above */
3169 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003170 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003171 /* Guaranteed by this fn */
3172 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003173
sewardj406bac82010-03-03 23:03:40 +00003174 if (newcount >= bar->size) {
3175 /* Increasing the capacity. There's no possibility of threads
3176 moving on from the barrier in this situation, so just note
3177 the fact and do nothing more. */
3178 bar->size = newcount;
3179 } else {
3180 /* Decreasing the capacity. If we decrease it to be equal or
3181 below the number of waiting threads, they will now move past
3182 the barrier, so need to mess with dep edges in the same way
3183 as if the barrier had filled up normally. */
3184 present = VG_(sizeXA)(bar->waiting);
 3185 tl_assert(present <= bar->size); /* 'present' is unsigned */
3186 if (newcount <= present) {
3187 bar->size = present; /* keep the cross_sync call happy */
3188 do_barrier_cross_sync_and_empty(bar);
3189 }
3190 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003191 }
sewardj9f569b72008-11-13 13:33:09 +00003192}
3193
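/* For example (illustrative): a barrier of declared size 4 with 3
   threads already waiting, resized to newcount == 3, releases those
   3 waiters immediately -- hence the cross-sync call above -- and
   subsequently operates with size 3. */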
3194
sewardjed2e72e2009-08-14 11:08:24 +00003195/* ----------------------------------------------------- */
3196/* ----- events to do with user-specified HB edges ----- */
3197/* ----------------------------------------------------- */
3198
3199/* A mapping from arbitrary UWord tag to the SO associated with it.
3200 The UWord tags are meaningless to us, interpreted only by the
3201 user. */
3202
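/* Illustrative only: client code normally reaches these events via
   the request wrappers in helgrind.h, along the lines of

      ANNOTATE_HAPPENS_BEFORE(&obj);            // -> _USERSO_SEND_PRE
      ANNOTATE_HAPPENS_AFTER(&obj);             // -> _USERSO_RECV_POST
      ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&obj); // -> _USERSO_FORGET_ALL

   where the address of 'obj' serves as the opaque UWord usertag; we
   never dereference it. */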
3203
3204
3205/* UWord -> SO* */
3206static WordFM* map_usertag_to_SO = NULL;
3207
3208static void map_usertag_to_SO_INIT ( void ) {
3209 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3210 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3211 "hg.mutS.1", HG_(free), NULL );
3212 tl_assert(map_usertag_to_SO != NULL);
3213 }
3214}
3215
3216static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3217 UWord key, val;
3218 map_usertag_to_SO_INIT();
3219 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3220 tl_assert(key == (UWord)usertag);
3221 return (SO*)val;
3222 } else {
3223 SO* so = libhb_so_alloc();
3224 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3225 return so;
3226 }
3227}
3228
sewardj6015d0e2011-03-11 19:10:48 +00003229static void map_usertag_to_SO_delete ( UWord usertag ) {
3230 UWord keyW, valW;
3231 map_usertag_to_SO_INIT();
3232 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3233 SO* so = (SO*)valW;
3234 tl_assert(keyW == usertag);
3235 tl_assert(so);
3236 libhb_so_dealloc(so);
3237 }
3238}
sewardjed2e72e2009-08-14 11:08:24 +00003239
3240
3241static
3242void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3243{
 3244 /* TID is just about to notionally send a message on a notional
3245 abstract synchronisation object whose identity is given by
3246 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003247 bound, and do a 'weak send' on the SO. This joins the vector
3248 clocks from this thread into any vector clocks already present
3249 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003250 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003251 thereby acquiring a dependency on all the events that have
3252 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003253 Thread* thr;
3254 SO* so;
3255
3256 if (SHOW_EVENTS >= 1)
3257 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3258 (Int)tid, usertag );
3259
3260 thr = map_threads_maybe_lookup( tid );
3261 tl_assert(thr); /* cannot fail - Thread* must already exist */
3262
3263 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3264 tl_assert(so);
3265
sewardj8c50d3c2011-03-11 18:38:12 +00003266 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003267}
3268
3269static
3270void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3271{
3272 /* TID has just notionally received a message from a notional
3273 abstract synchronisation object whose identity is given by
3274 USERTAG. Bind USERTAG to a real SO if it is not already so
3275 bound. If the SO has at some point in the past been 'sent' on,
 3276 do a 'strong receive' on it, thereby acquiring a dependency on
3277 the sender. */
3278 Thread* thr;
3279 SO* so;
3280
3281 if (SHOW_EVENTS >= 1)
3282 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3283 (Int)tid, usertag );
3284
3285 thr = map_threads_maybe_lookup( tid );
3286 tl_assert(thr); /* cannot fail - Thread* must already exist */
3287
3288 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3289 tl_assert(so);
3290
3291 /* Acquire a dependency on it. If the SO has never so far been
3292 sent on, then libhb_so_recv will do nothing. So we're safe
3293 regardless of SO's history. */
3294 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3295}
3296
sewardj6015d0e2011-03-11 19:10:48 +00003297static
3298void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3299{
3300 /* TID declares that any happens-before edges notionally stored in
3301 USERTAG can be deleted. If (as would normally be the case) a
 3302 SO is associated with USERTAG, then the association is removed
3303 and all resources associated with SO are freed. Importantly,
3304 that frees up any VTSs stored in SO. */
3305 if (SHOW_EVENTS >= 1)
3306 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3307 (Int)tid, usertag );
3308
3309 map_usertag_to_SO_delete( usertag );
3310}
3311
sewardjed2e72e2009-08-14 11:08:24 +00003312
sewardjb4112022007-11-09 22:49:28 +00003313/*--------------------------------------------------------------*/
3314/*--- Lock acquisition order monitoring ---*/
3315/*--------------------------------------------------------------*/
3316
3317/* FIXME: here are some optimisations still to do in
3318 laog__pre_thread_acquires_lock.
3319
3320 The graph is structured so that if L1 --*--> L2 then L1 must be
3321 acquired before L2.
3322
3323 The common case is that some thread T holds (eg) L1 L2 and L3 and
3324 is repeatedly acquiring and releasing Ln, and there is no ordering
 3325 error in what it is doing. Hence it repeatedly:
3326
3327 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3328 produces the answer No (because there is no error).
3329
3330 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3331 (because they already got added the first time T acquired Ln).
3332
3333 Hence cache these two events:
3334
3335 (1) Cache result of the query from last time. Invalidate the cache
3336 any time any edges are added to or deleted from laog.
3337
3338 (2) Cache these add-edge requests and ignore them if said edges
3339 have already been added to laog. Invalidate the cache any time
3340 any edges are deleted from laog.
3341*/
3342
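/* A minimal sketch of the proposed query cache -- not implemented,
   names are hypothetical:

      static WordSetID cache_lset;   // thr->locksetA of the last query
      static Lock*     cache_lk;     // lk of the last query
      static Lock*     cache_result; // what laog__do_dfs_from_to gave
      static UWord     laog_gen;     // bumped on every laog edit

   laog__add_edge / laog__del_edge would increment laog_gen, and
   laog__pre_thread_acquires_lock could reuse cache_result whenever
   (cache_lk, cache_lset) match the current (lk, thr->locksetA) and
   laog_gen is unchanged since the cache was filled. */
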
3343typedef
3344 struct {
3345 WordSetID inns; /* in univ_laog */
3346 WordSetID outs; /* in univ_laog */
3347 }
3348 LAOGLinks;
3349
3350/* lock order acquisition graph */
3351static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3352
3353/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3354 where that edge was created, so that we can show the user later if
3355 we need to. */
3356typedef
3357 struct {
3358 Addr src_ga; /* Lock guest addresses for */
3359 Addr dst_ga; /* src/dst of the edge */
3360 ExeContext* src_ec; /* And corresponding places where that */
3361 ExeContext* dst_ec; /* ordering was established */
3362 }
3363 LAOGLinkExposition;
3364
sewardj250ec2e2008-02-15 22:02:30 +00003365static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003366 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3367 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3368 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3369 if (llx1->src_ga < llx2->src_ga) return -1;
3370 if (llx1->src_ga > llx2->src_ga) return 1;
3371 if (llx1->dst_ga < llx2->dst_ga) return -1;
3372 if (llx1->dst_ga > llx2->dst_ga) return 1;
3373 return 0;
3374}
3375
3376static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3377/* end EXPOSITION ONLY */
3378
3379
sewardja65db102009-01-26 10:45:16 +00003380__attribute__((noinline))
3381static void laog__init ( void )
3382{
3383 tl_assert(!laog);
3384 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003385 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003386
3387 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3388 HG_(free), NULL/*unboxedcmp*/ );
3389
3390 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3391 cmp_LAOGLinkExposition );
3392 tl_assert(laog);
3393 tl_assert(laog_exposition);
3394}
3395
florian6bf37262012-10-21 03:23:36 +00003396static void laog__show ( const HChar* who ) {
3397 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003398 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003399 Lock* me;
3400 LAOGLinks* links;
3401 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003402 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003403 me = NULL;
3404 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003405 while (VG_(nextIterFM)( laog, (UWord*)&me,
3406 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003407 tl_assert(me);
3408 tl_assert(links);
3409 VG_(printf)(" node %p:\n", me);
3410 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3411 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003412 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003413 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3414 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003415 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003416 me = NULL;
3417 links = NULL;
3418 }
sewardj896f6f92008-08-19 08:38:52 +00003419 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003420 VG_(printf)("}\n");
3421}
3422
sewardj866c80c2011-10-22 19:29:51 +00003423static void univ_laog_do_GC ( void ) {
3424 Word i;
3425 LAOGLinks* links;
3426 Word seen = 0;
3427 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3428 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3429
3430 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3431 (Int) univ_laog_cardinality
3432 * sizeof(Bool) );
3433 // univ_laog_seen[*] set to 0 (False) by zalloc.
3434
3435 if (VG_(clo_stats))
3436 VG_(message)(Vg_DebugMsg,
3437 "univ_laog_do_GC enter cardinality %'10d\n",
3438 (Int)univ_laog_cardinality);
3439
3440 VG_(initIterFM)( laog );
3441 links = NULL;
3442 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3443 tl_assert(links);
 3444 tl_assert(links->inns < univ_laog_cardinality);
3445 univ_laog_seen[links->inns] = True;
 3446 tl_assert(links->outs < univ_laog_cardinality);
3447 univ_laog_seen[links->outs] = True;
3448 links = NULL;
3449 }
3450 VG_(doneIterFM)( laog );
3451
3452 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3453 if (univ_laog_seen[i])
3454 seen++;
3455 else
3456 HG_(dieWS) ( univ_laog, (WordSet)i );
3457 }
3458
3459 HG_(free) (univ_laog_seen);
3460
3461 // We need to decide the value of the next_gc.
3462 // 3 solutions were looked at:
3463 // Sol 1: garbage collect at seen * 2
3464 // This solution was a lot slower, probably because we both do a lot of
3465 // garbage collection and do not keep long enough laog WV that will become
3466 // useful again very soon.
3467 // Sol 2: garbage collect at a percentage increase of the current cardinality
3468 // (with a min increase of 1)
 3469 // Trials on a small test program with 1%, 5% and 10% increases were done.
3470 // 1% is slightly faster than 5%, which is slightly slower than 10%.
3471 // However, on a big application, this caused the memory to be exhausted,
3472 // as even a 1% increase of size at each gc becomes a lot, when many gc
3473 // are done.
3474 // Sol 3: always garbage collect at current cardinality + 1.
3475 // This solution was the fastest of the 3 solutions, and caused no memory
3476 // exhaustion in the big application.
3477 //
3478 // With regards to cost introduced by gc: on the t2t perf test (doing only
3479 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3480 // version with garbage collection. With t2t 50 20 2, my machine started
3481 // to page out, and so the garbage collected version was much faster.
3482 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
 3483 // performance difference is insignificant (~ 0.1 s).
3484 // Of course, it might be that real life programs are not well represented
3485 // by t2t.
3486
3487 // If ever we want to have a more sophisticated control
3488 // (e.g. clo options to control the percentage increase or fixed increased),
3489 // we should do it here, eg.
3490 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3491 // Currently, we just hard-code the solution 3 above.
3492 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3493
3494 if (VG_(clo_stats))
3495 VG_(message)
3496 (Vg_DebugMsg,
3497 "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
3498 (Int)seen, next_gc_univ_laog);
3499}
3500
3501
sewardjb4112022007-11-09 22:49:28 +00003502__attribute__((noinline))
3503static void laog__add_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003504 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003505 LAOGLinks* links;
3506 Bool presentF, presentR;
3507 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3508
3509 /* Take the opportunity to sanity check the graph. Record in
3510 presentF if there is already a src->dst mapping in this node's
3511 forwards links, and presentR if there is already a src->dst
3512 mapping in this node's backwards links. They should agree!
3513 Also, we need to know whether the edge was already present so as
3514 to decide whether or not to update the link details mapping. We
3515 can compute presentF and presentR essentially for free, so may
3516 as well do this always. */
3517 presentF = presentR = False;
3518
3519 /* Update the out edges for src */
3520 keyW = 0;
3521 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003522 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003523 WordSetID outs_new;
3524 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003525 tl_assert(keyW == (UWord)src);
3526 outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003527 presentF = outs_new == links->outs;
3528 links->outs = outs_new;
3529 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003530 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003531 links->inns = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003532 links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3533 VG_(addToFM)( laog, (UWord)src, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003534 }
3535 /* Update the in edges for dst */
3536 keyW = 0;
3537 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003538 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003539 WordSetID inns_new;
3540 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003541 tl_assert(keyW == (UWord)dst);
3542 inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003543 presentR = inns_new == links->inns;
3544 links->inns = inns_new;
3545 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003546 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
florian6bf37262012-10-21 03:23:36 +00003547 links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003548 links->outs = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003549 VG_(addToFM)( laog, (UWord)dst, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003550 }
3551
3552 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3553
3554 if (!presentF && src->acquired_at && dst->acquired_at) {
3555 LAOGLinkExposition expo;
3556 /* If this edge is entering the graph, and we have acquired_at
3557 information for both src and dst, record those acquisition
3558 points. Hence, if there is later a violation of this
3559 ordering, we can show the user the two places in which the
3560 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003561 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003562 src->guestaddr, dst->guestaddr);
3563 expo.src_ga = src->guestaddr;
3564 expo.dst_ga = dst->guestaddr;
3565 expo.src_ec = NULL;
3566 expo.dst_ec = NULL;
3567 tl_assert(laog_exposition);
florian6bf37262012-10-21 03:23:36 +00003568 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003569 /* we already have it; do nothing */
3570 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003571 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3572 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003573 expo2->src_ga = src->guestaddr;
3574 expo2->dst_ga = dst->guestaddr;
3575 expo2->src_ec = src->acquired_at;
3576 expo2->dst_ec = dst->acquired_at;
florian6bf37262012-10-21 03:23:36 +00003577 VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
sewardjb4112022007-11-09 22:49:28 +00003578 }
3579 }
sewardj866c80c2011-10-22 19:29:51 +00003580
3581 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3582 univ_laog_do_GC();
sewardjb4112022007-11-09 22:49:28 +00003583}
3584
3585__attribute__((noinline))
3586static void laog__del_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003587 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003588 LAOGLinks* links;
sewardj866c80c2011-10-22 19:29:51 +00003589 if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
sewardjb4112022007-11-09 22:49:28 +00003590 /* Update the out edges for src */
3591 keyW = 0;
3592 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003593 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003594 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003595 tl_assert(keyW == (UWord)src);
3596 links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003597 }
3598 /* Update the in edges for dst */
3599 keyW = 0;
3600 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003601 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003602 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003603 tl_assert(keyW == (UWord)dst);
3604 links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003605 }
sewardj866c80c2011-10-22 19:29:51 +00003606
3607 /* Remove the exposition of src,dst (if present) */
3608 {
3609 LAOGLinkExposition *fm_expo;
3610
3611 LAOGLinkExposition expo;
3612 expo.src_ga = src->guestaddr;
3613 expo.dst_ga = dst->guestaddr;
3614 expo.src_ec = NULL;
3615 expo.dst_ec = NULL;
3616
3617 if (VG_(delFromFM) (laog_exposition,
3618 (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3619 HG_(free) (fm_expo);
3620 }
3621 }
3622
 3623 /* Deleting edges can increase the number of WordSets, so check for GC. */
3624 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3625 univ_laog_do_GC();
3626 if (0) VG_(printf)("laog__del_edge exit\n");
sewardjb4112022007-11-09 22:49:28 +00003627}
3628
3629__attribute__((noinline))
3630static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003631 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003632 LAOGLinks* links;
3633 keyW = 0;
3634 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003635 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003636 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003637 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003638 return links->outs;
3639 } else {
3640 return HG_(emptyWS)( univ_laog );
3641 }
3642}
3643
3644__attribute__((noinline))
3645static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003646 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003647 LAOGLinks* links;
3648 keyW = 0;
3649 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003650 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003651 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003652 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003653 return links->inns;
3654 } else {
3655 return HG_(emptyWS)( univ_laog );
3656 }
3657}
3658
3659__attribute__((noinline))
florian6bf37262012-10-21 03:23:36 +00003660static void laog__sanity_check ( const HChar* who ) {
3661 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003662 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003663 Lock* me;
3664 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003665 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003666 me = NULL;
3667 links = NULL;
3668 if (0) VG_(printf)("laog sanity check\n");
florian6bf37262012-10-21 03:23:36 +00003669 while (VG_(nextIterFM)( laog, (UWord*)&me,
3670 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003671 tl_assert(me);
3672 tl_assert(links);
3673 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3674 for (i = 0; i < ws_size; i++) {
3675 if ( ! HG_(elemWS)( univ_laog,
3676 laog__succs( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003677 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003678 goto bad;
3679 }
3680 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3681 for (i = 0; i < ws_size; i++) {
3682 if ( ! HG_(elemWS)( univ_laog,
3683 laog__preds( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003684 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003685 goto bad;
3686 }
3687 me = NULL;
3688 links = NULL;
3689 }
sewardj896f6f92008-08-19 08:38:52 +00003690 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003691 return;
3692
3693 bad:
3694 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3695 laog__show(who);
3696 tl_assert(0);
3697}
3698
3699/* If there is a path in laog from 'src' to any of the elements in
3700 'dst', return an arbitrarily chosen element of 'dst' reachable from
 3701 'src'. If no path exists from 'src' to any element in 'dst', return
3702 NULL. */
3703__attribute__((noinline))
3704static
3705Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3706{
3707 Lock* ret;
florian6bf37262012-10-21 03:23:36 +00003708 Word ssz;
sewardjb4112022007-11-09 22:49:28 +00003709 XArray* stack; /* of Lock* */
3710 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3711 Lock* here;
3712 WordSetID succs;
florian6bf37262012-10-21 03:23:36 +00003713 UWord succs_size, i;
sewardj250ec2e2008-02-15 22:02:30 +00003714 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003715 //laog__sanity_check();
3716
3717 /* If the destination set is empty, we can never get there from
3718 'src' :-), so don't bother to try */
3719 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3720 return NULL;
3721
3722 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003723 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3724 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003725
3726 (void) VG_(addToXA)( stack, &src );
3727
3728 while (True) {
3729
3730 ssz = VG_(sizeXA)( stack );
3731
3732 if (ssz == 0) { ret = NULL; break; }
3733
3734 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3735 VG_(dropTailXA)( stack, 1 );
3736
florian6bf37262012-10-21 03:23:36 +00003737 if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
sewardjb4112022007-11-09 22:49:28 +00003738
florian6bf37262012-10-21 03:23:36 +00003739 if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
sewardjb4112022007-11-09 22:49:28 +00003740 continue;
3741
florian6bf37262012-10-21 03:23:36 +00003742 VG_(addToFM)( visited, (UWord)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003743
3744 succs = laog__succs( here );
3745 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3746 for (i = 0; i < succs_size; i++)
3747 (void) VG_(addToXA)( stack, &succs_words[i] );
3748 }
3749
sewardj896f6f92008-08-19 08:38:52 +00003750 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003751 VG_(deleteXA)( stack );
3752 return ret;
3753}
3754
3755
3756/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3757 between 'lk' and the locks already held by 'thr' and issue a
3758 complaint if so. Also, update the ordering graph appropriately.
3759*/
3760__attribute__((noinline))
3761static void laog__pre_thread_acquires_lock (
3762 Thread* thr, /* NB: BEFORE lock is added */
3763 Lock* lk
3764 )
3765{
sewardj250ec2e2008-02-15 22:02:30 +00003766 UWord* ls_words;
florian6bf37262012-10-21 03:23:36 +00003767 UWord ls_size, i;
sewardjb4112022007-11-09 22:49:28 +00003768 Lock* other;
3769
3770 /* It may be that 'thr' already holds 'lk' and is recursively
 3771 relocking it. In this case we just ignore the call. */
3772 /* NB: univ_lsets really is correct here */
florian6bf37262012-10-21 03:23:36 +00003773 if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
sewardjb4112022007-11-09 22:49:28 +00003774 return;
3775
sewardjb4112022007-11-09 22:49:28 +00003776 /* First, the check. Complain if there is any path in laog from lk
3777 to any of the locks already held by thr, since if any such path
3778 existed, it would mean that previously lk was acquired before
3779 (rather than after, as we are doing here) at least one of those
3780 locks.
3781 */
3782 other = laog__do_dfs_from_to(lk, thr->locksetA);
3783 if (other) {
3784 LAOGLinkExposition key, *found;
3785 /* So we managed to find a path lk --*--> other in the graph,
3786 which implies that 'lk' should have been acquired before
3787 'other' but is in fact being acquired afterwards. We present
3788 the lk/other arguments to record_error_LockOrder in the order
3789 in which they should have been acquired. */
3790 /* Go look in the laog_exposition mapping, to find the allocation
3791 points for this edge, so we can show the user. */
3792 key.src_ga = lk->guestaddr;
3793 key.dst_ga = other->guestaddr;
3794 key.src_ec = NULL;
3795 key.dst_ec = NULL;
3796 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003797 if (VG_(lookupFM)( laog_exposition,
florian6bf37262012-10-21 03:23:36 +00003798 (UWord*)&found, NULL, (UWord)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003799 tl_assert(found != &key);
3800 tl_assert(found->src_ga == key.src_ga);
3801 tl_assert(found->dst_ga == key.dst_ga);
3802 tl_assert(found->src_ec);
3803 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003804 HG_(record_error_LockOrder)(
3805 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003806 found->src_ec, found->dst_ec, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003807 } else {
3808 /* Hmm. This can't happen (can it?) */
philippeebe25802013-01-30 23:21:34 +00003809 /* Yes, it can happen: see tests/tc14_laog_dinphils.
3810 Imagine we have 3 philosophers A B C, and the forks
3811 between them:
3812
3813 C
3814
3815 fCA fBC
3816
3817 A fAB B
3818
3819 Let's have the following actions:
3820 A takes fCA,fAB
3821 A releases fCA,fAB
3822 B takes fAB,fBC
3823 B releases fAB,fBC
3824 C takes fBC,fCA
3825 C releases fBC,fCA
3826
3827 Helgrind will report a lock order error when C takes fCA.
3828 Effectively, we have a deadlock if the following
3829 sequence is done:
3830 A takes fCA
3831 B takes fAB
3832 C takes fBC
3833
3834 The error reported is:
3835 Observed (incorrect) order fBC followed by fCA
3836 but the stack traces that have established the required order
3837 are not given.
3838
 3839 This is because there is no pair (fCA, fBC) in laog exposition:
3840 the laog_exposition records all pairs of locks between a new lock
3841 taken by a thread and all the already taken locks.
3842 So, there is no laog_exposition (fCA, fBC) as no thread ever
3843 first locked fCA followed by fBC.
3844
3845 In other words, when the deadlock cycle involves more than
 3846 two locks, Helgrind does not report the sequence of
3847 operations that created the cycle.
3848
3849 However, we can report the current stack trace (where
3850 lk is being taken), and the stack trace where other was acquired:
3851 Effectively, the variable 'other' contains a lock currently
3852 held by this thread, with its 'acquired_at'. */
3853
sewardjf98e1c02008-10-25 16:22:41 +00003854 HG_(record_error_LockOrder)(
3855 thr, lk->guestaddr, other->guestaddr,
philippeebe25802013-01-30 23:21:34 +00003856 NULL, NULL, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003857 }
3858 }
3859
3860 /* Second, add to laog the pairs
3861 (old, lk) | old <- locks already held by thr
3862 Since both old and lk are currently held by thr, their acquired_at
3863 fields must be non-NULL.
3864 */
3865 tl_assert(lk->acquired_at);
3866 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3867 for (i = 0; i < ls_size; i++) {
3868 Lock* old = (Lock*)ls_words[i];
3869 tl_assert(old->acquired_at);
3870 laog__add_edge( old, lk );
3871 }
3872
 3873 /* Why "except_Locks"? We're here because a lock is being
 3874 acquired by a thread, and we're in an inconsistent state.
3875 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3876 When called in this inconsistent state, locks__sanity_check duly
3877 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003878 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003879 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3880}
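/* Illustrative example (not part of Helgrind itself): a minimal
   pthreads client that triggers the check above.  All names here are
   hypothetical.

       #include <pthread.h>
       #include <stddef.h>
       static pthread_mutex_t mu1 = PTHREAD_MUTEX_INITIALIZER;
       static pthread_mutex_t mu2 = PTHREAD_MUTEX_INITIALIZER;

       static void* first ( void* v ) {  // locks mu1 then mu2
          pthread_mutex_lock(&mu1);   pthread_mutex_lock(&mu2);
          pthread_mutex_unlock(&mu2); pthread_mutex_unlock(&mu1);
          return NULL;
       }
       static void* second ( void* v ) { // locks mu2 then mu1
          pthread_mutex_lock(&mu2);   pthread_mutex_lock(&mu1);
          pthread_mutex_unlock(&mu1); pthread_mutex_unlock(&mu2);
          return NULL;
       }
       int main ( void ) {
          pthread_t a, b;
          pthread_create(&a, NULL, first,  NULL); pthread_join(a, NULL);
          pthread_create(&b, NULL, second, NULL); pthread_join(b, NULL);
          return 0;
       }

   When 'second' acquires mu1 while holding mu2, laog already has the
   edge mu1 -> mu2 (added when 'first' took mu2 while holding mu1), so
   laog__do_dfs_from_to finds a path from lk (mu1) to a lock in
   thr->locksetA (mu2) and a LockOrder error is reported -- even
   though this particular run cannot deadlock, since the two threads
   are serialised by the joins. */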
3881
sewardj866c80c2011-10-22 19:29:51 +00003882/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3883static UWord* UWordV_dup(UWord* words, Word words_size)
3884{
 3885 Word i;
3886
3887 if (words_size == 0)
3888 return NULL;
3889
3890 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3891
3892 for (i = 0; i < words_size; i++)
3893 dup[i] = words[i];
3894
3895 return dup;
3896}
sewardjb4112022007-11-09 22:49:28 +00003897
3898/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3899
3900__attribute__((noinline))
3901static void laog__handle_one_lock_deletion ( Lock* lk )
3902{
3903 WordSetID preds, succs;
florian6bf37262012-10-21 03:23:36 +00003904 UWord preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003905 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003906
3907 preds = laog__preds( lk );
3908 succs = laog__succs( lk );
3909
sewardj866c80c2011-10-22 19:29:51 +00003910 // We need to duplicate the payload, as these can be garbage collected
3911 // during the del/add operations below.
sewardjb4112022007-11-09 22:49:28 +00003912 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
sewardj866c80c2011-10-22 19:29:51 +00003913 preds_words = UWordV_dup(preds_words, preds_size);
3914
3915 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3916 succs_words = UWordV_dup(succs_words, succs_size);
3917
sewardjb4112022007-11-09 22:49:28 +00003918 for (i = 0; i < preds_size; i++)
3919 laog__del_edge( (Lock*)preds_words[i], lk );
3920
sewardjb4112022007-11-09 22:49:28 +00003921 for (j = 0; j < succs_size; j++)
3922 laog__del_edge( lk, (Lock*)succs_words[j] );
3923
3924 for (i = 0; i < preds_size; i++) {
3925 for (j = 0; j < succs_size; j++) {
3926 if (preds_words[i] != succs_words[j]) {
3927 /* This can pass unlocked locks to laog__add_edge, since
3928 we're deleting stuff. So their acquired_at fields may
3929 be NULL. */
3930 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3931 }
3932 }
3933 }
sewardj866c80c2011-10-22 19:29:51 +00003934
3935 if (preds_words)
3936 HG_(free) (preds_words);
3937 if (succs_words)
3938 HG_(free) (succs_words);
3939
3940 // Remove lk information from laog links FM
3941 {
3942 LAOGLinks *links;
3943 Lock* linked_lk;
3944
3945 if (VG_(delFromFM) (laog,
3946 (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
3947 tl_assert (linked_lk == lk);
3948 HG_(free) (links);
3949 }
3950 }
3951 /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
sewardjb4112022007-11-09 22:49:28 +00003952}
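/* Worked example (illustrative): if laog currently contains the
   edges A -> lk -> B and lk is deleted, the loops above first remove
   (A, lk) and (lk, B), then add the bypass edge (A, B).  So the
   transitive ordering constraint "A must be acquired before B",
   previously established only via lk, survives the deletion. */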
3953
sewardj1cbc12f2008-11-10 16:16:46 +00003954//__attribute__((noinline))
3955//static void laog__handle_lock_deletions (
3956// WordSetID /* in univ_laog */ locksToDelete
3957// )
3958//{
3959// Word i, ws_size;
3960// UWord* ws_words;
3961//
sewardj1cbc12f2008-11-10 16:16:46 +00003962//
3963// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003964// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003965// for (i = 0; i < ws_size; i++)
3966// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3967//
3968// if (HG_(clo_sanity_flags) & SCE_LAOG)
3969// all__sanity_check("laog__handle_lock_deletions-post");
3970//}
sewardjb4112022007-11-09 22:49:28 +00003971
3972
3973/*--------------------------------------------------------------*/
3974/*--- Malloc/free replacements ---*/
3975/*--------------------------------------------------------------*/
3976
3977typedef
3978 struct {
3979 void* next; /* required by m_hashtable */
3980 Addr payload; /* ptr to actual block */
3981 SizeT szB; /* size requested */
3982 ExeContext* where; /* where it was allocated */
3983 Thread* thr; /* allocating thread */
3984 }
3985 MallocMeta;
3986
3987/* A hash table of MallocMetas, used to track malloc'd blocks
3988 (obviously). */
3989static VgHashTable hg_mallocmeta_table = NULL;
3990
philippe5fbc9762013-12-01 19:28:48 +00003991/* MallocMeta are small elements. We use a pool to avoid
3992 the overhead of malloc for each MallocMeta. */
3993static PoolAlloc *MallocMeta_poolalloc = NULL;
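/* Sketch (an assumption, not the actual init code, which lives
   elsewhere in this file): the pool is presumably created during
   tool startup along these lines, with the per-pool element count
   chosen arbitrarily here:

       MallocMeta_poolalloc = VG_(newPA)( sizeof(MallocMeta),
                                          1000 /*elems per pool*/,
                                          HG_(zalloc),
                                          "hg.mallocmeta.pool",
                                          HG_(free) );
*/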
sewardjb4112022007-11-09 22:49:28 +00003994
3995static MallocMeta* new_MallocMeta ( void ) {
philippe5fbc9762013-12-01 19:28:48 +00003996 MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
3997 VG_(memset)(md, 0, sizeof(MallocMeta));
sewardjb4112022007-11-09 22:49:28 +00003998 return md;
3999}
4000static void delete_MallocMeta ( MallocMeta* md ) {
philippe5fbc9762013-12-01 19:28:48 +00004001 VG_(freeEltPA)(MallocMeta_poolalloc, md);
sewardjb4112022007-11-09 22:49:28 +00004002}
4003
4004
4005/* Allocate a client block and set up the metadata for it. */
4006
4007static
4008void* handle_alloc ( ThreadId tid,
4009 SizeT szB, SizeT alignB, Bool is_zeroed )
4010{
4011 Addr p;
4012 MallocMeta* md;
4013
4014 tl_assert( ((SSizeT)szB) >= 0 );
4015 p = (Addr)VG_(cli_malloc)(alignB, szB);
4016 if (!p) {
4017 return NULL;
4018 }
4019 if (is_zeroed)
4020 VG_(memset)((void*)p, 0, szB);
4021
4022 /* Note that map_threads_lookup must succeed (cannot assert), since
4023 memory can only be allocated by currently alive threads, hence
4024 they must have an entry in map_threads. */
4025 md = new_MallocMeta();
4026 md->payload = p;
4027 md->szB = szB;
4028 md->where = VG_(record_ExeContext)( tid, 0 );
4029 md->thr = map_threads_lookup( tid );
4030
4031 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
4032
4033 /* Tell the lower level memory wranglers. */
4034 evh__new_mem_heap( p, szB, is_zeroed );
4035
4036 return (void*)p;
4037}
4038
4039/* Re the checks for less-than-zero (also in hg_cli__realloc below):
4040 Cast to a signed type to catch any unexpectedly negative args.
4041 We're assuming here that the size asked for is not greater than
4042 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4043 platforms). */
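/* For instance (illustrative), a client bug like malloc((size_t)-1)
   arrives here with n == SIZE_MAX; (SSizeT)n is then -1, the test
   below fires, and the wrapper returns NULL rather than attempting
   the allocation. */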
4044static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4045 if (((SSizeT)n) < 0) return NULL;
4046 return handle_alloc ( tid, n, VG_(clo_alignment),
4047 /*is_zeroed*/False );
4048}
4049static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4050 if (((SSizeT)n) < 0) return NULL;
4051 return handle_alloc ( tid, n, VG_(clo_alignment),
4052 /*is_zeroed*/False );
4053}
4054static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4055 if (((SSizeT)n) < 0) return NULL;
4056 return handle_alloc ( tid, n, VG_(clo_alignment),
4057 /*is_zeroed*/False );
4058}
4059static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4060 if (((SSizeT)n) < 0) return NULL;
4061 return handle_alloc ( tid, n, align,
4062 /*is_zeroed*/False );
4063}
4064static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4065 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4066 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4067 /*is_zeroed*/True );
4068}
4069
4070
4071/* Free a client block, including getting rid of the relevant
4072 metadata. */
4073
4074static void handle_free ( ThreadId tid, void* p )
4075{
4076 MallocMeta *md, *old_md;
4077 SizeT szB;
4078
4079 /* First see if we can find the metadata for 'p'. */
4080 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4081 if (!md)
4082 return; /* apparently freeing a bogus address. Oh well. */
4083
4084 tl_assert(md->payload == (Addr)p);
4085 szB = md->szB;
4086
4087 /* Nuke the metadata block */
4088 old_md = (MallocMeta*)
4089 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4090 tl_assert(old_md); /* it must be present - we just found it */
4091 tl_assert(old_md == md);
4092 tl_assert(old_md->payload == (Addr)p);
4093
4094 VG_(cli_free)((void*)old_md->payload);
4095 delete_MallocMeta(old_md);
4096
4097 /* Tell the lower level memory wranglers. */
4098 evh__die_mem_heap( (Addr)p, szB );
4099}
4100
4101static void hg_cli__free ( ThreadId tid, void* p ) {
4102 handle_free(tid, p);
4103}
4104static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4105 handle_free(tid, p);
4106}
4107static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4108 handle_free(tid, p);
4109}
4110
4111
4112static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4113{
4114 MallocMeta *md, *md_new, *md_tmp;
4115 SizeT i;
4116
4117 Addr payload = (Addr)payloadV;
4118
4119 if (((SSizeT)new_size) < 0) return NULL;
4120
4121 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4122 if (!md)
4123 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4124
4125 tl_assert(md->payload == payload);
4126
4127 if (md->szB == new_size) {
4128 /* size unchanged */
4129 md->where = VG_(record_ExeContext)(tid, 0);
4130 return payloadV;
4131 }
4132
4133 if (md->szB > new_size) {
4134 /* new size is smaller */
4135 md->szB = new_size;
4136 md->where = VG_(record_ExeContext)(tid, 0);
4137 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4138 return payloadV;
4139 }
4140
4141 /* else */ {
4142 /* new size is bigger */
4143 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4144
4145 /* First half kept and copied, second half new */
4146 // FIXME: shouldn't we use a copier which implements the
4147 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004148 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004149 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004150 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004151 /* FIXME: can anything funny happen here? specifically, if the
4152 old range contained a lock, then die_mem_heap will complain.
4153 Is that the correct behaviour? Not sure. */
4154 evh__die_mem_heap( payload, md->szB );
4155
4156 /* Copy from old to new */
4157 for (i = 0; i < md->szB; i++)
4158 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4159
 4160 /* Because the metadata hash table is indexed by payload address,
4161 we have to get rid of the old hash table entry and make a new
4162 one. We can't just modify the existing metadata in place,
4163 because then it would (almost certainly) be in the wrong hash
4164 chain. */
4165 md_new = new_MallocMeta();
4166 *md_new = *md;
4167
4168 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4169 tl_assert(md_tmp);
4170 tl_assert(md_tmp == md);
4171
4172 VG_(cli_free)((void*)md->payload);
4173 delete_MallocMeta(md);
4174
4175 /* Update fields */
4176 md_new->where = VG_(record_ExeContext)( tid, 0 );
4177 md_new->szB = new_size;
4178 md_new->payload = p_new;
4179 md_new->thr = map_threads_lookup( tid );
4180
4181 /* and add */
4182 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4183
4184 return (void*)p_new;
4185 }
4186}
4187
njn8b140de2009-02-17 04:31:18 +00004188static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4189{
4190 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4191
4192 // There may be slop, but pretend there isn't because only the asked-for
4193 // area will have been shadowed properly.
4194 return ( md ? md->szB : 0 );
4195}
4196
sewardjb4112022007-11-09 22:49:28 +00004197
sewardj095d61e2010-03-11 13:43:18 +00004198/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004199 Slow linear search, with a bit of hash-table help if 'data_addr'
4200 is either the start of a block or up to 15 word-sized steps along
4201 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004202
4203static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4204{
sewardjc8028ad2010-05-05 09:34:42 +00004205 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4206 right at it. */
4207 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4208 return True;
4209 /* else normal interval rules apply */
4210 if (LIKELY(a < mm->payload)) return False;
4211 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4212 return True;
sewardj095d61e2010-03-11 13:43:18 +00004213}
4214
sewardjc8028ad2010-05-05 09:34:42 +00004215Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00004216 /*OUT*/Addr* payload,
4217 /*OUT*/SizeT* szB,
4218 Addr data_addr )
4219{
4220 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004221 Int i;
4222 const Int n_fast_check_words = 16;
4223
4224 /* First, do a few fast searches on the basis that data_addr might
4225 be exactly the start of a block or up to 15 words inside. This
4226 can happen commonly via the creq
4227 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4228 for (i = 0; i < n_fast_check_words; i++) {
4229 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4230 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4231 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4232 goto found;
4233 }
4234
sewardj095d61e2010-03-11 13:43:18 +00004235 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004236 some such, it's hard to see how to do better. We have to check
4237 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004238 VG_(HT_ResetIter)(hg_mallocmeta_table);
4239 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004240 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4241 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004242 }
sewardjc8028ad2010-05-05 09:34:42 +00004243
4244 /* Not found. Bah. */
4245 return False;
4246 /*NOTREACHED*/
4247
4248 found:
4249 tl_assert(mm);
4250 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4251 if (where) *where = mm->where;
4252 if (payload) *payload = mm->payload;
4253 if (szB) *szB = mm->szB;
4254 return True;
sewardj095d61e2010-03-11 13:43:18 +00004255}
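/* Illustrative call site (hypothetical local names): describing an
   arbitrary address 'a' for error reporting.

       ExeContext* where   = NULL;
       Addr        payload = 0;
       SizeT       szB     = 0;
       if (HG_(mm_find_containing_block)( &where, &payload, &szB, a )) {
          // 'a' lies inside the szB-byte heap block starting at
          // 'payload', which was allocated at 'where'.
       }

   As the found: exit above shows, any out-parameter may be passed as
   NULL if the caller does not need it. */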
4256
4257
sewardjb4112022007-11-09 22:49:28 +00004258/*--------------------------------------------------------------*/
4259/*--- Instrumentation ---*/
4260/*--------------------------------------------------------------*/
4261
sewardjcafe5052013-01-17 14:24:35 +00004262#define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
sewardjffce8152011-06-24 10:09:41 +00004263#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4264#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4265#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4266#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4267#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4268
sewardjcafe5052013-01-17 14:24:35 +00004269/* This takes and returns atoms, of course. Not full IRExprs. */
4270static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4271{
4272 tl_assert(arg1 && arg2);
4273 tl_assert(isIRAtom(arg1));
4274 tl_assert(isIRAtom(arg2));
4275 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4276 code, I know. */
4277 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4278 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4279 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4280 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4281 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4282 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4283 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4284 mkexpr(wide2))));
4285 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4286 return mkexpr(res);
4287}
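/* For example (illustrative temporary numbers), mk_And1(sbOut, t9,
   t11) for two Ity_I1 atoms appends flat IR of this shape and hands
   back t15 as the combined guard:

       t12 = 1Uto32(t9)
       t13 = 1Uto32(t11)
       t14 = And32(t12,t13)
       t15 = 32to1(t14)
*/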
4288
sewardjffce8152011-06-24 10:09:41 +00004289static void instrument_mem_access ( IRSB* sbOut,
sewardjb4112022007-11-09 22:49:28 +00004290 IRExpr* addr,
4291 Int szB,
4292 Bool isStore,
sewardjffce8152011-06-24 10:09:41 +00004293 Int hWordTy_szB,
sewardjcafe5052013-01-17 14:24:35 +00004294 Int goff_sp,
4295 IRExpr* guard ) /* NULL => True */
sewardjb4112022007-11-09 22:49:28 +00004296{
4297 IRType tyAddr = Ity_INVALID;
florian6bf37262012-10-21 03:23:36 +00004298 const HChar* hName = NULL;
sewardjb4112022007-11-09 22:49:28 +00004299 void* hAddr = NULL;
4300 Int regparms = 0;
4301 IRExpr** argv = NULL;
4302 IRDirty* di = NULL;
4303
sewardjffce8152011-06-24 10:09:41 +00004304 // THRESH is the size of the window above SP (well,
4305 // mostly above) that we assume implies a stack reference.
4306 const Int THRESH = 4096 * 4; // somewhat arbitrary
4307 const Int rz_szB = VG_STACK_REDZONE_SZB;
4308
sewardjb4112022007-11-09 22:49:28 +00004309 tl_assert(isIRAtom(addr));
4310 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4311
sewardjffce8152011-06-24 10:09:41 +00004312 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
sewardjb4112022007-11-09 22:49:28 +00004313 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4314
4315 /* So the effective address is in 'addr' now. */
4316 regparms = 1; // unless stated otherwise
4317 if (isStore) {
4318 switch (szB) {
4319 case 1:
sewardj23f12002009-07-24 08:45:08 +00004320 hName = "evh__mem_help_cwrite_1";
4321 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004322 argv = mkIRExprVec_1( addr );
4323 break;
4324 case 2:
sewardj23f12002009-07-24 08:45:08 +00004325 hName = "evh__mem_help_cwrite_2";
4326 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004327 argv = mkIRExprVec_1( addr );
4328 break;
4329 case 4:
sewardj23f12002009-07-24 08:45:08 +00004330 hName = "evh__mem_help_cwrite_4";
4331 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004332 argv = mkIRExprVec_1( addr );
4333 break;
4334 case 8:
sewardj23f12002009-07-24 08:45:08 +00004335 hName = "evh__mem_help_cwrite_8";
4336 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004337 argv = mkIRExprVec_1( addr );
4338 break;
4339 default:
4340 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4341 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004342 hName = "evh__mem_help_cwrite_N";
4343 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004344 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4345 break;
4346 }
4347 } else {
4348 switch (szB) {
4349 case 1:
sewardj23f12002009-07-24 08:45:08 +00004350 hName = "evh__mem_help_cread_1";
4351 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004352 argv = mkIRExprVec_1( addr );
4353 break;
4354 case 2:
sewardj23f12002009-07-24 08:45:08 +00004355 hName = "evh__mem_help_cread_2";
4356 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004357 argv = mkIRExprVec_1( addr );
4358 break;
4359 case 4:
sewardj23f12002009-07-24 08:45:08 +00004360 hName = "evh__mem_help_cread_4";
4361 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004362 argv = mkIRExprVec_1( addr );
4363 break;
4364 case 8:
sewardj23f12002009-07-24 08:45:08 +00004365 hName = "evh__mem_help_cread_8";
4366 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004367 argv = mkIRExprVec_1( addr );
4368 break;
4369 default:
4370 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4371 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004372 hName = "evh__mem_help_cread_N";
4373 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004374 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4375 break;
4376 }
4377 }
4378
sewardjffce8152011-06-24 10:09:41 +00004379 /* Create the helper. */
sewardjb4112022007-11-09 22:49:28 +00004380 tl_assert(hName);
4381 tl_assert(hAddr);
4382 tl_assert(argv);
4383 di = unsafeIRDirty_0_N( regparms,
4384 hName, VG_(fnptr_to_fnentry)( hAddr ),
4385 argv );
sewardjffce8152011-06-24 10:09:41 +00004386
4387 if (! HG_(clo_check_stack_refs)) {
4388 /* We're ignoring memory references which are (obviously) to the
4389 stack. In fact just skip stack refs that are within 4 pages
4390 of SP (SP - the redzone, really), as that's simple, easy, and
4391 filters out most stack references. */
4392 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4393 some arbitrary N. If that is true then addr is outside the
4394 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4395 pages) then we can say addr is within a few pages of SP and
4396 so can't possibly be a heap access, and so can be skipped.
4397
4398 Note that the condition simplifies to
4399 (addr - SP + RZ) >u N
4400 which generates better code in x86/amd64 backends, but it does
4401 not unfortunately simplify to
4402 (addr - SP) >u (N - RZ)
4403 (would be beneficial because N - RZ is a constant) because
 4404 wraparound arithmetic messes up the comparison. E.g.
4405 20 >u 10 == True,
4406 but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4407 */
4408 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4409 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4410
4411 /* "addr - SP" */
4412 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4413 addStmtToIRSB(
4414 sbOut,
4415 assign(addr_minus_sp,
4416 tyAddr == Ity_I32
4417 ? binop(Iop_Sub32, addr, mkexpr(sp))
4418 : binop(Iop_Sub64, addr, mkexpr(sp)))
4419 );
4420
4421 /* "addr - SP + RZ" */
4422 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4423 addStmtToIRSB(
4424 sbOut,
4425 assign(diff,
4426 tyAddr == Ity_I32
4427 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4428 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4429 );
4430
sewardjcafe5052013-01-17 14:24:35 +00004431 /* guardA == "guard on the address" */
4432 IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
sewardjffce8152011-06-24 10:09:41 +00004433 addStmtToIRSB(
4434 sbOut,
sewardjcafe5052013-01-17 14:24:35 +00004435 assign(guardA,
sewardjffce8152011-06-24 10:09:41 +00004436 tyAddr == Ity_I32
4437 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4438 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4439 );
sewardjcafe5052013-01-17 14:24:35 +00004440 di->guard = mkexpr(guardA);
4441 }
4442
4443 /* If there's a guard on the access itself (as supplied by the
4444 caller of this routine), we need to AND that in to any guard we
4445 might already have. */
4446 if (guard) {
4447 di->guard = mk_And1(sbOut, di->guard, guard);
sewardjffce8152011-06-24 10:09:41 +00004448 }
4449
4450 /* Add the helper. */
4451 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
sewardjb4112022007-11-09 22:49:28 +00004452}
4453
4454
sewardja0eee322009-07-31 08:46:35 +00004455/* Figure out if GA is a guest code address in the dynamic linker, and
4456 if so return True. Otherwise (and in case of any doubt) return
 4457 False. (errs on the safe side, with False as the safe value) */
4458static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4459{
4460 DebugInfo* dinfo;
florian19f91bb2012-11-10 22:29:54 +00004461 const HChar* soname;
sewardja0eee322009-07-31 08:46:35 +00004462 if (0) return False;
4463
sewardje3f1e592009-07-31 09:41:29 +00004464 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004465 if (!dinfo) return False;
4466
sewardje3f1e592009-07-31 09:41:29 +00004467 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004468 tl_assert(soname);
4469 if (0) VG_(printf)("%s\n", soname);
4470
4471# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004472 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004473 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4474 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4475 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4476 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4477# elif defined(VGO_darwin)
4478 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4479# else
4480# error "Unsupported OS"
4481# endif
4482 return False;
4483}
4484
sewardjb4112022007-11-09 22:49:28 +00004485static
4486IRSB* hg_instrument ( VgCallbackClosure* closure,
4487 IRSB* bbIn,
4488 VexGuestLayout* layout,
4489 VexGuestExtents* vge,
florianca503be2012-10-07 21:59:42 +00004490 VexArchInfo* archinfo_host,
sewardjb4112022007-11-09 22:49:28 +00004491 IRType gWordTy, IRType hWordTy )
4492{
sewardj1c0ce7a2009-07-01 08:10:49 +00004493 Int i;
4494 IRSB* bbOut;
4495 Addr64 cia; /* address of current insn */
4496 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004497 Bool inLDSO = False;
4498 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004499
sewardjffce8152011-06-24 10:09:41 +00004500 const Int goff_sp = layout->offset_SP;
4501
sewardjb4112022007-11-09 22:49:28 +00004502 if (gWordTy != hWordTy) {
4503 /* We don't currently support this case. */
4504 VG_(tool_panic)("host/guest word size mismatch");
4505 }
4506
sewardja0eee322009-07-31 08:46:35 +00004507 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4508 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4509 }
4510
sewardjb4112022007-11-09 22:49:28 +00004511 /* Set up BB */
4512 bbOut = emptyIRSB();
4513 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4514 bbOut->next = deepCopyIRExpr(bbIn->next);
4515 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004516 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004517
4518 // Copy verbatim any IR preamble preceding the first IMark
4519 i = 0;
4520 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4521 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4522 i++;
4523 }
4524
sewardj1c0ce7a2009-07-01 08:10:49 +00004525 // Get the first statement, and initial cia from it
4526 tl_assert(bbIn->stmts_used > 0);
4527 tl_assert(i < bbIn->stmts_used);
4528 st = bbIn->stmts[i];
4529 tl_assert(Ist_IMark == st->tag);
4530 cia = st->Ist.IMark.addr;
4531 st = NULL;
4532
sewardjb4112022007-11-09 22:49:28 +00004533 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004534 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004535 tl_assert(st);
4536 tl_assert(isFlatIRStmt(st));
4537 switch (st->tag) {
4538 case Ist_NoOp:
4539 case Ist_AbiHint:
4540 case Ist_Put:
4541 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004542 case Ist_Exit:
4543 /* None of these can contain any memory references. */
4544 break;
4545
sewardj1c0ce7a2009-07-01 08:10:49 +00004546 case Ist_IMark:
4547 /* no mem refs, but note the insn address. */
4548 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004549 /* Don't instrument the dynamic linker. It generates a
4550 lot of races which we just expensively suppress, so
4551 it's pointless.
4552
4553 Avoid flooding is_in_dynamic_linker_shared_object with
4554 requests by only checking at transitions between 4K
4555 pages. */
4556 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4557 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4558 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4559 inLDSO = is_in_dynamic_linker_shared_object(cia);
4560 } else {
4561 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4562 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004563 break;
4564
sewardjb4112022007-11-09 22:49:28 +00004565 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004566 switch (st->Ist.MBE.event) {
4567 case Imbe_Fence:
4568 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004569 default:
4570 goto unhandled;
4571 }
sewardjb4112022007-11-09 22:49:28 +00004572 break;
4573
sewardj1c0ce7a2009-07-01 08:10:49 +00004574 case Ist_CAS: {
4575 /* Atomic read-modify-write cycle. Just pretend it's a
4576 read. */
4577 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004578 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4579 if (isDCAS) {
4580 tl_assert(cas->expdHi);
4581 tl_assert(cas->dataHi);
4582 } else {
4583 tl_assert(!cas->expdHi);
4584 tl_assert(!cas->dataHi);
4585 }
4586 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004587 if (!inLDSO) {
4588 instrument_mem_access(
4589 bbOut,
4590 cas->addr,
4591 (isDCAS ? 2 : 1)
4592 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4593 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004594 sizeofIRType(hWordTy), goff_sp,
4595 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004596 );
4597 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004598 break;
4599 }
4600
sewardjdb5907d2009-11-26 17:20:21 +00004601 case Ist_LLSC: {
4602 /* We pretend store-conditionals don't exist, viz, ignore
4603 them. Whereas load-linked's are treated the same as
4604 normal loads. */
4605 IRType dataTy;
4606 if (st->Ist.LLSC.storedata == NULL) {
4607 /* LL */
4608 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004609 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004610 instrument_mem_access(
4611 bbOut,
4612 st->Ist.LLSC.addr,
4613 sizeofIRType(dataTy),
4614 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004615 sizeofIRType(hWordTy), goff_sp,
4616 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004617 );
4618 }
sewardjdb5907d2009-11-26 17:20:21 +00004619 } else {
4620 /* SC */
4621 /*ignore */
4622 }
4623 break;
4624 }
4625
4626 case Ist_Store:
sewardjdb5907d2009-11-26 17:20:21 +00004627 if (!inLDSO) {
4628 instrument_mem_access(
4629 bbOut,
4630 st->Ist.Store.addr,
4631 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4632 True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004633 sizeofIRType(hWordTy), goff_sp,
4634 NULL/*no-guard*/
sewardjdb5907d2009-11-26 17:20:21 +00004635 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004636 }
njnb83caf22009-05-25 01:47:56 +00004637 break;
sewardjb4112022007-11-09 22:49:28 +00004638
sewardjcafe5052013-01-17 14:24:35 +00004639 case Ist_StoreG: {
4640 IRStoreG* sg = st->Ist.StoreG.details;
4641 IRExpr* data = sg->data;
4642 IRExpr* addr = sg->addr;
4643 IRType type = typeOfIRExpr(bbIn->tyenv, data);
4644 tl_assert(type != Ity_INVALID);
4645 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4646 True/*isStore*/,
4647 sizeofIRType(hWordTy),
4648 goff_sp, sg->guard );
4649 break;
4650 }
4651
4652 case Ist_LoadG: {
4653 IRLoadG* lg = st->Ist.LoadG.details;
4654 IRType type = Ity_INVALID; /* loaded type */
4655 IRType typeWide = Ity_INVALID; /* after implicit widening */
4656 IRExpr* addr = lg->addr;
4657 typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4658 tl_assert(type != Ity_INVALID);
4659 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4660 False/*!isStore*/,
4661 sizeofIRType(hWordTy),
4662 goff_sp, lg->guard );
4663 break;
4664 }
4665
sewardjb4112022007-11-09 22:49:28 +00004666 case Ist_WrTmp: {
4667 IRExpr* data = st->Ist.WrTmp.data;
4668 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004669 if (!inLDSO) {
4670 instrument_mem_access(
4671 bbOut,
4672 data->Iex.Load.addr,
4673 sizeofIRType(data->Iex.Load.ty),
4674 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004675 sizeofIRType(hWordTy), goff_sp,
4676 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004677 );
4678 }
sewardjb4112022007-11-09 22:49:28 +00004679 }
4680 break;
4681 }
4682
4683 case Ist_Dirty: {
4684 Int dataSize;
4685 IRDirty* d = st->Ist.Dirty.details;
4686 if (d->mFx != Ifx_None) {
4687 /* This dirty helper accesses memory. Collect the
4688 details. */
4689 tl_assert(d->mAddr != NULL);
4690 tl_assert(d->mSize != 0);
4691 dataSize = d->mSize;
4692 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004693 if (!inLDSO) {
4694 instrument_mem_access(
4695 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004696 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004697 );
4698 }
sewardjb4112022007-11-09 22:49:28 +00004699 }
4700 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004701 if (!inLDSO) {
4702 instrument_mem_access(
4703 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004704 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004705 );
4706 }
sewardjb4112022007-11-09 22:49:28 +00004707 }
4708 } else {
4709 tl_assert(d->mAddr == NULL);
4710 tl_assert(d->mSize == 0);
4711 }
4712 break;
4713 }
4714
4715 default:
sewardjf98e1c02008-10-25 16:22:41 +00004716 unhandled:
4717 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004718 tl_assert(0);
4719
4720 } /* switch (st->tag) */
4721
4722 addStmtToIRSB( bbOut, st );
4723 } /* iterate over bbIn->stmts */
4724
4725 return bbOut;
4726}
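/* Instrumentation sketch (illustrative; the exact ppIRStmt output
   may differ): a 4-byte load in the input superblock,

       t3 = LDle:I32(t2)

   comes out preceded by a dirty helper call, roughly

       DIRTY 1:I1 ::: evh__mem_help_cread_4(t2)
       t3 = LDle:I32(t2)

   and when --check-stack-refs=no is in force, the DIRTY's guard is
   the "more than THRESH bytes away from SP" condition constructed in
   instrument_mem_access above. */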
4727
sewardjffce8152011-06-24 10:09:41 +00004728#undef binop
4729#undef mkexpr
4730#undef mkU32
4731#undef mkU64
4732#undef assign
4733
sewardjb4112022007-11-09 22:49:28 +00004734
4735/*----------------------------------------------------------------*/
4736/*--- Client requests ---*/
4737/*----------------------------------------------------------------*/
4738
4739/* Sheesh. Yet another goddam finite map. */
4740static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4741
4742static void map_pthread_t_to_Thread_INIT ( void ) {
4743 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004744 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4745 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004746 tl_assert(map_pthread_t_to_Thread != NULL);
4747 }
4748}
4749
philipped40aff52014-06-16 20:00:14 +00004750/* A list of Ada dependent tasks and their masters. Used to
 4751 implement the Ada task termination semantics of the
 4752 gcc gnat Ada runtime. */
4753typedef
4754 struct {
4755 void* dependent; // Ada Task Control Block of the Dependent
4756 void* master; // ATCB of the master
4757 Word master_level; // level of dependency between master and dependent
4758 Thread* hg_dependent; // helgrind Thread* for dependent task.
4759 }
4760 GNAT_dmml;
4761static XArray* gnat_dmmls; /* of GNAT_dmml */
4762static void gnat_dmmls_INIT (void)
4763{
4764 if (UNLIKELY(gnat_dmmls == NULL)) {
4765 gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
4766 HG_(free),
4767 sizeof(GNAT_dmml) );
4768 }
4769}
philippef5774342014-05-03 11:12:50 +00004770static void print_monitor_help ( void )
4771{
4772 VG_(gdb_printf)
4773 (
4774"\n"
4775"helgrind monitor commands:\n"
philippef5774342014-05-03 11:12:50 +00004776" info locks : show list of locks and their status\n"
4777"\n");
4778}
4779
4780/* return True if request recognised, False otherwise */
4781static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4782{
philippef5774342014-05-03 11:12:50 +00004783 HChar* wcmd;
 4784 HChar s[VG_(strlen(req)) + 1]; /* copy for strtok_r; +1 for the NUL */
4785 HChar *ssaveptr;
4786 Int kwdid;
4787
4788 VG_(strcpy) (s, req);
4789
4790 wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4791 /* NB: if possible, avoid introducing a new command below which
4792 starts with the same first letter(s) as an already existing
4793 command. This ensures a shorter abbreviation for the user. */
4794 switch (VG_(keyword_id)
philippe07c08522014-05-14 20:39:27 +00004795 ("help info",
philippef5774342014-05-03 11:12:50 +00004796 wcmd, kwd_report_duplicated_matches)) {
4797 case -2: /* multiple matches */
4798 return True;
4799 case -1: /* not found */
4800 return False;
4801 case 0: /* help */
4802 print_monitor_help();
4803 return True;
4804 case 1: /* info */
philippef5774342014-05-03 11:12:50 +00004805 wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4806 switch (kwdid = VG_(keyword_id)
4807 ("locks",
4808 wcmd, kwd_report_all)) {
4809 case -2:
4810 case -1:
4811 break;
4812 case 0: // locks
4813 {
4814 Int i;
4815 Lock* lk;
4816 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
4817 pp_Lock(0, lk,
4818 True /* show_lock_addrdescr */,
4819 False /* show_internal_data */);
4820 }
4821 if (i == 0)
4822 VG_(gdb_printf) ("no locks\n");
4823 }
4824 break;
4825 default:
4826 tl_assert(0);
4827 }
4828 return True;
philippef5774342014-05-03 11:12:50 +00004829 default:
4830 tl_assert(0);
4831 return False;
4832 }
4833}
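/* Example session (illustrative; the exact lock listing depends on
   pp_Lock):

       $ valgrind --tool=helgrind --vgdb=yes --vgdb-error=0 ./a.out
       (in another terminal)
       $ gdb ./a.out
       (gdb) target remote | vgdb
       (gdb) monitor help
       (gdb) monitor info locks

   Since VG_(keyword_id) accepts unambiguous abbreviations, "monitor
   i l" works too. */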
sewardjb4112022007-11-09 22:49:28 +00004834
4835static
4836Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4837{
philippef5774342014-05-03 11:12:50 +00004838 if (!VG_IS_TOOL_USERREQ('H','G',args[0])
4839 && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
sewardjb4112022007-11-09 22:49:28 +00004840 return False;
4841
4842 /* Anything that gets past the above check is one of ours, so we
4843 should be able to handle it. */
4844
4845 /* default, meaningless return value, unless otherwise set */
4846 *ret = 0;
4847
4848 switch (args[0]) {
4849
4850 /* --- --- User-visible client requests --- --- */
4851
4852 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004853 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004854 args[1], args[2]);
4855 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004856 are any held locks etc in the area. Calling evh__die_mem
4857 and then evh__new_mem is a bit inefficient; probably just
4858 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004859 if (args[2] > 0) { /* length */
4860 evh__die_mem(args[1], args[2]);
4861 /* and then set it to New */
4862 evh__new_mem(args[1], args[2]);
4863 }
4864 break;
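      /* Client-side usage (illustrative), via the corresponding
         macro in helgrind.h:

             #include "helgrind.h"
             char buf[256];
             ...
             VALGRIND_HG_CLEAN_MEMORY(buf, sizeof buf);
      */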
4865
sewardjc8028ad2010-05-05 09:34:42 +00004866 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4867 Addr payload = 0;
4868 SizeT pszB = 0;
4869 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4870 args[1]);
4871 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4872 if (pszB > 0) {
4873 evh__die_mem(payload, pszB);
4874 evh__new_mem(payload, pszB);
4875 }
4876 *ret = pszB;
4877 } else {
4878 *ret = (UWord)-1;
4879 }
4880 break;
4881 }
4882
sewardj406bac82010-03-03 23:03:40 +00004883 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4884 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4885 args[1], args[2]);
4886 if (args[2] > 0) { /* length */
4887 evh__untrack_mem(args[1], args[2]);
4888 }
4889 break;
4890
4891 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4892 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4893 args[1], args[2]);
4894 if (args[2] > 0) { /* length */
4895 evh__new_mem(args[1], args[2]);
4896 }
4897 break;
4898
sewardjb4112022007-11-09 22:49:28 +00004899 /* --- --- Client requests for Helgrind's use only --- --- */
4900
4901 /* Some thread is telling us its pthread_t value. Record the
4902 binding between that and the associated Thread*, so we can
4903 later find the Thread* again when notified of a join by the
4904 thread. */
4905 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4906 Thread* my_thr = NULL;
4907 if (0)
4908 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4909 (void*)args[1]);
4910 map_pthread_t_to_Thread_INIT();
4911 my_thr = map_threads_maybe_lookup( tid );
4912 /* This assertion should hold because the map_threads (tid to
4913 Thread*) binding should have been made at the point of
4914 low-level creation of this thread, which should have
4915 happened prior to us getting this client request for it.
4916 That's because this client request is sent from
 4917 client-world by the 'thread_wrapper' function, which
4918 only runs once the thread has been low-level created. */
4919 tl_assert(my_thr != NULL);
4920 /* So now we know that (pthread_t)args[1] is associated with
4921 (Thread*)my_thr. Note that down. */
4922 if (0)
4923 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4924 (void*)args[1], (void*)my_thr );
florian6bf37262012-10-21 03:23:36 +00004925 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004926 break;
4927 }
4928
4929 case _VG_USERREQ__HG_PTH_API_ERROR: {
4930 Thread* my_thr = NULL;
4931 map_pthread_t_to_Thread_INIT();
4932 my_thr = map_threads_maybe_lookup( tid );
4933 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004934 HG_(record_error_PthAPIerror)(
florian6bf37262012-10-21 03:23:36 +00004935 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004936 break;
4937 }
4938
4939 /* This thread (tid) has completed a join with the quitting
4940 thread whose pthread_t is in args[1]. */
4941 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4942 Thread* thr_q = NULL; /* quitter Thread* */
4943 Bool found = False;
4944 if (0)
4945 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4946 (void*)args[1]);
4947 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004948 found = VG_(lookupFM)( map_pthread_t_to_Thread,
florian6bf37262012-10-21 03:23:36 +00004949 NULL, (UWord*)&thr_q, (UWord)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004950 /* Can this fail? It would mean that our pthread_join
4951 wrapper observed a successful join on args[1] yet that
4952 thread never existed (or at least, it never lodged an
4953 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4954 sounds like a bug in the threads library. */
4955 // FIXME: get rid of this assertion; handle properly
4956 tl_assert(found);
4957 if (found) {
4958 if (0)
4959 VG_(printf)(".................... quitter Thread* = %p\n",
4960 thr_q);
4961 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4962 }
4963 break;
4964 }
4965
philipped40aff52014-06-16 20:00:14 +00004966 /* This thread (tid) is informing us of its master. */
4967 case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
4968 GNAT_dmml dmml;
4969 dmml.dependent = (void*)args[1];
4970 dmml.master = (void*)args[2];
4971 dmml.master_level = (Word)args[3];
4972 dmml.hg_dependent = map_threads_maybe_lookup( tid );
4973 tl_assert(dmml.hg_dependent);
4974
4975 if (0)
4976 VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
4977 "dependent = %p master = %p master_level = %ld"
4978 " dependent Thread* = %p\n",
4979 (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
4980 dmml.hg_dependent);
4981 gnat_dmmls_INIT();
4982 VG_(addToXA) (gnat_dmmls, &dmml);
4983 break;
4984 }
4985
4986 /* This thread (tid) is informing us that it has completed a
4987 master. */
4988 case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
4989 Word n;
4990 const Thread *stayer = map_threads_maybe_lookup( tid );
4991 const void *master = (void*)args[1];
4992 const Word master_level = (Word) args[2];
4993 tl_assert(stayer);
4994
4995 if (0)
4996 VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
4997 "self_id = %p master_level = %ld Thread* = %p\n",
4998 (Int)tid, master, master_level, stayer);
4999
5000 gnat_dmmls_INIT();
5001 /* Reverse loop on the array, simulating a pthread_join for
5002 the Dependent tasks of the completed master, and removing
5003 them from the array. */
5004 for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5005 GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5006 if (dmml->master == master
5007 && dmml->master_level == master_level) {
5008 if (0)
5009 VG_(printf)("quitter %p dependency to stayer %p\n",
5010 dmml->hg_dependent->hbthr, stayer->hbthr);
5011 tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5012 generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5013 stayer->hbthr);
5014 VG_(removeIndexXA) (gnat_dmmls, n);
5015 }
5016 }
5017 break;
5018 }
5019
sewardjb4112022007-11-09 22:49:28 +00005020 /* EXPOSITION only: by intercepting lock init events we can show
5021 the user where the lock was initialised, rather than only
5022 being able to show where it was first locked. Intercepting
5023 lock initialisations is not necessary for the basic operation
5024 of the race checker. */
5025 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5026 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5027 break;
5028
sewardjc02f6c42013-10-14 13:51:25 +00005029 /* mutex=arg[1], mutex_is_init=arg[2] */
sewardjb4112022007-11-09 22:49:28 +00005030 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00005031 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjb4112022007-11-09 22:49:28 +00005032 break;
5033
5034 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
5035 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
5036 break;
5037
5038 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
5039 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
5040 break;
5041
5042 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
5043 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5044 break;
5045
5046 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
5047 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5048 break;
5049
5050 /* This thread is about to do pthread_cond_signal on the
5051 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
5052 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
5053 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
5054 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
5055 break;
5056
5057 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5058 Returns a flag indicating whether or not the mutex is believed to be
5059 valid for this operation. */
5060 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
5061 Bool mutex_is_valid
5062 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
5063 (void*)args[2] );
5064 *ret = mutex_is_valid ? 1 : 0;
5065 break;
5066 }
5067
philippe19dfe032013-03-24 20:10:23 +00005068 /* Thread successfully completed pthread_cond_init:
5069 cond=arg[1], cond_attr=arg[2] */
5070 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
5071 evh__HG_PTHREAD_COND_INIT_POST( tid,
5072 (void*)args[1], (void*)args[2] );
5073 break;
5074
sewardjc02f6c42013-10-14 13:51:25 +00005075 /* cond=arg[1], cond_is_init=arg[2] */
sewardjf98e1c02008-10-25 16:22:41 +00005076 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00005077 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjf98e1c02008-10-25 16:22:41 +00005078 break;
5079
sewardjb4112022007-11-09 22:49:28 +00005080 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
5081 mutex=arg[2] */
5082 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
5083 evh__HG_PTHREAD_COND_WAIT_POST( tid,
sewardjff427c92013-10-14 12:13:52 +00005084 (void*)args[1], (void*)args[2],
5085 (Bool)args[3] );
sewardjb4112022007-11-09 22:49:28 +00005086 break;
5087
5088 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5089 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5090 break;
5091
5092 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5093 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5094 break;
5095
sewardj789c3c52008-02-25 12:10:07 +00005096 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00005097 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00005098 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5099 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00005100 break;
5101
5102 /* rwlock=arg[1], isW=arg[2] */
5103 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
5104 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5105 break;
5106
5107 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
5108 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5109 break;
5110
5111 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
5112 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5113 break;
5114
sewardj11e352f2007-11-30 11:11:02 +00005115 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5116 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00005117 break;
5118
sewardj11e352f2007-11-30 11:11:02 +00005119 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5120 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005121 break;
5122
sewardj11e352f2007-11-30 11:11:02 +00005123 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
5124 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5125 break;
5126
5127 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
5128 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005129 break;
5130
sewardj9f569b72008-11-13 13:33:09 +00005131 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00005132 /* pth_bar_t*, ulong count, ulong resizable */
5133 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5134 args[2], args[3] );
5135 break;
5136
5137 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5138 /* pth_bar_t*, ulong newcount */
5139 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5140 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00005141 break;
5142
5143 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5144 /* pth_bar_t* */
5145 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5146 break;
5147
5148 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5149 /* pth_bar_t* */
5150 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5151 break;
sewardjb4112022007-11-09 22:49:28 +00005152
sewardj5a644da2009-08-11 10:35:58 +00005153 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5154 /* pth_spinlock_t* */
5155 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5156 break;
5157
5158 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5159 /* pth_spinlock_t* */
5160 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5161 break;
5162
5163 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5164 /* pth_spinlock_t*, Word */
5165 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5166 break;
5167
5168 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5169 /* pth_spinlock_t* */
5170 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5171 break;
5172
5173 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5174 /* pth_spinlock_t* */
5175 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5176 break;
5177
sewardjed2e72e2009-08-14 11:08:24 +00005178 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
florian19f91bb2012-11-10 22:29:54 +00005179 /* HChar* who */
sewardjed2e72e2009-08-14 11:08:24 +00005180 HChar* who = (HChar*)args[1];
5181 HChar buf[50 + 50];
5182 Thread* thr = map_threads_maybe_lookup( tid );
5183 tl_assert( thr ); /* I must be mapped */
5184 tl_assert( who );
5185 tl_assert( VG_(strlen)(who) <= 50 );
5186 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5187 /* record_error_Misc strdup's buf, so this is safe: */
5188 HG_(record_error_Misc)( thr, buf );
5189 break;
5190 }
5191
5192 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5193 /* UWord arbitrary-SO-tag */
5194 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5195 break;
5196
5197 case _VG_USERREQ__HG_USERSO_RECV_POST:
5198 /* UWord arbitrary-SO-tag */
5199 evh__HG_USERSO_RECV_POST( tid, args[1] );
5200 break;
5201
sewardj6015d0e2011-03-11 19:10:48 +00005202 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5203 /* UWord arbitrary-SO-tag */
5204 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5205 break;
5206
philippef5774342014-05-03 11:12:50 +00005207 case VG_USERREQ__GDB_MONITOR_COMMAND: {
5208 Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
5209 if (handled)
5210 *ret = 1;
5211 else
5212 *ret = 0;
5213 return handled;
5214 }
5215
sewardjb4112022007-11-09 22:49:28 +00005216 default:
5217 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00005218 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
5219 args[0]);
sewardjb4112022007-11-09 22:49:28 +00005220 }
5221
5222 return True;
5223}
5224
5225
5226/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00005227/*--- Setup ---*/
5228/*----------------------------------------------------------------*/
5229
florian19f91bb2012-11-10 22:29:54 +00005230static Bool hg_process_cmd_line_option ( const HChar* arg )
sewardjb4112022007-11-09 22:49:28 +00005231{
florian19f91bb2012-11-10 22:29:54 +00005232 const HChar* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00005233
njn83df0b62009-02-25 01:01:05 +00005234 if VG_BOOL_CLO(arg, "--track-lockorders",
5235 HG_(clo_track_lockorders)) {}
5236 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5237 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00005238
5239 else if VG_XACT_CLO(arg, "--history-level=none",
5240 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00005241 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00005242 HG_(clo_history_level), 1);
5243 else if VG_XACT_CLO(arg, "--history-level=full",
5244 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00005245
sewardjf585e482009-08-16 22:52:29 +00005246 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00005247 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00005248 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00005249 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00005250
sewardj11e352f2007-11-30 11:11:02 +00005251 /* "stuvwx" --> stuvwx (binary) */
njn83df0b62009-02-25 01:01:05 +00005252 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00005253 Int j;
sewardjb4112022007-11-09 22:49:28 +00005254
njn83df0b62009-02-25 01:01:05 +00005255 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00005256 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00005257 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00005258 return False;
5259 }
sewardj11e352f2007-11-30 11:11:02 +00005260 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00005261 if ('0' == tmp_str[j]) { /* do nothing */ }
5262 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00005263 else {
sewardj11e352f2007-11-30 11:11:02 +00005264 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00005265 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00005266 return False;
5267 }
5268 }
sewardjf98e1c02008-10-25 16:22:41 +00005269 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00005270 }
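   /* For example, --hg-sanity-flags=010000 sets just the SCE_LAOG
      bit, enabling the laog consistency check performed at the end
      of laog__pre_thread_acquires_lock. */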
5271
sewardj622fe492011-03-11 21:06:59 +00005272 else if VG_BOOL_CLO(arg, "--free-is-write",
5273 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00005274
5275 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5276 HG_(clo_vts_pruning), 0);
5277 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5278 HG_(clo_vts_pruning), 1);
5279 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5280 HG_(clo_vts_pruning), 2);
5281
5282 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5283 HG_(clo_check_stack_refs)) {}
5284
sewardjb4112022007-11-09 22:49:28 +00005285 else
5286 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5287
5288 return True;
5289}

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
   );
}
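
/* Example invocation exercising the options above (illustrative;
   ./myprog stands for any threaded client program):

      valgrind --tool=helgrind --free-is-write=yes \
               --history-level=approx --conflict-cache-size=2000000 ./myprog
*/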

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:  pruning is never done (may cause big space leaks in Helgrind)\n"
"       auto:   done just often enough to keep space usage under control\n"
"       always: done after every VTS GC (mostly just a big time waster)\n"
   );
}
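
/* Example (illustrative): enable sanity checking at lock/unlock and at
   thread create/join events, and prune VTSs after every VTS GC:

      valgrind --tool=helgrind --hg-sanity-flags=000011 \
               --vts-pruning=always ./myprog
*/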

static void hg_print_stats (void)
{

   if (1) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog, "univ_laog" );
      }
   }

   //zz VG_(printf)("\n");
   //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
   //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
   //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
   //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz             stats__hbefore_stk_hwm);
   //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
   //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)("        locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // This in fact only prints stats.
}
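
/* hg_print_stats runs when the core option --stats=yes is in force:
   hg_fini (below) calls it if VG_(clo_stats) is set, and it is also
   registered via VG_(needs_print_stats) in hg_pre_clo_init. */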

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
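
/* Illustrative contract for the callback above: if libhb asks for
   nRequest == 8 frames but the client stack is only 3 deep, frames[0..2]
   receive real program counters and frames[3..7] are zero-filled, so the
   caller always gets exactly nRequest entries. */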

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}
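
/* Both callbacks translate a libhb Thr* back to a Helgrind Thread* and
   then to a core ThreadId; as the _SLOW suffix suggests, the reverse
   lookup is a linear scan of the thread map rather than a constant-time
   map access. */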


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_info_location (Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (a);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2013, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_print_stats)         (hg_print_stats);
   VG_(needs_info_location)       (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );
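
   /* Sketch of the access discipline this table supports, per
      pub_tool_hashtable.h (illustrative; 'md' is a hypothetical
      MallocMeta* whose leading fields stand in for the generic node's
      'next' and 'key'):

         VG_(HT_add_node)( hg_mallocmeta_table, (void*)md );
         md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload_addr );
   */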

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));
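
   /* A minimal sketch of the pool's intended use elsewhere in this file,
      per pub_tool_poolalloc.h (illustrative only):

         MallocMeta* md = VG_(allocEltPA)( MallocMeta_poolalloc );
         ... fill in *md ...
         VG_(freeEltPA)( MallocMeta_poolalloc, md );
   */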

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/