
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2013 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
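
/* For instance, a typical lookup in this file aliases a Lock* slot as
   a UWord (a sketch of the existing pattern, not new functionality):

      Lock* lk    = NULL;
      Bool  found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );

   Passing &lk as a UWord* is what falls foul of gcc's strict-aliasing
   analysis at -O2. */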

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle del_LockN. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage-collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }

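/* How these structures relate, in summary (an informal sketch, not
   normative):

      map_threads[coretid]   -> Thread
      Thread.admin chain      = the admin_threads list
      Thread.locksetA/W       : WordSetIDs drawn from univ_lsets
      map_locks (guest addr) -> Lock
      Lock.admin_prev/next    = the admin_locks list
      Lock.heldBy             : bag of Thread* (NULL when unheld)
*/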

/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx      = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   admin_threads        = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock        = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next  = admin_locks;
   lock->admin_prev  = NULL;
   admin_locks       = lock;
   /* end: add */
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
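
/* Illustrative only: the client-side sequence these transitions model
   for an LK_mbRec (recursive mutex) lock (client code, not part of
   this tool):

      pthread_mutex_lock(&mx);    // heldBy = {thr},      heldW = True
      pthread_mutex_lock(&mx);    // heldBy = {thr,thr}   (count bumped)
      pthread_mutex_unlock(&mx);  // heldBy = {thr}
      pthread_mutex_unlock(&mx);  // heldBy = NULL,       heldW = False
*/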

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int   i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

/* Pretty-print lock lk.
   If show_lock_addrdescr, also describe the (guest) lock address
   (the description is more complete with --read-var-info=yes).
   If show_internal_data, also show Helgrind-internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data )
{
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %d ", thr->coretid);
         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}
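
/* (For a typical caller, see pp_admin_locks just below, which dumps
   every lock with show_internal_data=True and no address
   description.) */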

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}
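
/* (Despite the _SLOW names, the reverse lookup is now O(1): it just
   reads the coretid cached in the Thread.  The names look like
   holdovers from the slow reverse mapping function mentioned in a
   FIXME near the top of this file.) */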

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
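
/* Typical use (see evhH__post_thread_w_acquires_lock below): the
   post-acquisition event handlers do

      lk = map_locks_lookup_or_create(
              lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );

   so a Lock, complete with an 'appeared_at' context, springs into
   existence the first time a guest lock address is seen. */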

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
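
/* Illustrative only: the client-side behaviour this supports (client
   code, not part of this tool):

      int* p = malloc(40);
      p[0]   = 1;                 // shadow state recorded for the block
      p      = realloc(p, 4000);  // block may move to a new address

   The realloc handler block-copies the old range's shadow state to the
   new location, so the moved block keeps its access history. */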

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}
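
/* The 'c' ("checked") read/write range operations above feed each
   access through libhb's race-detection machinery, in contrast to
   shadow_mem_scopy_range, which (per the FIXME) just moves shadow
   state without applying the MSM. */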

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}
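
/* (Presumably the reason for the split: NoFX lets callers note a
   no-access transition without paying to rewrite the range's shadow
   state, while AHAE really marks the range, so later accesses to it
   can be flagged.  See the libhb_srange_noaccess_ functions in libhb
   for the actual semantics.) */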

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP   = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
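
/* In summary, the write-acquisition cases handled above:

      lock unheld                       -> now w-held by thr; strong
                                           receive from the lock's SO
      lock r-held (by anyone)           -> "Bug in libpthread" complaint
      lock w-held by another thread     -> "Bug in libpthread" complaint
      lock w-held by thr, LK_mbRec      -> recursion count bumped
      lock w-held by thr, not LK_mbRec  -> "Bug in libpthread" complaint
*/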


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
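
/* Read-acquisition is simpler: only LK_rdwr locks can be r-held, any
   number of readers may hold one at once, and the only rejected
   transition is r-acquiring a lock that is currently w-held.  Note the
   weak (!strong_recv) receives, presumably so that concurrent readers
   do not appear to synchronise with one another, only with the
   preceding writer. */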
1224
1225
1226/* The lock at 'lock_ga' is just about to be unlocked. Make all
1227 necessary updates, and also do all possible error checks. */
1228static
1229void evhH__pre_thread_releases_lock ( Thread* thr,
1230 Addr lock_ga, Bool isRDWR )
1231{
1232 Lock* lock;
1233 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001234 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001235
1236 /* This routine is called prior to a lock release, before
1237 libpthread has had a chance to validate the call. Hence we need
1238 to detect and reject any attempts to move the lock into an
1239 invalid state. Such attempts are bugs in the client.
1240
1241 isRDWR is True if we know from the wrapper context that lock_ga
1242 should refer to a reader-writer lock, and is False if [ditto]
1243 lock_ga should refer to a standard mutex. */
1244
sewardjf98e1c02008-10-25 16:22:41 +00001245 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001246 lock = map_locks_maybe_lookup( lock_ga );
1247
1248 if (!lock) {
1249 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1250 the client is trying to unlock it. So complain, then ignore
1251 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001252 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001253 return;
1254 }
1255
1256 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001257 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001258
1259 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001260 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1261 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001262 }
1263 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001264 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1265 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001266 }
1267
1268 if (!lock->heldBy) {
1269 /* The lock is not held. This indicates a serious bug in the
1270 client. */
1271 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001272 HG_(record_error_UnlockUnlocked)( thr, lock );
florian6bf37262012-10-21 03:23:36 +00001273 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1274 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001275 goto error;
1276 }
1277
sewardjf98e1c02008-10-25 16:22:41 +00001278 /* test just above dominates */
1279 tl_assert(lock->heldBy);
1280 was_heldW = lock->heldW;
1281
sewardjb4112022007-11-09 22:49:28 +00001282 /* The lock is held. Is this thread one of the holders? If not,
1283 report a bug in the client. */
florian6bf37262012-10-21 03:23:36 +00001284 n = VG_(elemBag)( lock->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +00001285 tl_assert(n >= 0);
1286 if (n == 0) {
1287 /* We are not a current holder of the lock. This is a bug in
1288 the guest, and (per POSIX pthread rules) the unlock
1289 attempt will fail. So just complain and do nothing
1290 else. */
sewardj896f6f92008-08-19 08:38:52 +00001291 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001292 tl_assert(HG_(is_sane_Thread)(realOwner));
sewardjb4112022007-11-09 22:49:28 +00001293 tl_assert(realOwner != thr);
florian6bf37262012-10-21 03:23:36 +00001294 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1295 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjf98e1c02008-10-25 16:22:41 +00001296 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
sewardjb4112022007-11-09 22:49:28 +00001297 goto error;
1298 }
1299
1300 /* Ok, we hold the lock 'n' times. */
1301 tl_assert(n >= 1);
1302
1303 lockN_release( lock, thr );
1304
1305 n--;
1306 tl_assert(n >= 0);
1307
1308 if (n > 0) {
1309 tl_assert(lock->heldBy);
florian6bf37262012-10-21 03:23:36 +00001310 tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjb4112022007-11-09 22:49:28 +00001311 /* We still hold the lock. So either it's a recursive lock
1312 or a rwlock which is currently r-held. */
1313 tl_assert(lock->kind == LK_mbRec
1314 || (lock->kind == LK_rdwr && !lock->heldW));
florian6bf37262012-10-21 03:23:36 +00001315 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001316 if (lock->heldW)
florian6bf37262012-10-21 03:23:36 +00001317 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001318 else
florian6bf37262012-10-21 03:23:36 +00001319 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001320 } else {
sewardj983f3022009-05-21 14:49:55 +00001321 /* n is zero. This means we don't hold the lock any more. But
1322 if it's a rwlock held in r-mode, someone else could still
1323 hold it. Just do whatever sanity checks we can. */
1324 if (lock->kind == LK_rdwr && lock->heldBy) {
1325 /* It's a rwlock. We no longer hold it but we used to;
1326 nevertheless it still appears to be held by someone else.
1327 The implication is that, prior to this release, it must
1328 have been shared by us and and whoever else is holding it;
1329 which in turn implies it must be r-held, since a lock
1330 can't be w-held by more than one thread. */
1331 /* The lock is now R-held by somebody else: */
1332 tl_assert(lock->heldW == False);
1333 } else {
1334 /* Normal case. It's either not a rwlock, or it's a rwlock
1335 that we used to hold in w-mode (which is pretty much the
1336 same thing as a non-rwlock.) Since this transaction is
1337 atomic (V does not allow multiple threads to run
1338 simultaneously), it must mean the lock is now not held by
1339 anybody. Hence assert for it. */
1340 /* The lock is now not held by anybody: */
1341 tl_assert(!lock->heldBy);
1342 tl_assert(lock->heldW == False);
1343 }
sewardjf98e1c02008-10-25 16:22:41 +00001344 //if (lock->heldBy) {
florian6bf37262012-10-21 03:23:36 +00001345 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjf98e1c02008-10-25 16:22:41 +00001346 //}
sewardjb4112022007-11-09 22:49:28 +00001347 /* update this thread's lockset accordingly. */
1348 thr->locksetA
florian6bf37262012-10-21 03:23:36 +00001349 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +00001350 thr->locksetW
florian6bf37262012-10-21 03:23:36 +00001351 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001352 /* push our VC into the lock */
1353 tl_assert(thr->hbthr);
1354 tl_assert(lock->hbso);
1355 /* If the lock was previously W-held, then we want to do a
1356 strong send, and if previously R-held, then a weak send. */
1357 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001358 }
1359 /* fall through */
1360
1361 error:
sewardjf98e1c02008-10-25 16:22:41 +00001362 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001363}
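
/* Illustrative note (not part of the original source): a minimal
   client sketch that trips the UnlockForeign path above; names are
   hypothetical:

      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;

      void* t1 ( void* arg ) { pthread_mutex_lock(&mx);   return NULL; }
      void* t2 ( void* arg ) { pthread_mutex_unlock(&mx); return NULL; }

   If t2 runs after t1, the unlocking thread is not in the lock's
   heldBy bag, so n == 0 above and UnlockForeign is reported.
   Unlocking a lock Helgrind knows about but which nobody holds takes
   the UnlockUnlocked path instead, and unlocking an address it has
   no record of at all takes the UnlockBogus path. */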


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests.  If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this.  If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs.  If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb.  So force them to NoAccess, so that all
   // VTS references in the affected area are dropped.  Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}

/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
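
/* Illustrative note (not part of the original source): the POSIX rule
   implemented above is that after fork() only the calling thread
   exists in the child.  A hedged client sketch (hypothetical names):

      // parent has spawned worker threads t1, t2 ...
      pid_t pid = fork();
      if (pid == 0) {
         // child: only the forking thread survives.  This handler,
         // registered via VG_(atfork), drops the stale map_threads
         // entries for t1 and t2 so the core can reuse their slots.
      }
*/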


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* The last arg of _so_send should arguably be False, since the
      sending thread doesn't actually exist any more and so we don't
      want _so_send to try taking stack snapshots of it.  The call
      below nevertheless does a strong send. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped.  Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls.  (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.)  See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
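
/* Illustrative note (not part of the original source): the join edge
   created above is what makes the following common pattern race-free.
   A hedged sketch with hypothetical names:

      int result;                          // written by worker, no lock

      void* worker ( void* arg ) { result = 42; return NULL; }

      // in main:
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);
      printf("%d\n", result);              // ok: join orders the write
                                           // before this read

   The quitter's final vector clock is sent through the temporary SO
   and received by the stayer, so the worker's write to 'result'
   happens-before main's read. */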

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                const HChar* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   len = VG_(strlen)( (HChar*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid -- both branches are identical
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free.  This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_NoFX( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}


/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
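
/* Illustrative note (not part of the original source): a hedged
   sketch of the destroy-while-locked error handled above
   (hypothetical names):

      pthread_mutex_t* mx = malloc(sizeof *mx);
      pthread_mutex_init(mx, NULL);
      pthread_mutex_lock(mx);
      pthread_mutex_destroy(mx);   // "pthread_mutex_destroy of a
                                   //  locked mutex"

   Helgrind then behaves as if the mutex had been unlocked first, so
   that its internal Lock record can be retired cleanly. */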

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
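
/* Illustrative note (not part of the original source): the
   self-deadlock detected above, as a hedged client sketch
   (hypothetical names):

      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // non-recursive and already w-held
                                 // by this thread: reported here,
                                 // before the thread deadlocks

   A pthread_mutex_trylock on the same mutex is not flagged, since
   isTryLock == 1 suppresses the check. */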

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                      False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}
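
/* Illustrative note (not part of the original source): our reading of
   the kludge described above is that a single INIT_OR_UNLOCK handler
   pair suffices because glibc implements pthread_spin_init and
   pthread_spin_unlock in essentially the same way -- both store the
   "unlocked" value into the spinlock word -- so the intercepts cannot
   tell them apart.  Hedged sketch (hypothetical names):

      pthread_spinlock_t sl;
      pthread_spin_init(&sl, 0);  // PRE: not held, so no-op;
                                  // POST: creates an LK_nonRec Lock
      pthread_spin_lock(&sl);     // routed to the mutex LOCK handlers
      pthread_spin_unlock(&sl);   // PRE: held, so release + HB edge
*/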


/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;
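
/* Illustrative note (not part of the original source): the dynamic
   (CV,MX) binding that nWaiters tracks, as a hedged client sketch
   (hypothetical names):

      pthread_cond_wait(&cv, &mx1);  // nWaiters 0 -> 1: binds cv to mx1
      // meanwhile, in another thread:
      pthread_cond_wait(&cv, &mx2);  // nWaiters > 0 and mx2 != mx1:
                                     // "cond is associated with a
                                     //  different mutex"

   Once every waiter has left (nWaiters back to zero), a later wait
   may legitimately re-bind cv to a different mutex. */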


/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
      tl_assert(map_cond_to_CVInfo != NULL);
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so     = so;
      cvi->mx_ga  = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      return NULL;
   }
}

static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread* thr;
   UWord   keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
                 " destruction of condition variable being waited upon");
         /* Destroying a cond var being waited upon: the outcome is
            EBUSY and the variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV.  So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock* lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL.  Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error.  We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr,
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
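
/* Illustrative note (not part of the original source): the "dubious"
   pattern the checks above are aimed at, as a hedged sketch
   (hypothetical names):

      // signaller:
      pthread_mutex_lock(&mx);
      ready = 1;
      pthread_mutex_unlock(&mx);
      pthread_cond_signal(&cv);      // mx no longer held here

      // waiter:
      pthread_mutex_lock(&mx);
      while (!ready)
         pthread_cond_wait(&cv, &mx);
      pthread_mutex_unlock(&mx);

   Signalling after dropping mx is legal per POSIX, but if no thread
   holds mx at that instant it can draw the "dubious: associated lock
   is not held by any thread" complaint, since the signaller may then
   race against the released waiter. */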

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}

static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex,
                                             Bool timeout)
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
                  (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
   if (!cvi) {
      /* This could be either a bug in helgrind or an error in the
         guest application (e.g. the cond var was destroyed by another
         thread).  Let's assume helgrind is perfect ...
         Note that this is similar to drd behaviour. */
      HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
                             " being waited upon");
      return;
   }

   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!timeout && !libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded"
                                   " without prior pthread_cond_post");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}
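
/* Illustrative note (not part of the original source): the send (at
   signal) / recv (at wait-return) pairing gives the expected ordering
   in the standard producer-consumer shape.  Hedged sketch
   (hypothetical names):

      // producer:
      pthread_mutex_lock(&mx);
      queue_push(&q, item);
      pthread_cond_signal(&cv);        // 'send' into the CV's SO
      pthread_mutex_unlock(&mx);

      // consumer:
      pthread_mutex_lock(&mx);
      while (queue_empty(&q))
         pthread_cond_wait(&cv, &mx);  // 'recv' on successful return
      item = queue_pop(&q);
      pthread_mutex_unlock(&mx);

   The recv done in COND_WAIT_POST imports the signaller's vector
   clock, so the push is seen as happening-before the pop; this is the
   edge described in the comment at the top of this section. */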

static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
                                             void* cond, void* cond_attr )
{
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
                  "(ctid=%d, cond=%p, cond_attr=%p)\n",
                  (Int)tid, (void*)cond, (void*) cond_attr );

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert (cvi);
   tl_assert (cvi->so);
}


static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond, Bool cond_is_init )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p, cond_is_init=%d)\n",
                  (Int)tid, (void*)cond, (Int)cond_is_init );

   map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
}


/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured to us by the wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured to us by the wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
2550 if ( lk
2551 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2552 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002553 HG_(record_error_Misc)(
2554 thr, "pthread_rwlock_{rd,rw}lock with a "
2555 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002556 }
2557}
2558
2559static
2560void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2561{
2562   // only called if the real library call succeeded - so the rwlock is sane
2563 Thread* thr;
2564 if (SHOW_EVENTS >= 1)
2565 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2566 (Int)tid, (Int)isW, (void*)rwl );
2567
2568 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2569 thr = map_threads_maybe_lookup( tid );
2570 tl_assert(thr); /* cannot fail - Thread* must already exist */
2571
2572 (isW ? evhH__post_thread_w_acquires_lock
2573 : evhH__post_thread_r_acquires_lock)(
2574 thr,
2575 LK_rdwr, /* if not known, create new lock with this LockKind */
2576 (Addr)rwl
2577 );
2578}
2579
2580static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2581{
2582 // 'rwl' may be invalid - not checked by wrapper
2583 Thread* thr;
2584 if (SHOW_EVENTS >= 1)
2585 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2586 (Int)tid, (void*)rwl );
2587
2588 thr = map_threads_maybe_lookup( tid );
2589 tl_assert(thr); /* cannot fail - Thread* must already exist */
2590
2591 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2592}
2593
2594static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2595{
2596   // only called if the real library call succeeded - so the rwlock is sane
2597 Thread* thr;
2598 if (SHOW_EVENTS >= 1)
2599 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2600 (Int)tid, (void*)rwl );
2601 thr = map_threads_maybe_lookup( tid );
2602 tl_assert(thr); /* cannot fail - Thread* must already exist */
2603
2604 // anything we should do here?
2605}
2606
2607
sewardj9f569b72008-11-13 13:33:09 +00002608/* ---------------------------------------------------------- */
2609/* -------------- events to do with semaphores -------------- */
2610/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002611
sewardj11e352f2007-11-30 11:11:02 +00002612/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002613 variables. */
2614
sewardjf98e1c02008-10-25 16:22:41 +00002615/* For each semaphore, we maintain a stack of SOs. When a 'post'
2616 operation is done on a semaphore (unlocking, essentially), a new SO
2617 is created for the posting thread, the posting thread does a strong
2618 send to it (which merely installs the posting thread's VC in the
2619 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002620
2621 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002622 semaphore, we pop a SO off the semaphore's stack (which should be
2623 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002624 dependencies between posters and waiters of the semaphore.
2625
sewardjf98e1c02008-10-25 16:22:41 +00002626 It may not be necessary to use a stack - perhaps a bag of SOs would
2627 do. But we do need to keep track of how many unused-up posts have
2628 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002629
sewardjf98e1c02008-10-25 16:22:41 +00002630 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002631 twice on S. T3 cannot complete its waits without both T1 and T2
2632 posting. The above mechanism will ensure that T3 acquires
2633 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002634
sewardjf98e1c02008-10-25 16:22:41 +00002635 When a semaphore is initialised with value N, we do as if we'd
2636 posted N times on the semaphore: basically create N SOs and do a
2637   strong send to all of them.  This allows up to N waits on the
2638 semaphore to acquire a dependency on the initialisation point,
2639 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002640
2641 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2642 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002643*/
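/* Illustrative trace (a sketch, not part of the original logic): for
   the T1/T2/T3 example above, the handlers below perform

      T1: sem_post(S)  ->  so1 = libhb_so_alloc();
                           libhb_so_send(T1->hbthr, so1, True);
                           push_SO_for_sem(S, so1);     stack: [so1]
      T2: sem_post(S)  ->  likewise with so2;           stack: [so1,so2]
      T3: sem_wait(S)  ->  so2 = mb_pop_SO_for_sem(S);
                           libhb_so_recv(T3->hbthr, so2, True);
      T3: sem_wait(S)  ->  so1 = mb_pop_SO_for_sem(S);
                           libhb_so_recv(T3->hbthr, so1, True);

   after which T3's vector clock dominates those of both posters. */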
2644
sewardjf98e1c02008-10-25 16:22:41 +00002645/* sem_t* -> XArray* SO* */
2646static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002647
sewardjf98e1c02008-10-25 16:22:41 +00002648static void map_sem_to_SO_stack_INIT ( void ) {
2649 if (map_sem_to_SO_stack == NULL) {
2650 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2651 HG_(free), NULL );
2652 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002653 }
2654}
2655
sewardjf98e1c02008-10-25 16:22:41 +00002656static void push_SO_for_sem ( void* sem, SO* so ) {
2657 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002658 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002659 tl_assert(so);
2660 map_sem_to_SO_stack_INIT();
2661 if (VG_(lookupFM)( map_sem_to_SO_stack,
2662 &keyW, (UWord*)&xa, (UWord)sem )) {
2663 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002664 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002665 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002666 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002667 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2668 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002669 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002670 }
2671}
2672
sewardjf98e1c02008-10-25 16:22:41 +00002673static SO* mb_pop_SO_for_sem ( void* sem ) {
2674 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002675 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002676 SO* so;
2677 map_sem_to_SO_stack_INIT();
2678 if (VG_(lookupFM)( map_sem_to_SO_stack,
2679 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002680 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002681 Word sz;
2682 tl_assert(keyW == (UWord)sem);
2683 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002684 tl_assert(sz >= 0);
2685 if (sz == 0)
2686 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002687 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2688 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002689 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002690 return so;
sewardjb4112022007-11-09 22:49:28 +00002691 } else {
2692 /* hmm, that's odd. No stack for this semaphore. */
2693 return NULL;
2694 }
2695}
2696
sewardj11e352f2007-11-30 11:11:02 +00002697static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002698{
sewardjf98e1c02008-10-25 16:22:41 +00002699 UWord keyW, valW;
2700 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002701
sewardjb4112022007-11-09 22:49:28 +00002702 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002703 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002704 (Int)tid, (void*)sem );
2705
sewardjf98e1c02008-10-25 16:22:41 +00002706 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002707
sewardjf98e1c02008-10-25 16:22:41 +00002708 /* Empty out the semaphore's SO stack. This way of doing it is
2709 stupid, but at least it's easy. */
2710 while (1) {
2711 so = mb_pop_SO_for_sem( sem );
2712 if (!so) break;
2713 libhb_so_dealloc(so);
2714 }
2715
2716 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2717 XArray* xa = (XArray*)valW;
2718 tl_assert(keyW == (UWord)sem);
2719 tl_assert(xa);
2720 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2721 VG_(deleteXA)(xa);
2722 }
sewardjb4112022007-11-09 22:49:28 +00002723}
2724
sewardj11e352f2007-11-30 11:11:02 +00002725static
2726void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2727{
sewardjf98e1c02008-10-25 16:22:41 +00002728 SO* so;
2729 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002730
2731 if (SHOW_EVENTS >= 1)
2732 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2733 (Int)tid, (void*)sem, value );
2734
sewardjf98e1c02008-10-25 16:22:41 +00002735 thr = map_threads_maybe_lookup( tid );
2736 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002737
sewardjf98e1c02008-10-25 16:22:41 +00002738 /* Empty out the semaphore's SO stack. This way of doing it is
2739 stupid, but at least it's easy. */
2740 while (1) {
2741 so = mb_pop_SO_for_sem( sem );
2742 if (!so) break;
2743 libhb_so_dealloc(so);
2744 }
sewardj11e352f2007-11-30 11:11:02 +00002745
sewardjf98e1c02008-10-25 16:22:41 +00002746   /* If we don't do this check, the following loop runs us out
2747 of memory for stupid initial values of 'value'. */
2748 if (value > 10000) {
2749 HG_(record_error_Misc)(
2750 thr, "sem_init: initial value exceeds 10000; using 10000" );
2751 value = 10000;
2752 }
sewardj11e352f2007-11-30 11:11:02 +00002753
sewardjf98e1c02008-10-25 16:22:41 +00002754   /* Now create 'value' new SOs for the thread, do a strong send to
2755 each of them, and push them all on the stack. */
2756 for (; value > 0; value--) {
2757 Thr* hbthr = thr->hbthr;
2758 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002759
sewardjf98e1c02008-10-25 16:22:41 +00002760 so = libhb_so_alloc();
2761 libhb_so_send( hbthr, so, True/*strong send*/ );
2762 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002763 }
2764}
2765
2766static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002767{
sewardjf98e1c02008-10-25 16:22:41 +00002768 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2769 it (iow, write our VC into it, then tick ours), and push the SO
2770      on a stack of SOs associated with 'sem'.  This is later used
2771      by other thread(s) which successfully exit from a sem_wait on
2772      the same sem; by doing a strong recv from SOs popped off the
2773 stack, they acquire dependencies on the posting thread
2774 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002775
sewardjf98e1c02008-10-25 16:22:41 +00002776 Thread* thr;
2777 SO* so;
2778 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002779
2780 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002781 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002782 (Int)tid, (void*)sem );
2783
2784 thr = map_threads_maybe_lookup( tid );
2785 tl_assert(thr); /* cannot fail - Thread* must already exist */
2786
2787 // error-if: sem is bogus
2788
sewardjf98e1c02008-10-25 16:22:41 +00002789 hbthr = thr->hbthr;
2790 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002791
sewardjf98e1c02008-10-25 16:22:41 +00002792 so = libhb_so_alloc();
2793 libhb_so_send( hbthr, so, True/*strong send*/ );
2794 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002795}
2796
sewardj11e352f2007-11-30 11:11:02 +00002797static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002798{
sewardjf98e1c02008-10-25 16:22:41 +00002799 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2800 the 'sem' from this semaphore's SO-stack, and do a strong recv
2801 from it. This creates a dependency back to one of the post-ers
2802 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002803
sewardjf98e1c02008-10-25 16:22:41 +00002804 Thread* thr;
2805 SO* so;
2806 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002807
2808 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002809 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002810 (Int)tid, (void*)sem );
2811
2812 thr = map_threads_maybe_lookup( tid );
2813 tl_assert(thr); /* cannot fail - Thread* must already exist */
2814
2815 // error-if: sem is bogus
2816
sewardjf98e1c02008-10-25 16:22:41 +00002817 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002818
sewardjf98e1c02008-10-25 16:22:41 +00002819 if (so) {
2820 hbthr = thr->hbthr;
2821 tl_assert(hbthr);
2822
2823 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2824 libhb_so_dealloc(so);
2825 } else {
2826 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2827 If this happened it would surely be a bug in the threads
2828 library. */
2829 HG_(record_error_Misc)(
2830 thr, "Bug in libpthread: sem_wait succeeded on"
2831 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002832 }
2833}
2834
2835
sewardj9f569b72008-11-13 13:33:09 +00002836/* -------------------------------------------------------- */
2837/* -------------- events to do with barriers -------------- */
2838/* -------------------------------------------------------- */
2839
2840typedef
2841 struct {
2842 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002843 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002844 UWord size; /* declared size */
2845 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2846 }
2847 Bar;
2848
2849static Bar* new_Bar ( void ) {
2850 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2851 tl_assert(bar);
2852 /* all fields are zero */
2853 tl_assert(bar->initted == False);
2854 return bar;
2855}
2856
2857static void delete_Bar ( Bar* bar ) {
2858 tl_assert(bar);
2859 if (bar->waiting)
2860 VG_(deleteXA)(bar->waiting);
2861 HG_(free)(bar);
2862}
2863
2864/* A mapping which stores auxiliary data for barriers. */
2865
2866/* pthread_barrier_t* -> Bar* */
2867static WordFM* map_barrier_to_Bar = NULL;
2868
2869static void map_barrier_to_Bar_INIT ( void ) {
2870 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2871 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2872 "hg.mbtBI.1", HG_(free), NULL );
2873 tl_assert(map_barrier_to_Bar != NULL);
2874 }
2875}
2876
2877static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2878 UWord key, val;
2879 map_barrier_to_Bar_INIT();
2880 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2881 tl_assert(key == (UWord)barrier);
2882 return (Bar*)val;
2883 } else {
2884 Bar* bar = new_Bar();
2885 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2886 return bar;
2887 }
2888}
2889
2890static void map_barrier_to_Bar_delete ( void* barrier ) {
2891 UWord keyW, valW;
2892 map_barrier_to_Bar_INIT();
2893 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2894 Bar* bar = (Bar*)valW;
2895 tl_assert(keyW == (UWord)barrier);
2896 delete_Bar(bar);
2897 }
2898}
2899
2900
2901static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2902 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002903 UWord count,
2904 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002905{
2906 Thread* thr;
2907 Bar* bar;
2908
2909 if (SHOW_EVENTS >= 1)
2910 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002911 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2912 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002913
2914 thr = map_threads_maybe_lookup( tid );
2915 tl_assert(thr); /* cannot fail - Thread* must already exist */
2916
2917 if (count == 0) {
2918 HG_(record_error_Misc)(
2919 thr, "pthread_barrier_init: 'count' argument is zero"
2920 );
2921 }
2922
sewardj406bac82010-03-03 23:03:40 +00002923 if (resizable != 0 && resizable != 1) {
2924 HG_(record_error_Misc)(
2925 thr, "pthread_barrier_init: invalid 'resizable' argument"
2926 );
2927 }
2928
sewardj9f569b72008-11-13 13:33:09 +00002929 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2930 tl_assert(bar);
2931
2932 if (bar->initted) {
2933 HG_(record_error_Misc)(
2934 thr, "pthread_barrier_init: barrier is already initialised"
2935 );
2936 }
2937
2938 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2939 tl_assert(bar->initted);
2940 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002941 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002942 );
2943 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2944 }
2945 if (!bar->waiting) {
2946 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2947 sizeof(Thread*) );
2948 }
2949
2950 tl_assert(bar->waiting);
2951 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002952 bar->initted = True;
2953 bar->resizable = resizable == 1 ? True : False;
2954 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002955}
2956
2957
2958static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2959 void* barrier )
2960{
sewardj553655c2008-11-14 19:41:19 +00002961 Thread* thr;
2962 Bar* bar;
2963
sewardj9f569b72008-11-13 13:33:09 +00002964 /* Deal with destroy events. The only purpose is to free storage
2965 associated with the barrier, so as to avoid any possible
2966 resource leaks. */
2967 if (SHOW_EVENTS >= 1)
2968 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2969 "(tid=%d, barrier=%p)\n",
2970 (Int)tid, (void*)barrier );
2971
sewardj553655c2008-11-14 19:41:19 +00002972 thr = map_threads_maybe_lookup( tid );
2973 tl_assert(thr); /* cannot fail - Thread* must already exist */
2974
2975 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2976 tl_assert(bar);
2977
2978 if (!bar->initted) {
2979 HG_(record_error_Misc)(
2980 thr, "pthread_barrier_destroy: barrier was never initialised"
2981 );
2982 }
2983
2984 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2985 HG_(record_error_Misc)(
2986 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2987 );
2988 }
2989
sewardj9f569b72008-11-13 13:33:09 +00002990 /* Maybe we shouldn't do this; just let it persist, so that when it
2991 is reinitialised we don't need to do any dynamic memory
2992 allocation? The downside is a potentially unlimited space leak,
2993 if the client creates (in turn) a large number of barriers all
2994 at different locations. Note that if we do later move to the
2995 don't-delete-it scheme, we need to mark the barrier as
2996 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002997 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002998 map_barrier_to_Bar_delete( barrier );
2999}
3000
3001
sewardj406bac82010-03-03 23:03:40 +00003002/* All the threads have arrived. Now do the Interesting Bit. Get a
3003 new synchronisation object and do a weak send to it from all the
3004 participating threads. This makes its vector clocks be the join of
3005 all the individual threads' vector clocks. Then do a strong
3006 receive from it back to all threads, so that their VCs are a copy
3007 of it (hence are all equal to the join of their original VCs.) */
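/* Worked example (clock values are illustrative only): with
   bar->size == 3 and entry vector clocks VC(T1) = [2,0,0],
   VC(T2) = [0,3,0], VC(T3) = [0,0,1], the three weak sends leave the
   SO holding the join [2,3,1]; the three strong receives then copy
   [2,3,1] back into T1, T2 and T3, so every thread leaves the barrier
   with a clock that dominates all of the entry clocks. */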
3008static void do_barrier_cross_sync_and_empty ( Bar* bar )
3009{
3010 /* XXX check bar->waiting has no duplicates */
3011 UWord i;
3012 SO* so = libhb_so_alloc();
3013
3014 tl_assert(bar->waiting);
3015 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3016
3017 /* compute the join ... */
3018 for (i = 0; i < bar->size; i++) {
3019 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3020 Thr* hbthr = t->hbthr;
3021 libhb_so_send( hbthr, so, False/*weak send*/ );
3022 }
3023 /* ... and distribute to all threads */
3024 for (i = 0; i < bar->size; i++) {
3025 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3026 Thr* hbthr = t->hbthr;
3027 libhb_so_recv( hbthr, so, True/*strong recv*/ );
3028 }
3029
3030 /* finally, we must empty out the waiting vector */
3031 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3032
3033 /* and we don't need this any more. Perhaps a stack-allocated
3034 SO would be better? */
3035 libhb_so_dealloc(so);
3036}
3037
3038
sewardj9f569b72008-11-13 13:33:09 +00003039static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3040 void* barrier )
3041{
sewardj1c466b72008-11-19 11:52:14 +00003042 /* This function gets called after a client thread calls
3043 pthread_barrier_wait but before it arrives at the real
3044 pthread_barrier_wait.
3045
3046 Why is the following correct? It's a bit subtle.
3047
3048 If this is not the last thread arriving at the barrier, we simply
3049 note its presence and return. Because valgrind (at least as of
3050 Nov 08) is single threaded, we are guaranteed safe from any race
3051 conditions when in this function -- no other client threads are
3052 running.
3053
3054 If this is the last thread, then we are again the only running
3055 thread. All the other threads will have either arrived at the
3056 real pthread_barrier_wait or are on their way to it, but in any
3057 case are guaranteed not to be able to move past it, because this
3058 thread is currently in this function and so has not yet arrived
3059 at the real pthread_barrier_wait. That means that:
3060
3061 1. While we are in this function, none of the other threads
3062 waiting at the barrier can move past it.
3063
3064 2. When this function returns (and simulated execution resumes),
3065 this thread and all other waiting threads will be able to move
3066 past the real barrier.
3067
3068 Because of this, it is now safe to update the vector clocks of
3069 all threads, to represent the fact that they all arrived at the
3070 barrier and have all moved on. There is no danger of any
3071 complications to do with some threads leaving the barrier and
3072 racing back round to the front, whilst others are still leaving
3073 (which is the primary source of complication in correct handling/
3074 implementation of barriers). That can't happen because we update
3075 here our data structures so as to indicate that the threads have
3076 passed the barrier, even though, as per (2) above, they are
3077 guaranteed not to pass the barrier until we return.
3078
3079 This relies crucially on Valgrind being single threaded. If that
3080 changes, this will need to be reconsidered.
3081 */
sewardj9f569b72008-11-13 13:33:09 +00003082 Thread* thr;
3083 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003084 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003085
3086 if (SHOW_EVENTS >= 1)
3087 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3088 "(tid=%d, barrier=%p)\n",
3089 (Int)tid, (void*)barrier );
3090
3091 thr = map_threads_maybe_lookup( tid );
3092 tl_assert(thr); /* cannot fail - Thread* must already exist */
3093
3094 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3095 tl_assert(bar);
3096
3097 if (!bar->initted) {
3098 HG_(record_error_Misc)(
3099 thr, "pthread_barrier_wait: barrier is uninitialised"
3100 );
3101 return; /* client is broken .. avoid assertions below */
3102 }
3103
3104 /* guaranteed by _INIT_PRE above */
3105 tl_assert(bar->size > 0);
3106 tl_assert(bar->waiting);
3107
3108 VG_(addToXA)( bar->waiting, &thr );
3109
3110 /* guaranteed by this function */
3111 present = VG_(sizeXA)(bar->waiting);
3112 tl_assert(present > 0 && present <= bar->size);
3113
3114 if (present < bar->size)
3115 return;
3116
sewardj406bac82010-03-03 23:03:40 +00003117 do_barrier_cross_sync_and_empty(bar);
3118}
sewardj9f569b72008-11-13 13:33:09 +00003119
sewardj9f569b72008-11-13 13:33:09 +00003120
sewardj406bac82010-03-03 23:03:40 +00003121static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3122 void* barrier,
3123 UWord newcount )
3124{
3125 Thread* thr;
3126 Bar* bar;
3127 UWord present;
3128
3129 if (SHOW_EVENTS >= 1)
3130 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3131 "(tid=%d, barrier=%p, newcount=%lu)\n",
3132 (Int)tid, (void*)barrier, newcount );
3133
3134 thr = map_threads_maybe_lookup( tid );
3135 tl_assert(thr); /* cannot fail - Thread* must already exist */
3136
3137 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3138 tl_assert(bar);
3139
3140 if (!bar->initted) {
3141 HG_(record_error_Misc)(
3142 thr, "pthread_barrier_resize: barrier is uninitialised"
3143 );
3144 return; /* client is broken .. avoid assertions below */
3145 }
3146
3147 if (!bar->resizable) {
3148 HG_(record_error_Misc)(
3149 thr, "pthread_barrier_resize: barrier is may not be resized"
3150 );
3151 return; /* client is broken .. avoid assertions below */
3152 }
3153
3154 if (newcount == 0) {
3155 HG_(record_error_Misc)(
3156 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3157 );
3158 return; /* client is broken .. avoid assertions below */
3159 }
3160
3161 /* guaranteed by _INIT_PRE above */
3162 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003163 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003164 /* Guaranteed by this fn */
3165 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003166
sewardj406bac82010-03-03 23:03:40 +00003167 if (newcount >= bar->size) {
3168 /* Increasing the capacity. There's no possibility of threads
3169 moving on from the barrier in this situation, so just note
3170 the fact and do nothing more. */
3171 bar->size = newcount;
3172 } else {
3173      /* Decreasing the capacity.  If we decrease it to be equal to or
3174         below the number of waiting threads, they will now move past
3175         the barrier, so we need to mess with dep edges in the same way
3176 as if the barrier had filled up normally. */
3177 present = VG_(sizeXA)(bar->waiting);
3178 tl_assert(present >= 0 && present <= bar->size);
3179 if (newcount <= present) {
3180 bar->size = present; /* keep the cross_sync call happy */
3181 do_barrier_cross_sync_and_empty(bar);
3182 }
3183 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003184 }
sewardj9f569b72008-11-13 13:33:09 +00003185}
3186
3187
sewardjed2e72e2009-08-14 11:08:24 +00003188/* ----------------------------------------------------- */
3189/* ----- events to do with user-specified HB edges ----- */
3190/* ----------------------------------------------------- */
3191
3192/* A mapping from arbitrary UWord tag to the SO associated with it.
3193 The UWord tags are meaningless to us, interpreted only by the
3194 user. */
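/* Client-side sketch (assumed usage; the queue helpers are made up):
   these events are driven by the ANNOTATE_HAPPENS_BEFORE and
   ANNOTATE_HAPPENS_AFTER macros from helgrind.h, with the annotated
   address serving as the usertag.  E.g. for a hand-rolled queue
   carrying Msg* pointers:

      // producer:                      // consumer:
      fill(msg);                        msg = dequeue(q);
      ANNOTATE_HAPPENS_BEFORE(msg);     ANNOTATE_HAPPENS_AFTER(msg);
      enqueue(q, msg);                  use(msg);

   The two macros reach evh__HG_USERSO_SEND_PRE and
   evh__HG_USERSO_RECV_POST below, creating the happens-before edge
   that makes the fill/use accesses race-free in helgrind's eyes. */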
3195
3196
3197
3198/* UWord -> SO* */
3199static WordFM* map_usertag_to_SO = NULL;
3200
3201static void map_usertag_to_SO_INIT ( void ) {
3202 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3203 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3204 "hg.mutS.1", HG_(free), NULL );
3205 tl_assert(map_usertag_to_SO != NULL);
3206 }
3207}
3208
3209static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3210 UWord key, val;
3211 map_usertag_to_SO_INIT();
3212 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3213 tl_assert(key == (UWord)usertag);
3214 return (SO*)val;
3215 } else {
3216 SO* so = libhb_so_alloc();
3217 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3218 return so;
3219 }
3220}
3221
sewardj6015d0e2011-03-11 19:10:48 +00003222static void map_usertag_to_SO_delete ( UWord usertag ) {
3223 UWord keyW, valW;
3224 map_usertag_to_SO_INIT();
3225 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3226 SO* so = (SO*)valW;
3227 tl_assert(keyW == usertag);
3228 tl_assert(so);
3229 libhb_so_dealloc(so);
3230 }
3231}
sewardjed2e72e2009-08-14 11:08:24 +00003232
3233
3234static
3235void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3236{
3237   /* TID is just about to notionally send a message on a notional
3238 abstract synchronisation object whose identity is given by
3239 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003240 bound, and do a 'weak send' on the SO. This joins the vector
3241 clocks from this thread into any vector clocks already present
3242 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003243 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003244 thereby acquiring a dependency on all the events that have
3245 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003246 Thread* thr;
3247 SO* so;
3248
3249 if (SHOW_EVENTS >= 1)
3250 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3251 (Int)tid, usertag );
3252
3253 thr = map_threads_maybe_lookup( tid );
3254 tl_assert(thr); /* cannot fail - Thread* must already exist */
3255
3256 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3257 tl_assert(so);
3258
sewardj8c50d3c2011-03-11 18:38:12 +00003259 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003260}
3261
3262static
3263void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3264{
3265 /* TID has just notionally received a message from a notional
3266 abstract synchronisation object whose identity is given by
3267 USERTAG. Bind USERTAG to a real SO if it is not already so
3268 bound. If the SO has at some point in the past been 'sent' on,
3269      do a 'strong receive' on it, thereby acquiring a dependency on
3270 the sender. */
3271 Thread* thr;
3272 SO* so;
3273
3274 if (SHOW_EVENTS >= 1)
3275 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3276 (Int)tid, usertag );
3277
3278 thr = map_threads_maybe_lookup( tid );
3279 tl_assert(thr); /* cannot fail - Thread* must already exist */
3280
3281 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3282 tl_assert(so);
3283
3284 /* Acquire a dependency on it. If the SO has never so far been
3285 sent on, then libhb_so_recv will do nothing. So we're safe
3286 regardless of SO's history. */
3287 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3288}
3289
sewardj6015d0e2011-03-11 19:10:48 +00003290static
3291void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3292{
3293 /* TID declares that any happens-before edges notionally stored in
3294 USERTAG can be deleted. If (as would normally be the case) a
3295      SO is associated with USERTAG, then the association is removed
3296 and all resources associated with SO are freed. Importantly,
3297 that frees up any VTSs stored in SO. */
3298 if (SHOW_EVENTS >= 1)
3299 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3300 (Int)tid, usertag );
3301
3302 map_usertag_to_SO_delete( usertag );
3303}
3304
sewardjed2e72e2009-08-14 11:08:24 +00003305
sewardjb4112022007-11-09 22:49:28 +00003306/*--------------------------------------------------------------*/
3307/*--- Lock acquisition order monitoring ---*/
3308/*--------------------------------------------------------------*/
3309
3310/* FIXME: here are some optimisations still to do in
3311 laog__pre_thread_acquires_lock.
3312
3313 The graph is structured so that if L1 --*--> L2 then L1 must be
3314 acquired before L2.
3315
3316 The common case is that some thread T holds (eg) L1 L2 and L3 and
3317 is repeatedly acquiring and releasing Ln, and there is no ordering
3318   error in what it is doing.  Hence it repeatedly:
3319
3320 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3321 produces the answer No (because there is no error).
3322
3323 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3324 (because they already got added the first time T acquired Ln).
3325
3326 Hence cache these two events:
3327
3328 (1) Cache result of the query from last time. Invalidate the cache
3329 any time any edges are added to or deleted from laog.
3330
3331 (2) Cache these add-edge requests and ignore them if said edges
3332 have already been added to laog. Invalidate the cache any time
3333 any edges are deleted from laog.
3334*/
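/* A minimal sketch of such caching (names invented; nothing below is
   implemented):

      static WordSetID cached_lset;  // thr->locksetA at last query
      static Lock*     cached_lk;    // lock queried last time
      static Lock*     cached_res;   // last laog__do_dfs_from_to result

   laog__pre_thread_acquires_lock would reuse cached_res when
   (cached_lk, cached_lset) match its arguments, and laog__add_edge /
   laog__del_edge would invalidate the cache whenever they mutate the
   graph, per (1) and (2) above. */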
3335
3336typedef
3337 struct {
3338 WordSetID inns; /* in univ_laog */
3339 WordSetID outs; /* in univ_laog */
3340 }
3341 LAOGLinks;
3342
3343/* lock order acquisition graph */
3344static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
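/* Representation sketch (illustrative): a graph holding just the edge
   L1 --> L2 is stored as
      laog:  L1 |-> { inns = {},   outs = {L2} }
             L2 |-> { inns = {L1}, outs = {}   }
   with the inns/outs sets drawn from the univ_laog universe.
   laog__sanity_check below verifies exactly this in/out symmetry. */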
3345
3346/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3347 where that edge was created, so that we can show the user later if
3348 we need to. */
3349typedef
3350 struct {
3351 Addr src_ga; /* Lock guest addresses for */
3352 Addr dst_ga; /* src/dst of the edge */
3353 ExeContext* src_ec; /* And corresponding places where that */
3354 ExeContext* dst_ec; /* ordering was established */
3355 }
3356 LAOGLinkExposition;
3357
sewardj250ec2e2008-02-15 22:02:30 +00003358static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003359 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3360 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3361 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3362 if (llx1->src_ga < llx2->src_ga) return -1;
3363 if (llx1->src_ga > llx2->src_ga) return 1;
3364 if (llx1->dst_ga < llx2->dst_ga) return -1;
3365 if (llx1->dst_ga > llx2->dst_ga) return 1;
3366 return 0;
3367}
3368
3369static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3370/* end EXPOSITION ONLY */
3371
3372
sewardja65db102009-01-26 10:45:16 +00003373__attribute__((noinline))
3374static void laog__init ( void )
3375{
3376 tl_assert(!laog);
3377 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003378 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003379
3380 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3381 HG_(free), NULL/*unboxedcmp*/ );
3382
3383 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3384 cmp_LAOGLinkExposition );
3385 tl_assert(laog);
3386 tl_assert(laog_exposition);
3387}
3388
florian6bf37262012-10-21 03:23:36 +00003389static void laog__show ( const HChar* who ) {
3390 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003391 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003392 Lock* me;
3393 LAOGLinks* links;
3394 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003395 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003396 me = NULL;
3397 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003398 while (VG_(nextIterFM)( laog, (UWord*)&me,
3399 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003400 tl_assert(me);
3401 tl_assert(links);
3402 VG_(printf)(" node %p:\n", me);
3403 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3404 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003405 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003406 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3407 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003408 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003409 me = NULL;
3410 links = NULL;
3411 }
sewardj896f6f92008-08-19 08:38:52 +00003412 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003413 VG_(printf)("}\n");
3414}
3415
sewardj866c80c2011-10-22 19:29:51 +00003416static void univ_laog_do_GC ( void ) {
3417 Word i;
3418 LAOGLinks* links;
3419 Word seen = 0;
3420 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3421 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3422
3423 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3424 (Int) univ_laog_cardinality
3425 * sizeof(Bool) );
3426 // univ_laog_seen[*] set to 0 (False) by zalloc.
3427
3428 if (VG_(clo_stats))
3429 VG_(message)(Vg_DebugMsg,
3430 "univ_laog_do_GC enter cardinality %'10d\n",
3431 (Int)univ_laog_cardinality);
3432
3433 VG_(initIterFM)( laog );
3434 links = NULL;
3435 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3436 tl_assert(links);
3437 tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3438 univ_laog_seen[links->inns] = True;
3439 tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3440 univ_laog_seen[links->outs] = True;
3441 links = NULL;
3442 }
3443 VG_(doneIterFM)( laog );
3444
3445 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3446 if (univ_laog_seen[i])
3447 seen++;
3448 else
3449 HG_(dieWS) ( univ_laog, (WordSet)i );
3450 }
3451
3452 HG_(free) (univ_laog_seen);
3453
3454 // We need to decide the value of the next_gc.
3455 // 3 solutions were looked at:
3456 // Sol 1: garbage collect at seen * 2
3457 // This solution was a lot slower, probably because we both do a lot of
3458 // garbage collection and do not keep long enough laog WV that will become
3459 // useful again very soon.
3460 // Sol 2: garbage collect at a percentage increase of the current cardinality
3461 // (with a min increase of 1)
3462 // Trials on a small test program with 1%, 5% and 10% increase were done.
3463 // 1% is slightly faster than 5%, which is slightly slower than 10%.
3464 // However, on a big application, this caused the memory to be exhausted,
3465 // as even a 1% increase of size at each gc becomes a lot, when many gc
3466 // are done.
3467 // Sol 3: always garbage collect at current cardinality + 1.
3468 // This solution was the fastest of the 3 solutions, and caused no memory
3469 // exhaustion in the big application.
3470 //
3471 // With regards to cost introduced by gc: on the t2t perf test (doing only
3472 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3473 // version with garbage collection. With t2t 50 20 2, my machine started
3474 // to page out, and so the garbage collected version was much faster.
3475 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3476 // performance difference is insignificant (~ 0.1 s).
3477 // Of course, it might be that real life programs are not well represented
3478 // by t2t.
3479
3480 // If ever we want to have a more sophisticated control
3481 // (e.g. clo options to control the percentage increase or fixed increase),
3482 // we should do it here, eg.
3483 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3484 // Currently, we just hard-code the solution 3 above.
3485 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3486
3487 if (VG_(clo_stats))
3488 VG_(message)
3489 (Vg_DebugMsg,
3490 "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
3491 (Int)seen, next_gc_univ_laog);
3492}
3493
3494
sewardjb4112022007-11-09 22:49:28 +00003495__attribute__((noinline))
3496static void laog__add_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003497 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003498 LAOGLinks* links;
3499 Bool presentF, presentR;
3500 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3501
3502 /* Take the opportunity to sanity check the graph. Record in
3503 presentF if there is already a src->dst mapping in this node's
3504 forwards links, and presentR if there is already a src->dst
3505 mapping in this node's backwards links. They should agree!
3506 Also, we need to know whether the edge was already present so as
3507 to decide whether or not to update the link details mapping. We
3508 can compute presentF and presentR essentially for free, so may
3509 as well do this always. */
3510 presentF = presentR = False;
3511
3512 /* Update the out edges for src */
3513 keyW = 0;
3514 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003515 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003516 WordSetID outs_new;
3517 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003518 tl_assert(keyW == (UWord)src);
3519 outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003520 presentF = outs_new == links->outs;
3521 links->outs = outs_new;
3522 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003523 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003524 links->inns = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003525 links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3526 VG_(addToFM)( laog, (UWord)src, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003527 }
3528 /* Update the in edges for dst */
3529 keyW = 0;
3530 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003531 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003532 WordSetID inns_new;
3533 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003534 tl_assert(keyW == (UWord)dst);
3535 inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003536 presentR = inns_new == links->inns;
3537 links->inns = inns_new;
3538 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003539 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
florian6bf37262012-10-21 03:23:36 +00003540 links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003541 links->outs = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003542 VG_(addToFM)( laog, (UWord)dst, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003543 }
3544
3545 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3546
3547 if (!presentF && src->acquired_at && dst->acquired_at) {
3548 LAOGLinkExposition expo;
3549 /* If this edge is entering the graph, and we have acquired_at
3550 information for both src and dst, record those acquisition
3551 points. Hence, if there is later a violation of this
3552 ordering, we can show the user the two places in which the
3553 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003554 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003555 src->guestaddr, dst->guestaddr);
3556 expo.src_ga = src->guestaddr;
3557 expo.dst_ga = dst->guestaddr;
3558 expo.src_ec = NULL;
3559 expo.dst_ec = NULL;
3560 tl_assert(laog_exposition);
florian6bf37262012-10-21 03:23:36 +00003561 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003562 /* we already have it; do nothing */
3563 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003564 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3565 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003566 expo2->src_ga = src->guestaddr;
3567 expo2->dst_ga = dst->guestaddr;
3568 expo2->src_ec = src->acquired_at;
3569 expo2->dst_ec = dst->acquired_at;
florian6bf37262012-10-21 03:23:36 +00003570 VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
sewardjb4112022007-11-09 22:49:28 +00003571 }
3572 }
sewardj866c80c2011-10-22 19:29:51 +00003573
3574 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3575 univ_laog_do_GC();
sewardjb4112022007-11-09 22:49:28 +00003576}
3577
3578__attribute__((noinline))
3579static void laog__del_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003580 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003581 LAOGLinks* links;
sewardj866c80c2011-10-22 19:29:51 +00003582 if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
sewardjb4112022007-11-09 22:49:28 +00003583 /* Update the out edges for src */
3584 keyW = 0;
3585 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003586 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003587 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003588 tl_assert(keyW == (UWord)src);
3589 links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003590 }
3591 /* Update the in edges for dst */
3592 keyW = 0;
3593 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003594 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003595 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003596 tl_assert(keyW == (UWord)dst);
3597 links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003598 }
sewardj866c80c2011-10-22 19:29:51 +00003599
3600 /* Remove the exposition of src,dst (if present) */
3601 {
3602 LAOGLinkExposition *fm_expo;
3603
3604 LAOGLinkExposition expo;
3605 expo.src_ga = src->guestaddr;
3606 expo.dst_ga = dst->guestaddr;
3607 expo.src_ec = NULL;
3608 expo.dst_ec = NULL;
3609
3610 if (VG_(delFromFM) (laog_exposition,
3611 (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3612 HG_(free) (fm_expo);
3613 }
3614 }
3615
3616   /* deleting edges can increase the nr of WS so check for gc. */
3617 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3618 univ_laog_do_GC();
3619 if (0) VG_(printf)("laog__del_edge exit\n");
sewardjb4112022007-11-09 22:49:28 +00003620}
3621
3622__attribute__((noinline))
3623static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003624 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003625 LAOGLinks* links;
3626 keyW = 0;
3627 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003628 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003629 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003630 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003631 return links->outs;
3632 } else {
3633 return HG_(emptyWS)( univ_laog );
3634 }
3635}
3636
3637__attribute__((noinline))
3638static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003639 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003640 LAOGLinks* links;
3641 keyW = 0;
3642 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003643 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003644 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003645 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003646 return links->inns;
3647 } else {
3648 return HG_(emptyWS)( univ_laog );
3649 }
3650}
3651
3652__attribute__((noinline))
florian6bf37262012-10-21 03:23:36 +00003653static void laog__sanity_check ( const HChar* who ) {
3654 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003655 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003656 Lock* me;
3657 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003658 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003659 me = NULL;
3660 links = NULL;
3661 if (0) VG_(printf)("laog sanity check\n");
florian6bf37262012-10-21 03:23:36 +00003662 while (VG_(nextIterFM)( laog, (UWord*)&me,
3663 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003664 tl_assert(me);
3665 tl_assert(links);
3666 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3667 for (i = 0; i < ws_size; i++) {
3668 if ( ! HG_(elemWS)( univ_laog,
3669 laog__succs( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003670 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003671 goto bad;
3672 }
3673 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3674 for (i = 0; i < ws_size; i++) {
3675 if ( ! HG_(elemWS)( univ_laog,
3676 laog__preds( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003677 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003678 goto bad;
3679 }
3680 me = NULL;
3681 links = NULL;
3682 }
sewardj896f6f92008-08-19 08:38:52 +00003683 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003684 return;
3685
3686 bad:
3687 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3688 laog__show(who);
3689 tl_assert(0);
3690}
3691
3692/* If there is a path in laog from 'src' to any of the elements in
3693 'dst', return an arbitrarily chosen element of 'dst' reachable from
3694   'src'.  If no path exists from 'src' to any element in 'dst', return
3695 NULL. */
3696__attribute__((noinline))
3697static
3698Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3699{
3700 Lock* ret;
florian6bf37262012-10-21 03:23:36 +00003701 Word ssz;
sewardjb4112022007-11-09 22:49:28 +00003702 XArray* stack; /* of Lock* */
3703 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3704 Lock* here;
3705 WordSetID succs;
florian6bf37262012-10-21 03:23:36 +00003706 UWord succs_size, i;
sewardj250ec2e2008-02-15 22:02:30 +00003707 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003708 //laog__sanity_check();
3709
3710 /* If the destination set is empty, we can never get there from
3711 'src' :-), so don't bother to try */
3712 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3713 return NULL;
3714
3715 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003716 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3717 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003718
3719 (void) VG_(addToXA)( stack, &src );
3720
3721 while (True) {
3722
3723 ssz = VG_(sizeXA)( stack );
3724
3725 if (ssz == 0) { ret = NULL; break; }
3726
3727 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3728 VG_(dropTailXA)( stack, 1 );
3729
florian6bf37262012-10-21 03:23:36 +00003730 if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
sewardjb4112022007-11-09 22:49:28 +00003731
florian6bf37262012-10-21 03:23:36 +00003732 if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
sewardjb4112022007-11-09 22:49:28 +00003733 continue;
3734
florian6bf37262012-10-21 03:23:36 +00003735 VG_(addToFM)( visited, (UWord)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003736
3737 succs = laog__succs( here );
3738 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3739 for (i = 0; i < succs_size; i++)
3740 (void) VG_(addToXA)( stack, &succs_words[i] );
3741 }
3742
sewardj896f6f92008-08-19 08:38:52 +00003743 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003744 VG_(deleteXA)( stack );
3745 return ret;
3746}
3747
3748
3749/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3750 between 'lk' and the locks already held by 'thr' and issue a
3751 complaint if so. Also, update the ordering graph appropriately.
3752*/
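/* Two-lock example of the check (illustrative): if some thread does
      lock(L1); lock(L2);      -- records the edge L1 --> L2 in laog
      unlock(L2); unlock(L1);
      lock(L2); lock(L1);      -- DFS from L1 reaches L2 in locksetA
   then the second lock(L1) arrives here with L2 still held, the path
   L1 --*--> L2 is found, and record_error_LockOrder reports the two
   acquisition points saved in laog_exposition. */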
3753__attribute__((noinline))
3754static void laog__pre_thread_acquires_lock (
3755 Thread* thr, /* NB: BEFORE lock is added */
3756 Lock* lk
3757 )
3758{
sewardj250ec2e2008-02-15 22:02:30 +00003759 UWord* ls_words;
florian6bf37262012-10-21 03:23:36 +00003760 UWord ls_size, i;
sewardjb4112022007-11-09 22:49:28 +00003761 Lock* other;
3762
3763 /* It may be that 'thr' already holds 'lk' and is recursively
3764      relocking it.  In this case we just ignore the call. */
3765 /* NB: univ_lsets really is correct here */
florian6bf37262012-10-21 03:23:36 +00003766 if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
sewardjb4112022007-11-09 22:49:28 +00003767 return;
3768
sewardjb4112022007-11-09 22:49:28 +00003769 /* First, the check. Complain if there is any path in laog from lk
3770 to any of the locks already held by thr, since if any such path
3771 existed, it would mean that previously lk was acquired before
3772 (rather than after, as we are doing here) at least one of those
3773 locks.
3774 */
3775 other = laog__do_dfs_from_to(lk, thr->locksetA);
3776 if (other) {
3777 LAOGLinkExposition key, *found;
3778 /* So we managed to find a path lk --*--> other in the graph,
3779 which implies that 'lk' should have been acquired before
3780 'other' but is in fact being acquired afterwards. We present
3781 the lk/other arguments to record_error_LockOrder in the order
3782 in which they should have been acquired. */
3783 /* Go look in the laog_exposition mapping, to find the allocation
3784 points for this edge, so we can show the user. */
3785 key.src_ga = lk->guestaddr;
3786 key.dst_ga = other->guestaddr;
3787 key.src_ec = NULL;
3788 key.dst_ec = NULL;
3789 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003790 if (VG_(lookupFM)( laog_exposition,
florian6bf37262012-10-21 03:23:36 +00003791 (UWord*)&found, NULL, (UWord)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003792 tl_assert(found != &key);
3793 tl_assert(found->src_ga == key.src_ga);
3794 tl_assert(found->dst_ga == key.dst_ga);
3795 tl_assert(found->src_ec);
3796 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003797 HG_(record_error_LockOrder)(
3798 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003799 found->src_ec, found->dst_ec, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003800 } else {
3801 /* Hmm. This can't happen (can it?) */
philippeebe25802013-01-30 23:21:34 +00003802 /* Yes, it can happen: see tests/tc14_laog_dinphils.
3803 Imagine we have 3 philosophers A B C, and the forks
3804 between them:
3805
3806 C
3807
3808 fCA fBC
3809
3810 A fAB B
3811
3812 Let's have the following actions:
3813 A takes fCA,fAB
3814 A releases fCA,fAB
3815 B takes fAB,fBC
3816 B releases fAB,fBC
3817 C takes fBC,fCA
3818 C releases fBC,fCA
3819
3820 Helgrind will report a lock order error when C takes fCA.
3821 Effectively, we have a deadlock if the following
3822 sequence is done:
3823 A takes fCA
3824 B takes fAB
3825 C takes fBC
3826
3827 The error reported is:
3828 Observed (incorrect) order fBC followed by fCA
3829 but the stack traces that have established the required order
3830 are not given.
3831
3832         This is because there is no pair (fCA, fBC) in laog exposition:
3833 the laog_exposition records all pairs of locks between a new lock
3834 taken by a thread and all the already taken locks.
3835 So, there is no laog_exposition (fCA, fBC) as no thread ever
3836 first locked fCA followed by fBC.
3837
3838 In other words, when the deadlock cycle involves more than
 3839 two locks, helgrind does not report the sequence of
3840 operations that created the cycle.
3841
3842 However, we can report the current stack trace (where
 3843 lk is being taken), and the stack trace where 'other' was acquired.
3844 Effectively, the variable 'other' contains a lock currently
3845 held by this thread, with its 'acquired_at'. */
3846
sewardjf98e1c02008-10-25 16:22:41 +00003847 HG_(record_error_LockOrder)(
3848 thr, lk->guestaddr, other->guestaddr,
philippeebe25802013-01-30 23:21:34 +00003849 NULL, NULL, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003850 }
3851 }
3852
3853 /* Second, add to laog the pairs
3854 (old, lk) | old <- locks already held by thr
3855 Since both old and lk are currently held by thr, their acquired_at
3856 fields must be non-NULL.
3857 */
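   /* Illustrative sketch (hypothetical locks): if thr currently
      holds {L1, L2} and is now acquiring lk, the loop below adds
      the edges L1 -> lk and L2 -> lk, i.e. "L1 and L2 have been
      observed to be acquired before lk". */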
3858 tl_assert(lk->acquired_at);
3859 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3860 for (i = 0; i < ls_size; i++) {
3861 Lock* old = (Lock*)ls_words[i];
3862 tl_assert(old->acquired_at);
3863 laog__add_edge( old, lk );
3864 }
3865
 3866 /* Why "except_Locks"? We're here because a lock is being
3867 acquired by a thread, and we're in an inconsistent state here.
3868 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3869 When called in this inconsistent state, locks__sanity_check duly
3870 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003871 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003872 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3873}
3874
sewardj866c80c2011-10-22 19:29:51 +00003875/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3876static UWord* UWordV_dup(UWord* words, Word words_size)
3877{
3878 UInt i;
3879
3880 if (words_size == 0)
3881 return NULL;
3882
3883 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3884
3885 for (i = 0; i < words_size; i++)
3886 dup[i] = words[i];
3887
3888 return dup;
3889}
sewardjb4112022007-11-09 22:49:28 +00003890
3891/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3892
3893__attribute__((noinline))
3894static void laog__handle_one_lock_deletion ( Lock* lk )
3895{
3896 WordSetID preds, succs;
florian6bf37262012-10-21 03:23:36 +00003897 UWord preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003898 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003899
3900 preds = laog__preds( lk );
3901 succs = laog__succs( lk );
3902
sewardj866c80c2011-10-22 19:29:51 +00003903 // We need to duplicate the payloads, as they can be garbage collected
3904 // during the del/add operations below.
sewardjb4112022007-11-09 22:49:28 +00003905 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
sewardj866c80c2011-10-22 19:29:51 +00003906 preds_words = UWordV_dup(preds_words, preds_size);
3907
3908 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3909 succs_words = UWordV_dup(succs_words, succs_size);
3910
sewardjb4112022007-11-09 22:49:28 +00003911 for (i = 0; i < preds_size; i++)
3912 laog__del_edge( (Lock*)preds_words[i], lk );
3913
sewardjb4112022007-11-09 22:49:28 +00003914 for (j = 0; j < succs_size; j++)
3915 laog__del_edge( lk, (Lock*)succs_words[j] );
3916
3917 for (i = 0; i < preds_size; i++) {
3918 for (j = 0; j < succs_size; j++) {
3919 if (preds_words[i] != succs_words[j]) {
3920 /* This can pass unlocked locks to laog__add_edge, since
3921 we're deleting stuff. So their acquired_at fields may
3922 be NULL. */
3923 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3924 }
3925 }
3926 }
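   /* Sketch of what the loop above preserves: given edges
      A -> lk -> B in laog, deleting lk's own edges would forget
      that A precedes B.  Re-adding A -> B directly keeps the
      transitive ordering alive, so later cycles through A and B
      are still detected. */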
sewardj866c80c2011-10-22 19:29:51 +00003927
3928 if (preds_words)
3929 HG_(free) (preds_words);
3930 if (succs_words)
3931 HG_(free) (succs_words);
3932
3933 // Remove lk information from laog links FM
3934 {
3935 LAOGLinks *links;
3936 Lock* linked_lk;
3937
3938 if (VG_(delFromFM) (laog,
3939 (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
3940 tl_assert (linked_lk == lk);
3941 HG_(free) (links);
3942 }
3943 }
3944 /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
sewardjb4112022007-11-09 22:49:28 +00003945}
3946
sewardj1cbc12f2008-11-10 16:16:46 +00003947//__attribute__((noinline))
3948//static void laog__handle_lock_deletions (
3949// WordSetID /* in univ_laog */ locksToDelete
3950// )
3951//{
3952// Word i, ws_size;
3953// UWord* ws_words;
3954//
sewardj1cbc12f2008-11-10 16:16:46 +00003955//
3956// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003957// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003958// for (i = 0; i < ws_size; i++)
3959// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3960//
3961// if (HG_(clo_sanity_flags) & SCE_LAOG)
3962// all__sanity_check("laog__handle_lock_deletions-post");
3963//}
sewardjb4112022007-11-09 22:49:28 +00003964
3965
3966/*--------------------------------------------------------------*/
3967/*--- Malloc/free replacements ---*/
3968/*--------------------------------------------------------------*/
3969
3970typedef
3971 struct {
3972 void* next; /* required by m_hashtable */
3973 Addr payload; /* ptr to actual block */
3974 SizeT szB; /* size requested */
3975 ExeContext* where; /* where it was allocated */
3976 Thread* thr; /* allocating thread */
3977 }
3978 MallocMeta;
3979
3980/* A hash table of MallocMetas, used to track malloc'd blocks
3981 (obviously). */
3982static VgHashTable hg_mallocmeta_table = NULL;
3983
philippe5fbc9762013-12-01 19:28:48 +00003984/* MallocMeta are small elements. We use a pool to avoid
3985 the overhead of malloc for each MallocMeta. */
3986static PoolAlloc *MallocMeta_poolalloc = NULL;
sewardjb4112022007-11-09 22:49:28 +00003987
3988static MallocMeta* new_MallocMeta ( void ) {
philippe5fbc9762013-12-01 19:28:48 +00003989 MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
3990 VG_(memset)(md, 0, sizeof(MallocMeta));
sewardjb4112022007-11-09 22:49:28 +00003991 return md;
3992}
3993static void delete_MallocMeta ( MallocMeta* md ) {
philippe5fbc9762013-12-01 19:28:48 +00003994 VG_(freeEltPA)(MallocMeta_poolalloc, md);
sewardjb4112022007-11-09 22:49:28 +00003995}
3996
3997
3998/* Allocate a client block and set up the metadata for it. */
3999
4000static
4001void* handle_alloc ( ThreadId tid,
4002 SizeT szB, SizeT alignB, Bool is_zeroed )
4003{
4004 Addr p;
4005 MallocMeta* md;
4006
4007 tl_assert( ((SSizeT)szB) >= 0 );
4008 p = (Addr)VG_(cli_malloc)(alignB, szB);
4009 if (!p) {
4010 return NULL;
4011 }
4012 if (is_zeroed)
4013 VG_(memset)((void*)p, 0, szB);
4014
4015 /* Note that map_threads_lookup must succeed (cannot assert), since
4016 memory can only be allocated by currently alive threads, hence
4017 they must have an entry in map_threads. */
4018 md = new_MallocMeta();
4019 md->payload = p;
4020 md->szB = szB;
4021 md->where = VG_(record_ExeContext)( tid, 0 );
4022 md->thr = map_threads_lookup( tid );
4023
4024 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
4025
4026 /* Tell the lower level memory wranglers. */
4027 evh__new_mem_heap( p, szB, is_zeroed );
4028
4029 return (void*)p;
4030}
4031
4032/* Re the checks for less-than-zero (also in hg_cli__realloc below):
4033 Cast to a signed type to catch any unexpectedly negative args.
4034 We're assuming here that the size asked for is not greater than
4035 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4036 platforms). */
4037static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4038 if (((SSizeT)n) < 0) return NULL;
4039 return handle_alloc ( tid, n, VG_(clo_alignment),
4040 /*is_zeroed*/False );
4041}
4042static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4043 if (((SSizeT)n) < 0) return NULL;
4044 return handle_alloc ( tid, n, VG_(clo_alignment),
4045 /*is_zeroed*/False );
4046}
4047static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4048 if (((SSizeT)n) < 0) return NULL;
4049 return handle_alloc ( tid, n, VG_(clo_alignment),
4050 /*is_zeroed*/False );
4051}
4052static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4053 if (((SSizeT)n) < 0) return NULL;
4054 return handle_alloc ( tid, n, align,
4055 /*is_zeroed*/False );
4056}
4057static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4058 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4059 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4060 /*is_zeroed*/True );
4061}
4062
4063
4064/* Free a client block, including getting rid of the relevant
4065 metadata. */
4066
4067static void handle_free ( ThreadId tid, void* p )
4068{
4069 MallocMeta *md, *old_md;
4070 SizeT szB;
4071
4072 /* First see if we can find the metadata for 'p'. */
4073 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4074 if (!md)
4075 return; /* apparently freeing a bogus address. Oh well. */
4076
4077 tl_assert(md->payload == (Addr)p);
4078 szB = md->szB;
4079
4080 /* Nuke the metadata block */
4081 old_md = (MallocMeta*)
4082 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4083 tl_assert(old_md); /* it must be present - we just found it */
4084 tl_assert(old_md == md);
4085 tl_assert(old_md->payload == (Addr)p);
4086
4087 VG_(cli_free)((void*)old_md->payload);
4088 delete_MallocMeta(old_md);
4089
4090 /* Tell the lower level memory wranglers. */
4091 evh__die_mem_heap( (Addr)p, szB );
4092}
4093
4094static void hg_cli__free ( ThreadId tid, void* p ) {
4095 handle_free(tid, p);
4096}
4097static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4098 handle_free(tid, p);
4099}
4100static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4101 handle_free(tid, p);
4102}
4103
4104
4105static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4106{
4107 MallocMeta *md, *md_new, *md_tmp;
4108 SizeT i;
4109
4110 Addr payload = (Addr)payloadV;
4111
4112 if (((SSizeT)new_size) < 0) return NULL;
4113
4114 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4115 if (!md)
4116 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4117
4118 tl_assert(md->payload == payload);
4119
4120 if (md->szB == new_size) {
4121 /* size unchanged */
4122 md->where = VG_(record_ExeContext)(tid, 0);
4123 return payloadV;
4124 }
4125
4126 if (md->szB > new_size) {
4127 /* new size is smaller */
4128 md->szB = new_size;
4129 md->where = VG_(record_ExeContext)(tid, 0);
4130 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4131 return payloadV;
4132 }
4133
4134 /* else */ {
4135 /* new size is bigger */
4136 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4137
4138 /* First half kept and copied, second half new */
4139 // FIXME: shouldn't we use a copier which implements the
4140 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004141 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004142 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004143 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004144 /* FIXME: can anything funny happen here? specifically, if the
4145 old range contained a lock, then die_mem_heap will complain.
4146 Is that the correct behaviour? Not sure. */
4147 evh__die_mem_heap( payload, md->szB );
4148
4149 /* Copy from old to new */
4150 for (i = 0; i < md->szB; i++)
4151 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4152
 4153 /* Because the metadata hash table is indexed by payload address,
4154 we have to get rid of the old hash table entry and make a new
4155 one. We can't just modify the existing metadata in place,
4156 because then it would (almost certainly) be in the wrong hash
4157 chain. */
4158 md_new = new_MallocMeta();
4159 *md_new = *md;
4160
4161 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4162 tl_assert(md_tmp);
4163 tl_assert(md_tmp == md);
4164
4165 VG_(cli_free)((void*)md->payload);
4166 delete_MallocMeta(md);
4167
4168 /* Update fields */
4169 md_new->where = VG_(record_ExeContext)( tid, 0 );
4170 md_new->szB = new_size;
4171 md_new->payload = p_new;
4172 md_new->thr = map_threads_lookup( tid );
4173
4174 /* and add */
4175 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4176
4177 return (void*)p_new;
4178 }
4179}
4180
njn8b140de2009-02-17 04:31:18 +00004181static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4182{
4183 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4184
4185 // There may be slop, but pretend there isn't because only the asked-for
4186 // area will have been shadowed properly.
4187 return ( md ? md->szB : 0 );
4188}
4189
sewardjb4112022007-11-09 22:49:28 +00004190
sewardj095d61e2010-03-11 13:43:18 +00004191/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004192 Slow linear search. With a bit of hash table help if 'data_addr'
4193 is either the start of a block or up to 15 word-sized steps along
4194 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004195
4196static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4197{
sewardjc8028ad2010-05-05 09:34:42 +00004198 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4199 right at it. */
4200 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4201 return True;
4202 /* else normal interval rules apply */
4203 if (LIKELY(a < mm->payload)) return False;
4204 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4205 return True;
sewardj095d61e2010-03-11 13:43:18 +00004206}
4207
sewardjc8028ad2010-05-05 09:34:42 +00004208Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00004209 /*OUT*/Addr* payload,
4210 /*OUT*/SizeT* szB,
4211 Addr data_addr )
4212{
4213 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004214 Int i;
4215 const Int n_fast_check_words = 16;
4216
4217 /* First, do a few fast searches on the basis that data_addr might
4218 be exactly the start of a block or up to 15 words inside. This
4219 can happen commonly via the creq
4220 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4221 for (i = 0; i < n_fast_check_words; i++) {
4222 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4223 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4224 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4225 goto found;
4226 }
4227
sewardj095d61e2010-03-11 13:43:18 +00004228 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004229 some such, it's hard to see how to do better. We have to check
4230 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004231 VG_(HT_ResetIter)(hg_mallocmeta_table);
4232 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004233 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4234 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004235 }
sewardjc8028ad2010-05-05 09:34:42 +00004236
4237 /* Not found. Bah. */
4238 return False;
4239 /*NOTREACHED*/
4240
4241 found:
4242 tl_assert(mm);
4243 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4244 if (where) *where = mm->where;
4245 if (payload) *payload = mm->payload;
4246 if (szB) *szB = mm->szB;
4247 return True;
sewardj095d61e2010-03-11 13:43:18 +00004248}
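/* Typical use of the above (a sketch; 'where', 'payload', 'szB' and
   'a' are hypothetical locals):

      ExeContext* where; Addr payload; SizeT szB;
      if (HG_(mm_find_containing_block)(&where, &payload, &szB, a)) {
         ... 'a' lies within the heap block [payload, payload+szB) ...
      }

   Any of the OUT parameters may be passed as NULL if the caller
   does not need that value. */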
4249
4250
sewardjb4112022007-11-09 22:49:28 +00004251/*--------------------------------------------------------------*/
4252/*--- Instrumentation ---*/
4253/*--------------------------------------------------------------*/
4254
sewardjcafe5052013-01-17 14:24:35 +00004255#define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
sewardjffce8152011-06-24 10:09:41 +00004256#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4257#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4258#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4259#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4260#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4261
sewardjcafe5052013-01-17 14:24:35 +00004262/* This takes and returns atoms, of course. Not full IRExprs. */
4263static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4264{
4265 tl_assert(arg1 && arg2);
4266 tl_assert(isIRAtom(arg1));
4267 tl_assert(isIRAtom(arg2));
4268 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4269 code, I know. */
4270 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4271 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4272 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4273 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4274 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4275 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4276 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4277 mkexpr(wide2))));
4278 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4279 return mkexpr(res);
4280}
4281
sewardjffce8152011-06-24 10:09:41 +00004282static void instrument_mem_access ( IRSB* sbOut,
sewardjb4112022007-11-09 22:49:28 +00004283 IRExpr* addr,
4284 Int szB,
4285 Bool isStore,
sewardjffce8152011-06-24 10:09:41 +00004286 Int hWordTy_szB,
sewardjcafe5052013-01-17 14:24:35 +00004287 Int goff_sp,
4288 IRExpr* guard ) /* NULL => True */
sewardjb4112022007-11-09 22:49:28 +00004289{
4290 IRType tyAddr = Ity_INVALID;
florian6bf37262012-10-21 03:23:36 +00004291 const HChar* hName = NULL;
sewardjb4112022007-11-09 22:49:28 +00004292 void* hAddr = NULL;
4293 Int regparms = 0;
4294 IRExpr** argv = NULL;
4295 IRDirty* di = NULL;
4296
sewardjffce8152011-06-24 10:09:41 +00004297 // THRESH is the size of the window above SP (well,
4298 // mostly above) that we assume implies a stack reference.
4299 const Int THRESH = 4096 * 4; // somewhat arbitrary
4300 const Int rz_szB = VG_STACK_REDZONE_SZB;
4301
sewardjb4112022007-11-09 22:49:28 +00004302 tl_assert(isIRAtom(addr));
4303 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4304
sewardjffce8152011-06-24 10:09:41 +00004305 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
sewardjb4112022007-11-09 22:49:28 +00004306 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4307
4308 /* So the effective address is in 'addr' now. */
4309 regparms = 1; // unless stated otherwise
4310 if (isStore) {
4311 switch (szB) {
4312 case 1:
sewardj23f12002009-07-24 08:45:08 +00004313 hName = "evh__mem_help_cwrite_1";
4314 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004315 argv = mkIRExprVec_1( addr );
4316 break;
4317 case 2:
sewardj23f12002009-07-24 08:45:08 +00004318 hName = "evh__mem_help_cwrite_2";
4319 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004320 argv = mkIRExprVec_1( addr );
4321 break;
4322 case 4:
sewardj23f12002009-07-24 08:45:08 +00004323 hName = "evh__mem_help_cwrite_4";
4324 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004325 argv = mkIRExprVec_1( addr );
4326 break;
4327 case 8:
sewardj23f12002009-07-24 08:45:08 +00004328 hName = "evh__mem_help_cwrite_8";
4329 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004330 argv = mkIRExprVec_1( addr );
4331 break;
4332 default:
4333 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4334 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004335 hName = "evh__mem_help_cwrite_N";
4336 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004337 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4338 break;
4339 }
4340 } else {
4341 switch (szB) {
4342 case 1:
sewardj23f12002009-07-24 08:45:08 +00004343 hName = "evh__mem_help_cread_1";
4344 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004345 argv = mkIRExprVec_1( addr );
4346 break;
4347 case 2:
sewardj23f12002009-07-24 08:45:08 +00004348 hName = "evh__mem_help_cread_2";
4349 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004350 argv = mkIRExprVec_1( addr );
4351 break;
4352 case 4:
sewardj23f12002009-07-24 08:45:08 +00004353 hName = "evh__mem_help_cread_4";
4354 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004355 argv = mkIRExprVec_1( addr );
4356 break;
4357 case 8:
sewardj23f12002009-07-24 08:45:08 +00004358 hName = "evh__mem_help_cread_8";
4359 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004360 argv = mkIRExprVec_1( addr );
4361 break;
4362 default:
4363 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4364 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004365 hName = "evh__mem_help_cread_N";
4366 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004367 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4368 break;
4369 }
4370 }
4371
sewardjffce8152011-06-24 10:09:41 +00004372 /* Create the helper. */
sewardjb4112022007-11-09 22:49:28 +00004373 tl_assert(hName);
4374 tl_assert(hAddr);
4375 tl_assert(argv);
4376 di = unsafeIRDirty_0_N( regparms,
4377 hName, VG_(fnptr_to_fnentry)( hAddr ),
4378 argv );
sewardjffce8152011-06-24 10:09:41 +00004379
4380 if (! HG_(clo_check_stack_refs)) {
4381 /* We're ignoring memory references which are (obviously) to the
4382 stack. In fact just skip stack refs that are within 4 pages
4383 of SP (SP - the redzone, really), as that's simple, easy, and
4384 filters out most stack references. */
4385 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4386 some arbitrary N. If that is true then addr is outside the
4387 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4388 pages) then we can say addr is within a few pages of SP and
4389 so can't possibly be a heap access, and so can be skipped.
4390
4391 Note that the condition simplifies to
4392 (addr - SP + RZ) >u N
4393 which generates better code in x86/amd64 backends, but it does
4394 not unfortunately simplify to
4395 (addr - SP) >u (N - RZ)
4396 (would be beneficial because N - RZ is a constant) because
4397 wraparound arithmetic messes up the comparison. eg.
4398 20 >u 10 == True,
4399 but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4400 */
4401 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4402 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4403
4404 /* "addr - SP" */
4405 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4406 addStmtToIRSB(
4407 sbOut,
4408 assign(addr_minus_sp,
4409 tyAddr == Ity_I32
4410 ? binop(Iop_Sub32, addr, mkexpr(sp))
4411 : binop(Iop_Sub64, addr, mkexpr(sp)))
4412 );
4413
4414 /* "addr - SP + RZ" */
4415 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4416 addStmtToIRSB(
4417 sbOut,
4418 assign(diff,
4419 tyAddr == Ity_I32
4420 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4421 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4422 );
4423
sewardjcafe5052013-01-17 14:24:35 +00004424 /* guardA == "guard on the address" */
4425 IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
sewardjffce8152011-06-24 10:09:41 +00004426 addStmtToIRSB(
4427 sbOut,
sewardjcafe5052013-01-17 14:24:35 +00004428 assign(guardA,
sewardjffce8152011-06-24 10:09:41 +00004429 tyAddr == Ity_I32
4430 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4431 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4432 );
sewardjcafe5052013-01-17 14:24:35 +00004433 di->guard = mkexpr(guardA);
4434 }
4435
4436 /* If there's a guard on the access itself (as supplied by the
4437 caller of this routine), we need to AND that in to any guard we
4438 might already have. */
4439 if (guard) {
4440 di->guard = mk_And1(sbOut, di->guard, guard);
sewardjffce8152011-06-24 10:09:41 +00004441 }
4442
4443 /* Add the helper. */
4444 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
sewardjb4112022007-11-09 22:49:28 +00004445}
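/* Worked example (a sketch): for a 4-byte client store to address
   'a' on a 64-bit guest, with --check-stack-refs=no in effect, the
   statements appended above amount to:

      sp    = GET:I64(goff_sp)
      diff  = Add64(Sub64(a, sp), RZ)
      guard = CmpLT64U(THRESH, diff)
      if (guard) call evh__mem_help_cwrite_4(a)   -- dirty helper

   so the helper call is skipped whenever 'a' is within roughly
   THRESH bytes above SP-RZ, i.e. almost certainly a stack access. */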
4446
4447
sewardja0eee322009-07-31 08:46:35 +00004448/* Figure out if GA is a guest code address in the dynamic linker, and
4449 if so return True. Otherwise (and in case of any doubt) return
 4450 False. (Errs on the safe side: False is the safe value.) */
4451static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4452{
4453 DebugInfo* dinfo;
florian19f91bb2012-11-10 22:29:54 +00004454 const HChar* soname;
sewardja0eee322009-07-31 08:46:35 +00004455 if (0) return False;
4456
sewardje3f1e592009-07-31 09:41:29 +00004457 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004458 if (!dinfo) return False;
4459
sewardje3f1e592009-07-31 09:41:29 +00004460 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004461 tl_assert(soname);
4462 if (0) VG_(printf)("%s\n", soname);
4463
4464# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004465 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004466 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4467 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4468 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4469 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4470# elif defined(VGO_darwin)
4471 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4472# else
4473# error "Unsupported OS"
4474# endif
4475 return False;
4476}
4477
sewardjb4112022007-11-09 22:49:28 +00004478static
4479IRSB* hg_instrument ( VgCallbackClosure* closure,
4480 IRSB* bbIn,
4481 VexGuestLayout* layout,
4482 VexGuestExtents* vge,
florianca503be2012-10-07 21:59:42 +00004483 VexArchInfo* archinfo_host,
sewardjb4112022007-11-09 22:49:28 +00004484 IRType gWordTy, IRType hWordTy )
4485{
sewardj1c0ce7a2009-07-01 08:10:49 +00004486 Int i;
4487 IRSB* bbOut;
4488 Addr64 cia; /* address of current insn */
4489 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004490 Bool inLDSO = False;
4491 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004492
sewardjffce8152011-06-24 10:09:41 +00004493 const Int goff_sp = layout->offset_SP;
4494
sewardjb4112022007-11-09 22:49:28 +00004495 if (gWordTy != hWordTy) {
4496 /* We don't currently support this case. */
4497 VG_(tool_panic)("host/guest word size mismatch");
4498 }
4499
sewardja0eee322009-07-31 08:46:35 +00004500 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4501 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4502 }
4503
sewardjb4112022007-11-09 22:49:28 +00004504 /* Set up BB */
4505 bbOut = emptyIRSB();
4506 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4507 bbOut->next = deepCopyIRExpr(bbIn->next);
4508 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004509 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004510
4511 // Copy verbatim any IR preamble preceding the first IMark
4512 i = 0;
4513 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4514 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4515 i++;
4516 }
4517
sewardj1c0ce7a2009-07-01 08:10:49 +00004518 // Get the first statement, and initial cia from it
4519 tl_assert(bbIn->stmts_used > 0);
4520 tl_assert(i < bbIn->stmts_used);
4521 st = bbIn->stmts[i];
4522 tl_assert(Ist_IMark == st->tag);
4523 cia = st->Ist.IMark.addr;
4524 st = NULL;
4525
sewardjb4112022007-11-09 22:49:28 +00004526 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004527 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004528 tl_assert(st);
4529 tl_assert(isFlatIRStmt(st));
4530 switch (st->tag) {
4531 case Ist_NoOp:
4532 case Ist_AbiHint:
4533 case Ist_Put:
4534 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004535 case Ist_Exit:
4536 /* None of these can contain any memory references. */
4537 break;
4538
sewardj1c0ce7a2009-07-01 08:10:49 +00004539 case Ist_IMark:
4540 /* no mem refs, but note the insn address. */
4541 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004542 /* Don't instrument the dynamic linker. It generates a
4543 lot of races which we just expensively suppress, so
4544 it's pointless.
4545
4546 Avoid flooding is_in_dynamic_linker_shared_object with
4547 requests by only checking at transitions between 4K
4548 pages. */
4549 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4550 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4551 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4552 inLDSO = is_in_dynamic_linker_shared_object(cia);
4553 } else {
4554 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4555 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004556 break;
4557
sewardjb4112022007-11-09 22:49:28 +00004558 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004559 switch (st->Ist.MBE.event) {
4560 case Imbe_Fence:
4561 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004562 default:
4563 goto unhandled;
4564 }
sewardjb4112022007-11-09 22:49:28 +00004565 break;
4566
sewardj1c0ce7a2009-07-01 08:10:49 +00004567 case Ist_CAS: {
4568 /* Atomic read-modify-write cycle. Just pretend it's a
4569 read. */
4570 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004571 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4572 if (isDCAS) {
4573 tl_assert(cas->expdHi);
4574 tl_assert(cas->dataHi);
4575 } else {
4576 tl_assert(!cas->expdHi);
4577 tl_assert(!cas->dataHi);
4578 }
4579 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004580 if (!inLDSO) {
4581 instrument_mem_access(
4582 bbOut,
4583 cas->addr,
4584 (isDCAS ? 2 : 1)
4585 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4586 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004587 sizeofIRType(hWordTy), goff_sp,
4588 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004589 );
4590 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004591 break;
4592 }
4593
sewardjdb5907d2009-11-26 17:20:21 +00004594 case Ist_LLSC: {
4595 /* We pretend store-conditionals don't exist, viz, ignore
4596 them. Whereas load-linked's are treated the same as
4597 normal loads. */
4598 IRType dataTy;
4599 if (st->Ist.LLSC.storedata == NULL) {
4600 /* LL */
4601 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004602 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004603 instrument_mem_access(
4604 bbOut,
4605 st->Ist.LLSC.addr,
4606 sizeofIRType(dataTy),
4607 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004608 sizeofIRType(hWordTy), goff_sp,
4609 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004610 );
4611 }
sewardjdb5907d2009-11-26 17:20:21 +00004612 } else {
4613 /* SC */
4614 /*ignore */
4615 }
4616 break;
4617 }
4618
4619 case Ist_Store:
sewardjdb5907d2009-11-26 17:20:21 +00004620 if (!inLDSO) {
4621 instrument_mem_access(
4622 bbOut,
4623 st->Ist.Store.addr,
4624 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4625 True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004626 sizeofIRType(hWordTy), goff_sp,
4627 NULL/*no-guard*/
sewardjdb5907d2009-11-26 17:20:21 +00004628 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004629 }
njnb83caf22009-05-25 01:47:56 +00004630 break;
sewardjb4112022007-11-09 22:49:28 +00004631
sewardjcafe5052013-01-17 14:24:35 +00004632 case Ist_StoreG: {
4633 IRStoreG* sg = st->Ist.StoreG.details;
4634 IRExpr* data = sg->data;
4635 IRExpr* addr = sg->addr;
4636 IRType type = typeOfIRExpr(bbIn->tyenv, data);
4637 tl_assert(type != Ity_INVALID);
4638 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4639 True/*isStore*/,
4640 sizeofIRType(hWordTy),
4641 goff_sp, sg->guard );
4642 break;
4643 }
4644
4645 case Ist_LoadG: {
4646 IRLoadG* lg = st->Ist.LoadG.details;
4647 IRType type = Ity_INVALID; /* loaded type */
4648 IRType typeWide = Ity_INVALID; /* after implicit widening */
4649 IRExpr* addr = lg->addr;
4650 typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4651 tl_assert(type != Ity_INVALID);
4652 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4653 False/*!isStore*/,
4654 sizeofIRType(hWordTy),
4655 goff_sp, lg->guard );
4656 break;
4657 }
4658
sewardjb4112022007-11-09 22:49:28 +00004659 case Ist_WrTmp: {
4660 IRExpr* data = st->Ist.WrTmp.data;
4661 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004662 if (!inLDSO) {
4663 instrument_mem_access(
4664 bbOut,
4665 data->Iex.Load.addr,
4666 sizeofIRType(data->Iex.Load.ty),
4667 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004668 sizeofIRType(hWordTy), goff_sp,
4669 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004670 );
4671 }
sewardjb4112022007-11-09 22:49:28 +00004672 }
4673 break;
4674 }
4675
4676 case Ist_Dirty: {
4677 Int dataSize;
4678 IRDirty* d = st->Ist.Dirty.details;
4679 if (d->mFx != Ifx_None) {
4680 /* This dirty helper accesses memory. Collect the
4681 details. */
4682 tl_assert(d->mAddr != NULL);
4683 tl_assert(d->mSize != 0);
4684 dataSize = d->mSize;
4685 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004686 if (!inLDSO) {
4687 instrument_mem_access(
4688 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004689 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004690 );
4691 }
sewardjb4112022007-11-09 22:49:28 +00004692 }
4693 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004694 if (!inLDSO) {
4695 instrument_mem_access(
4696 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004697 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004698 );
4699 }
sewardjb4112022007-11-09 22:49:28 +00004700 }
4701 } else {
4702 tl_assert(d->mAddr == NULL);
4703 tl_assert(d->mSize == 0);
4704 }
4705 break;
4706 }
4707
4708 default:
sewardjf98e1c02008-10-25 16:22:41 +00004709 unhandled:
4710 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004711 tl_assert(0);
4712
4713 } /* switch (st->tag) */
4714
4715 addStmtToIRSB( bbOut, st );
4716 } /* iterate over bbIn->stmts */
4717
4718 return bbOut;
4719}
4720
sewardjffce8152011-06-24 10:09:41 +00004721#undef binop
4722#undef mkexpr
4723#undef mkU32
4724#undef mkU64
4725#undef assign
4726
sewardjb4112022007-11-09 22:49:28 +00004727
4728/*----------------------------------------------------------------*/
4729/*--- Client requests ---*/
4730/*----------------------------------------------------------------*/
4731
4732/* Sheesh. Yet another goddam finite map. */
4733static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4734
4735static void map_pthread_t_to_Thread_INIT ( void ) {
4736 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004737 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4738 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004739 tl_assert(map_pthread_t_to_Thread != NULL);
4740 }
4741}
4742
philippef5774342014-05-03 11:12:50 +00004743static void print_monitor_help ( void )
4744{
4745 VG_(gdb_printf)
4746 (
4747"\n"
4748"helgrind monitor commands:\n"
philippef5774342014-05-03 11:12:50 +00004749" info locks : show list of locks and their status\n"
4750"\n");
4751}
4752
4753/* return True if request recognised, False otherwise */
4754static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4755{
philippef5774342014-05-03 11:12:50 +00004756 HChar* wcmd;
 4757 HChar s[VG_(strlen)(req) + 1]; /* copy for strtok_r; +1 for the NUL */
4758 HChar *ssaveptr;
4759 Int kwdid;
4760
4761 VG_(strcpy) (s, req);
4762
4763 wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4764 /* NB: if possible, avoid introducing a new command below which
4765 starts with the same first letter(s) as an already existing
4766 command. This ensures a shorter abbreviation for the user. */
4767 switch (VG_(keyword_id)
philippe07c08522014-05-14 20:39:27 +00004768 ("help info",
philippef5774342014-05-03 11:12:50 +00004769 wcmd, kwd_report_duplicated_matches)) {
4770 case -2: /* multiple matches */
4771 return True;
4772 case -1: /* not found */
4773 return False;
4774 case 0: /* help */
4775 print_monitor_help();
4776 return True;
4777 case 1: /* info */
philippef5774342014-05-03 11:12:50 +00004778 wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4779 switch (kwdid = VG_(keyword_id)
4780 ("locks",
4781 wcmd, kwd_report_all)) {
4782 case -2:
4783 case -1:
4784 break;
4785 case 0: // locks
4786 {
4787 Int i;
4788 Lock* lk;
4789 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
4790 pp_Lock(0, lk,
4791 True /* show_lock_addrdescr */,
4792 False /* show_internal_data */);
4793 }
4794 if (i == 0)
4795 VG_(gdb_printf) ("no locks\n");
4796 }
4797 break;
4798 default:
4799 tl_assert(0);
4800 }
4801 return True;
philippef5774342014-05-03 11:12:50 +00004802 default:
4803 tl_assert(0);
4804 return False;
4805 }
4806}
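/* Example session (a sketch), driven from gdb via vgdb:

      (gdb) monitor help
      (gdb) monitor info locks

   The second command walks admin_locks and pp_Lock()s each entry,
   or prints "no locks" if the list is empty.  Unambiguous keyword
   prefixes are accepted, courtesy of VG_(keyword_id). */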
sewardjb4112022007-11-09 22:49:28 +00004807
4808static
4809Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4810{
philippef5774342014-05-03 11:12:50 +00004811 if (!VG_IS_TOOL_USERREQ('H','G',args[0])
4812 && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
sewardjb4112022007-11-09 22:49:28 +00004813 return False;
4814
4815 /* Anything that gets past the above check is one of ours, so we
4816 should be able to handle it. */
4817
4818 /* default, meaningless return value, unless otherwise set */
4819 *ret = 0;
4820
4821 switch (args[0]) {
4822
4823 /* --- --- User-visible client requests --- --- */
4824
4825 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004826 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004827 args[1], args[2]);
4828 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004829 are any held locks etc in the area. Calling evh__die_mem
4830 and then evh__new_mem is a bit inefficient; probably just
4831 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004832 if (args[2] > 0) { /* length */
4833 evh__die_mem(args[1], args[2]);
4834 /* and then set it to New */
4835 evh__new_mem(args[1], args[2]);
4836 }
4837 break;
4838
sewardjc8028ad2010-05-05 09:34:42 +00004839 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4840 Addr payload = 0;
4841 SizeT pszB = 0;
4842 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4843 args[1]);
4844 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4845 if (pszB > 0) {
4846 evh__die_mem(payload, pszB);
4847 evh__new_mem(payload, pszB);
4848 }
4849 *ret = pszB;
4850 } else {
4851 *ret = (UWord)-1;
4852 }
4853 break;
4854 }
4855
sewardj406bac82010-03-03 23:03:40 +00004856 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4857 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4858 args[1], args[2]);
4859 if (args[2] > 0) { /* length */
4860 evh__untrack_mem(args[1], args[2]);
4861 }
4862 break;
4863
4864 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4865 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4866 args[1], args[2]);
4867 if (args[2] > 0) { /* length */
4868 evh__new_mem(args[1], args[2]);
4869 }
4870 break;
4871
sewardjb4112022007-11-09 22:49:28 +00004872 /* --- --- Client requests for Helgrind's use only --- --- */
4873
4874 /* Some thread is telling us its pthread_t value. Record the
4875 binding between that and the associated Thread*, so we can
4876 later find the Thread* again when notified of a join by the
4877 thread. */
4878 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4879 Thread* my_thr = NULL;
4880 if (0)
4881 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4882 (void*)args[1]);
4883 map_pthread_t_to_Thread_INIT();
4884 my_thr = map_threads_maybe_lookup( tid );
4885 /* This assertion should hold because the map_threads (tid to
4886 Thread*) binding should have been made at the point of
4887 low-level creation of this thread, which should have
4888 happened prior to us getting this client request for it.
4889 That's because this client request is sent from
4890 client-world from the 'thread_wrapper' function, which
4891 only runs once the thread has been low-level created. */
4892 tl_assert(my_thr != NULL);
4893 /* So now we know that (pthread_t)args[1] is associated with
4894 (Thread*)my_thr. Note that down. */
4895 if (0)
4896 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4897 (void*)args[1], (void*)my_thr );
florian6bf37262012-10-21 03:23:36 +00004898 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004899 break;
4900 }
4901
4902 case _VG_USERREQ__HG_PTH_API_ERROR: {
4903 Thread* my_thr = NULL;
4904 map_pthread_t_to_Thread_INIT();
4905 my_thr = map_threads_maybe_lookup( tid );
4906 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004907 HG_(record_error_PthAPIerror)(
florian6bf37262012-10-21 03:23:36 +00004908 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004909 break;
4910 }
4911
4912 /* This thread (tid) has completed a join with the quitting
4913 thread whose pthread_t is in args[1]. */
4914 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4915 Thread* thr_q = NULL; /* quitter Thread* */
4916 Bool found = False;
4917 if (0)
4918 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4919 (void*)args[1]);
4920 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004921 found = VG_(lookupFM)( map_pthread_t_to_Thread,
florian6bf37262012-10-21 03:23:36 +00004922 NULL, (UWord*)&thr_q, (UWord)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004923 /* Can this fail? It would mean that our pthread_join
4924 wrapper observed a successful join on args[1] yet that
4925 thread never existed (or at least, it never lodged an
4926 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4927 sounds like a bug in the threads library. */
4928 // FIXME: get rid of this assertion; handle properly
4929 tl_assert(found);
4930 if (found) {
4931 if (0)
4932 VG_(printf)(".................... quitter Thread* = %p\n",
4933 thr_q);
4934 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4935 }
4936 break;
4937 }
4938
4939 /* EXPOSITION only: by intercepting lock init events we can show
4940 the user where the lock was initialised, rather than only
4941 being able to show where it was first locked. Intercepting
4942 lock initialisations is not necessary for the basic operation
4943 of the race checker. */
4944 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
4945 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
4946 break;
4947
sewardjc02f6c42013-10-14 13:51:25 +00004948 /* mutex=arg[1], mutex_is_init=arg[2] */
sewardjb4112022007-11-09 22:49:28 +00004949 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00004950 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjb4112022007-11-09 22:49:28 +00004951 break;
4952
4953 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
4954 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
4955 break;
4956
4957 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
4958 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
4959 break;
4960
4961 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
4962 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
4963 break;
4964
4965 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
4966 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
4967 break;
4968
4969 /* This thread is about to do pthread_cond_signal on the
4970 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
4971 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
4972 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
4973 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
4974 break;
4975
4976 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
4977 Returns a flag indicating whether or not the mutex is believed to be
4978 valid for this operation. */
4979 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
4980 Bool mutex_is_valid
4981 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
4982 (void*)args[2] );
4983 *ret = mutex_is_valid ? 1 : 0;
4984 break;
4985 }
4986
philippe19dfe032013-03-24 20:10:23 +00004987 /* Thread successfully completed pthread_cond_init:
4988 cond=arg[1], cond_attr=arg[2] */
4989 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
4990 evh__HG_PTHREAD_COND_INIT_POST( tid,
4991 (void*)args[1], (void*)args[2] );
4992 break;
4993
sewardjc02f6c42013-10-14 13:51:25 +00004994 /* cond=arg[1], cond_is_init=arg[2] */
sewardjf98e1c02008-10-25 16:22:41 +00004995 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00004996 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjf98e1c02008-10-25 16:22:41 +00004997 break;
4998
sewardjb4112022007-11-09 22:49:28 +00004999 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
5000 mutex=arg[2] */
5001 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
5002 evh__HG_PTHREAD_COND_WAIT_POST( tid,
sewardjff427c92013-10-14 12:13:52 +00005003 (void*)args[1], (void*)args[2],
5004 (Bool)args[3] );
sewardjb4112022007-11-09 22:49:28 +00005005 break;
5006
5007 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5008 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5009 break;
5010
5011 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5012 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5013 break;
5014
sewardj789c3c52008-02-25 12:10:07 +00005015 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00005016 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00005017 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5018 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00005019 break;
5020
5021 /* rwlock=arg[1], isW=arg[2] */
5022 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
5023 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5024 break;
5025
5026 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
5027 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5028 break;
5029
5030 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
5031 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5032 break;
5033
sewardj11e352f2007-11-30 11:11:02 +00005034 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5035 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00005036 break;
5037
sewardj11e352f2007-11-30 11:11:02 +00005038 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5039 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005040 break;
5041
sewardj11e352f2007-11-30 11:11:02 +00005042 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
5043 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5044 break;
5045
5046 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
5047 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005048 break;
5049
sewardj9f569b72008-11-13 13:33:09 +00005050 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00005051 /* pth_bar_t*, ulong count, ulong resizable */
5052 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5053 args[2], args[3] );
5054 break;
5055
5056 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5057 /* pth_bar_t*, ulong newcount */
5058 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5059 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00005060 break;
5061
5062 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5063 /* pth_bar_t* */
5064 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5065 break;
5066
5067 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5068 /* pth_bar_t* */
5069 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5070 break;
sewardjb4112022007-11-09 22:49:28 +00005071
sewardj5a644da2009-08-11 10:35:58 +00005072 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5073 /* pth_spinlock_t* */
5074 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5075 break;
5076
5077 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5078 /* pth_spinlock_t* */
5079 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5080 break;
5081
5082 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5083 /* pth_spinlock_t*, Word */
5084 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5085 break;
5086
5087 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5088 /* pth_spinlock_t* */
5089 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5090 break;
5091
5092 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5093 /* pth_spinlock_t* */
5094 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5095 break;
5096
sewardjed2e72e2009-08-14 11:08:24 +00005097 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
florian19f91bb2012-11-10 22:29:54 +00005098 /* HChar* who */
sewardjed2e72e2009-08-14 11:08:24 +00005099 HChar* who = (HChar*)args[1];
5100 HChar buf[50 + 50];
5101 Thread* thr = map_threads_maybe_lookup( tid );
5102 tl_assert( thr ); /* I must be mapped */
5103 tl_assert( who );
5104 tl_assert( VG_(strlen)(who) <= 50 );
5105 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5106 /* record_error_Misc strdup's buf, so this is safe: */
5107 HG_(record_error_Misc)( thr, buf );
5108 break;
5109 }
5110
5111 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5112 /* UWord arbitrary-SO-tag */
5113 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5114 break;
5115
5116 case _VG_USERREQ__HG_USERSO_RECV_POST:
5117 /* UWord arbitrary-SO-tag */
5118 evh__HG_USERSO_RECV_POST( tid, args[1] );
5119 break;
5120
sewardj6015d0e2011-03-11 19:10:48 +00005121 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5122 /* UWord arbitrary-SO-tag */
5123 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5124 break;
5125
philippef5774342014-05-03 11:12:50 +00005126 case VG_USERREQ__GDB_MONITOR_COMMAND: {
5127 Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
5128 if (handled)
5129 *ret = 1;
5130 else
5131 *ret = 0;
5132 return handled;
5133 }
5134
sewardjb4112022007-11-09 22:49:28 +00005135 default:
5136 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00005137 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
5138 args[0]);
sewardjb4112022007-11-09 22:49:28 +00005139 }
5140
5141 return True;
5142}
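/* Client-side view (a sketch): helgrind.h wraps these requests in
   macros, e.g.

      VALGRIND_HG_CLEAN_MEMORY(addr, len);

   which arrives here with args[0] == VG_USERREQ__HG_CLEAN_MEMORY,
   args[1] == addr and args[2] == len. */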
5143
5144
5145/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00005146/*--- Setup ---*/
5147/*----------------------------------------------------------------*/
5148
florian19f91bb2012-11-10 22:29:54 +00005149static Bool hg_process_cmd_line_option ( const HChar* arg )
sewardjb4112022007-11-09 22:49:28 +00005150{
florian19f91bb2012-11-10 22:29:54 +00005151 const HChar* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00005152
njn83df0b62009-02-25 01:01:05 +00005153 if VG_BOOL_CLO(arg, "--track-lockorders",
5154 HG_(clo_track_lockorders)) {}
5155 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5156 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00005157
5158 else if VG_XACT_CLO(arg, "--history-level=none",
5159 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00005160 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00005161 HG_(clo_history_level), 1);
5162 else if VG_XACT_CLO(arg, "--history-level=full",
5163 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00005164
sewardjf585e482009-08-16 22:52:29 +00005165 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00005166 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00005167 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00005168 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00005169
sewardj11e352f2007-11-30 11:11:02 +00005170 /* six chars "stuvwx", each '0' or '1' --> 6-bit mask stuvwx */
njn83df0b62009-02-25 01:01:05 +00005171 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00005172 Int j;
sewardjb4112022007-11-09 22:49:28 +00005173
njn83df0b62009-02-25 01:01:05 +00005174 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00005175 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00005176 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00005177 return False;
5178 }
sewardj11e352f2007-11-30 11:11:02 +00005179 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00005180 if ('0' == tmp_str[j]) { /* do nothing */ }
5181 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00005182 else {
sewardj11e352f2007-11-30 11:11:02 +00005183 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00005184 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00005185 return False;
5186 }
5187 }
sewardjf98e1c02008-10-25 16:22:41 +00005188 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00005189 }
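   /* Example (a sketch): --hg-sanity-flags=000010 sets bit 1 of
      HG_(clo_sanity_flags) (the j==4 character maps to bit 6-1-4),
      enabling the sanity checks run at lock/unlock events; see
      hg_print_debug_usage below for the meaning of each bit. */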
5190
sewardj622fe492011-03-11 21:06:59 +00005191 else if VG_BOOL_CLO(arg, "--free-is-write",
5192 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00005193
5194 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5195 HG_(clo_vts_pruning), 0);
5196 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5197 HG_(clo_vts_pruning), 1);
5198 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5199 HG_(clo_vts_pruning), 2);
5200
5201 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5202 HG_(clo_check_stack_refs)) {}
5203
sewardjb4112022007-11-09 22:49:28 +00005204 else
5205 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5206
5207 return True;
5208}
5209
5210static void hg_print_usage ( void )
5211{
5212 VG_(printf)(
sewardj622fe492011-03-11 21:06:59 +00005213" --free-is-write=no|yes treat heap frees as writes [no]\n"
sewardj849b0ed2008-12-21 10:43:10 +00005214" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00005215" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00005216" full: show both stack traces for a data race (can be very slow)\n"
5217" approx: full trace for one thread, approx for the other (faster)\n"
5218" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00005219" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjffce8152011-06-24 10:09:41 +00005220" --check-stack-refs=no|yes race-check reads and writes on the\n"
5221" main stack and thread stacks? [yes]\n"
sewardjb4112022007-11-09 22:49:28 +00005222 );
sewardjb4112022007-11-09 22:49:28 +00005223}
5224
5225static void hg_print_debug_usage ( void )
5226{
sewardjb4112022007-11-09 22:49:28 +00005227 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
5228 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00005229 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00005230 " at events (X = 0|1) [000000]\n");
5231 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00005232 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00005233 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00005234 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
5235 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00005236 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00005237 VG_(printf)(" 000010 at lock/unlock events\n");
5238 VG_(printf)(" 000001 at thread create/join events\n");
sewardjffce8152011-06-24 10:09:41 +00005239 VG_(printf)(
5240" --vts-pruning=never|auto|always [auto]\n"
5241" never: is never done (may cause big space leaks in Helgrind)\n"
5242" auto: done just often enough to keep space usage under control\n"
5243" always: done after every VTS GC (mostly just a big time waster)\n"
5244 );
sewardjb4112022007-11-09 22:49:28 +00005245}

static void hg_print_stats (void)
{

   if (1) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog, "univ_laog" );
      }
   }

   //zz VG_(printf)("\n");
   //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
   //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
   //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
   //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz             stats__hbefore_stk_hwm);
   //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
   //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)("        locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // This in fact only prints stats.
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread* thr;
   ThreadId tid;
   UWord nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   /* Zero-fill any unused tail of the caller's buffer. */
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread* thr;
   ThreadId tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_info_location (Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (a);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2013, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_print_stats)   (hg_print_stats);
   VG_(needs_info_location) (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
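
   /* Illustrative sketch only (hypothetical field names, not the real
      MallocMeta definition): pub_tool_hashtable.h expects every node
      to begin with a link pointer followed by a UWord key, roughly

         struct _MallocMeta { struct _MallocMeta* next;
                              UWord               key;
                              ... tool-specific payload ... };

      The two asserts above check the size identities this layout
      punning depends on. */
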
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));
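
   /* A minimal sketch of how such a pool is typically used, assuming
      the standard pub_tool_poolalloc.h API (the malloc wrappers
      earlier in this file are the natural call sites):

         MallocMeta* md = VG_(allocEltPA)( MallocMeta_poolalloc );
         ...fill in md...
         VG_(freeEltPA)( MallocMeta_poolalloc, md );

      Pooling fixed-size MallocMeta records avoids a trip through the
      general-purpose allocator for every client heap block. */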

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/