
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2013 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
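
/* Purely illustrative sketch (not part of Helgrind's logic) of the
   kind of cast the note above refers to; the same pattern appears
   throughout this file, e.g. in pp_map_locks below.  WordFM stores
   keys and values as UWords, so pointer-typed locals get written
   through UWord* aliases:

      Addr  gla = 0;
      Lock* lk  = NULL;
      VG_(initIterFM)( map_locks );
      while (VG_(nextIterFM)( map_locks, (UWord*)&gla, (UWord*)&lk )) {
         // 'gla' and 'lk' are updated via UWord* aliases, which is
         // exactly what gcc's strict-aliasing analysis objects to.
      }
      VG_(doneIterFM)( map_locks );
*/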

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin doubly-linked list of Locks */
/* We need a doubly-linked list to handle del_LockN properly and
   efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog is garbage-collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */
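
/* A minimal sketch of the intended trigger (assumed here for
   exposition only; the real collection and threshold back-off live
   with the laog code later in this file):

      if (HG_(cardinalityWSU)( univ_laog ) >= next_gc_univ_laog) {
         // collect entries no longer referenced by the LAOG, then
         // raise the threshold so GCs stay infrequent as it grows.
         next_gc_univ_laog = 2 * HG_(cardinalityWSU)( univ_laog );
      }
*/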

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert it into the admin_locks doubly-linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to doubly-linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes it from the admin_locks doubly-linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from doubly-linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
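
/* Illustrative walk-through (exposition only): for an LK_mbRec lock L
   that is initially unheld, two write-acquisitions by thread T give

      acquire(L,T):  heldBy = { T },    heldW = True   (case LK_nonRec)
      acquire(L,T):  heldBy = { T, T }, heldW = True   (case LK_mbRec)

   i.e. the heldBy bag acts as a multiset whose count for T records
   the recursion depth; lockN_release below decrements it again. */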

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}

/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int    i;
   HChar  spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

/* Pretty-print lock lk.
   If show_lock_addrdescr, also describe the (guest) lock address
   (this description is more complete with --read-var-info=yes).
   If show_internal_data, also show helgrind-internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data)
{
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %d ", thr->coretid);
         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}

/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}


/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/

/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}

/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
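
/* Sketch of the intended use (illustrative; the real call sites are
   in the malloc/realloc wrappers later in this file): when the client
   realloc()s a block from 'payload' to 'p_new', its shadow state must
   move with the data:

      shadow_mem_scopy_range( thr, payload, p_new, old_size );

   where 'payload', 'p_new' and 'old_size' are hypothetical names for
   the old address, new address and old block size. */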

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }

/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


1226/* The lock at 'lock_ga' is just about to be unlocked. Make all
1227 necessary updates, and also do all possible error checks. */
1228static
1229void evhH__pre_thread_releases_lock ( Thread* thr,
1230 Addr lock_ga, Bool isRDWR )
1231{
1232 Lock* lock;
1233 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001234 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001235
1236 /* This routine is called prior to a lock release, before
1237 libpthread has had a chance to validate the call. Hence we need
1238 to detect and reject any attempts to move the lock into an
1239 invalid state. Such attempts are bugs in the client.
1240
1241 isRDWR is True if we know from the wrapper context that lock_ga
1242 should refer to a reader-writer lock, and is False if [ditto]
1243 lock_ga should refer to a standard mutex. */
1244
sewardjf98e1c02008-10-25 16:22:41 +00001245 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001246 lock = map_locks_maybe_lookup( lock_ga );
1247
1248 if (!lock) {
1249 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1250 the client is trying to unlock it. So complain, then ignore
1251 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001252 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001253 return;
1254 }
1255
1256 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001257 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001258
1259 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001260 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1261 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001262 }
1263 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001264 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1265 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001266 }
1267
1268 if (!lock->heldBy) {
1269 /* The lock is not held. This indicates a serious bug in the
1270 client. */
1271 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001272 HG_(record_error_UnlockUnlocked)( thr, lock );
florian6bf37262012-10-21 03:23:36 +00001273 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1274 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001275 goto error;
1276 }
1277
sewardjf98e1c02008-10-25 16:22:41 +00001278 /* test just above dominates */
1279 tl_assert(lock->heldBy);
1280 was_heldW = lock->heldW;
1281
sewardjb4112022007-11-09 22:49:28 +00001282 /* The lock is held. Is this thread one of the holders? If not,
1283 report a bug in the client. */
florian6bf37262012-10-21 03:23:36 +00001284 n = VG_(elemBag)( lock->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +00001285 tl_assert(n >= 0);
1286 if (n == 0) {
1287 /* We are not a current holder of the lock. This is a bug in
1288 the guest, and (per POSIX pthread rules) the unlock
1289 attempt will fail. So just complain and do nothing
1290 else. */
sewardj896f6f92008-08-19 08:38:52 +00001291 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001292 tl_assert(HG_(is_sane_Thread)(realOwner));
sewardjb4112022007-11-09 22:49:28 +00001293 tl_assert(realOwner != thr);
florian6bf37262012-10-21 03:23:36 +00001294 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1295 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjf98e1c02008-10-25 16:22:41 +00001296 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
sewardjb4112022007-11-09 22:49:28 +00001297 goto error;
1298 }
1299
1300 /* Ok, we hold the lock 'n' times. */
1301 tl_assert(n >= 1);
1302
1303 lockN_release( lock, thr );
1304
1305 n--;
1306 tl_assert(n >= 0);
1307
1308 if (n > 0) {
1309 tl_assert(lock->heldBy);
florian6bf37262012-10-21 03:23:36 +00001310 tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjb4112022007-11-09 22:49:28 +00001311 /* We still hold the lock. So either it's a recursive lock
1312 or a rwlock which is currently r-held. */
1313 tl_assert(lock->kind == LK_mbRec
1314 || (lock->kind == LK_rdwr && !lock->heldW));
florian6bf37262012-10-21 03:23:36 +00001315 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001316 if (lock->heldW)
florian6bf37262012-10-21 03:23:36 +00001317 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001318 else
florian6bf37262012-10-21 03:23:36 +00001319 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001320 } else {
sewardj983f3022009-05-21 14:49:55 +00001321 /* n is zero. This means we don't hold the lock any more. But
1322 if it's a rwlock held in r-mode, someone else could still
1323 hold it. Just do whatever sanity checks we can. */
1324 if (lock->kind == LK_rdwr && lock->heldBy) {
1325 /* It's a rwlock. We no longer hold it but we used to;
1326 nevertheless it still appears to be held by someone else.
1327 The implication is that, prior to this release, it must
 1328            have been shared by us and whoever else is holding it;
1329 which in turn implies it must be r-held, since a lock
1330 can't be w-held by more than one thread. */
1331 /* The lock is now R-held by somebody else: */
1332 tl_assert(lock->heldW == False);
1333 } else {
1334 /* Normal case. It's either not a rwlock, or it's a rwlock
1335 that we used to hold in w-mode (which is pretty much the
1336 same thing as a non-rwlock.) Since this transaction is
1337 atomic (V does not allow multiple threads to run
1338 simultaneously), it must mean the lock is now not held by
1339 anybody. Hence assert for it. */
1340 /* The lock is now not held by anybody: */
1341 tl_assert(!lock->heldBy);
1342 tl_assert(lock->heldW == False);
1343 }
sewardjf98e1c02008-10-25 16:22:41 +00001344 //if (lock->heldBy) {
florian6bf37262012-10-21 03:23:36 +00001345 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjf98e1c02008-10-25 16:22:41 +00001346 //}
sewardjb4112022007-11-09 22:49:28 +00001347 /* update this thread's lockset accordingly. */
1348 thr->locksetA
florian6bf37262012-10-21 03:23:36 +00001349 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +00001350 thr->locksetW
florian6bf37262012-10-21 03:23:36 +00001351 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001352 /* push our VC into the lock */
1353 tl_assert(thr->hbthr);
1354 tl_assert(lock->hbso);
1355 /* If the lock was previously W-held, then we want to do a
1356 strong send, and if previously R-held, then a weak send. */
1357 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001358 }
1359 /* fall through */
1360
1361 error:
sewardjf98e1c02008-10-25 16:22:41 +00001362 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001363}
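
/* Client-level sketch of what the send/recv pairing buys us
   (illustrative only, not part of the tool):

      // Thread 1                       // Thread 2
      x = 1;
      pthread_mutex_lock(&mu);
      pthread_mutex_unlock(&mu);        // strong send into mu's SO
                                        pthread_mutex_lock(&mu);
                                        // .. recv from mu's SO ..
                                        r = x;  // ordered after x = 1

   The send done here at release time, paired with the recv done at
   acquire time, is what gives Thread 2 a happens-after edge covering
   Thread 1's write of x. */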
1364
1365
sewardj9f569b72008-11-13 13:33:09 +00001366/* ---------------------------------------------------------- */
1367/* -------- Event handlers proper (evh__* functions) -------- */
1368/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001369
1370/* What is the Thread* for the currently running thread? This is
1371 absolutely performance critical. We receive notifications from the
1372 core for client code starts/stops, and cache the looked-up result
1373 in 'current_Thread'. Hence, for the vast majority of requests,
1374 finding the current thread reduces to a read of a global variable,
1375 provided get_current_Thread_in_C_C is inlined.
1376
1377 Outside of client code, current_Thread is NULL, and presumably
1378 any uses of it will cause a segfault. Hence:
1379
1380 - for uses definitely within client code, use
1381 get_current_Thread_in_C_C.
1382
1383 - for all other uses, use get_current_Thread.
1384*/
1385
sewardj23f12002009-07-24 08:45:08 +00001386static Thread *current_Thread = NULL,
1387 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001388
1389static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1390 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1391 tl_assert(current_Thread == NULL);
1392 current_Thread = map_threads_lookup( tid );
1393 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001394 if (current_Thread != current_Thread_prev) {
1395 libhb_Thr_resumes( current_Thread->hbthr );
1396 current_Thread_prev = current_Thread;
1397 }
sewardjb4112022007-11-09 22:49:28 +00001398}
1399static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1400 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1401 tl_assert(current_Thread != NULL);
1402 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001403 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001404}
1405static inline Thread* get_current_Thread_in_C_C ( void ) {
1406 return current_Thread;
1407}
1408static inline Thread* get_current_Thread ( void ) {
1409 ThreadId coretid;
1410 Thread* thr;
1411 thr = get_current_Thread_in_C_C();
1412 if (LIKELY(thr))
1413 return thr;
1414 /* evidently not in client code. Do it the slow way. */
1415 coretid = VG_(get_running_tid)();
1416 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001417 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001418 of initial memory layout) and VG_(get_running_tid)() returns
1419 VG_INVALID_THREADID at that point. */
1420 if (coretid == VG_INVALID_THREADID)
1421 coretid = 1; /* KLUDGE */
1422 thr = map_threads_lookup( coretid );
1423 return thr;
1424}
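
/* A sketch of the bracketing contract assumed above (illustrative
   only; the real sequencing is driven by the core):

      evh__start_client_code(tid, ..);  // current_Thread := lookup(tid)
      .. generated code runs, calling evh__mem_help_c{read,write}_* ..
      evh__stop_client_code(tid, ..);   // current_Thread := NULL

   Hence get_current_Thread_in_C_C is only safe between the two calls,
   which is exactly where the generated-code helpers execute. */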
1425
1426static
1427void evh__new_mem ( Addr a, SizeT len ) {
1428 if (SHOW_EVENTS >= 2)
1429 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1430 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001431 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001432 all__sanity_check("evh__new_mem-post");
1433}
1434
1435static
sewardj1f77fec2010-04-12 19:51:04 +00001436void evh__new_mem_stack ( Addr a, SizeT len ) {
1437 if (SHOW_EVENTS >= 2)
1438 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1439 shadow_mem_make_New( get_current_Thread(),
1440 -VG_STACK_REDZONE_SZB + a, len );
1441 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1442 all__sanity_check("evh__new_mem_stack-post");
1443}
1444
1445static
sewardj7cf4e6b2008-05-01 20:24:26 +00001446void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1447 if (SHOW_EVENTS >= 2)
1448 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1449 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001450 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001451 all__sanity_check("evh__new_mem_w_tid-post");
1452}
1453
1454static
sewardjb4112022007-11-09 22:49:28 +00001455void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001456 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001457 if (SHOW_EVENTS >= 1)
1458 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1459 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1460 if (rr || ww || xx)
1461 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001462 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001463 all__sanity_check("evh__new_mem_w_perms-post");
1464}
1465
1466static
1467void evh__set_perms ( Addr a, SizeT len,
1468 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001469 // This handles mprotect requests. If the memory is being put
1470 // into no-R no-W state, paint it as NoAccess, for the reasons
1471 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001472 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001473 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001474 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1475 /* Hmm. What should we do here, that actually makes any sense?
1476 Let's say: if neither readable nor writable, then declare it
1477 NoAccess, else leave it alone. */
1478 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001479 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001480 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001481 all__sanity_check("evh__set_perms-post");
1482}
1483
1484static
1485void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001486 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001487 if (SHOW_EVENTS >= 2)
1488 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001489 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001490 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001491 all__sanity_check("evh__die_mem-post");
1492}
1493
1494static
sewardjfd35d492011-03-17 19:39:55 +00001495void evh__die_mem_munmap ( Addr a, SizeT len ) {
1496 // It's important that libhb doesn't ignore this. If, as is likely,
1497 // the client is subject to address space layout randomization,
1498 // then unmapped areas may never get remapped over, even in long
1499 // runs. If we just ignore them we wind up with large resource
1500 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1501 // VTS references in the affected area are dropped. Marking memory
1502 // as NoAccess is expensive, but we assume that munmap is sufficiently
1503 // rare that the space gains of doing this are worth the costs.
1504 if (SHOW_EVENTS >= 2)
1505 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1506 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1507}
1508
1509static
sewardj406bac82010-03-03 23:03:40 +00001510void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001511 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001512 if (SHOW_EVENTS >= 2)
1513 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1514 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1515 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1516 all__sanity_check("evh__untrack_mem-post");
1517}
1518
1519static
sewardj23f12002009-07-24 08:45:08 +00001520void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1521 if (SHOW_EVENTS >= 2)
1522 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1523 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1524 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1525 all__sanity_check("evh__copy_mem-post");
1526}
1527
1528static
sewardjb4112022007-11-09 22:49:28 +00001529void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1530{
1531 if (SHOW_EVENTS >= 1)
1532 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1533 (Int)parent, (Int)child );
1534
1535 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001536 Thread* thr_p;
1537 Thread* thr_c;
1538 Thr* hbthr_p;
1539 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001540
sewardjf98e1c02008-10-25 16:22:41 +00001541 tl_assert(HG_(is_sane_ThreadId)(parent));
1542 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001543 tl_assert(parent != child);
1544
1545 thr_p = map_threads_maybe_lookup( parent );
1546 thr_c = map_threads_maybe_lookup( child );
1547
1548 tl_assert(thr_p != NULL);
1549 tl_assert(thr_c == NULL);
1550
sewardjf98e1c02008-10-25 16:22:41 +00001551 hbthr_p = thr_p->hbthr;
1552 tl_assert(hbthr_p != NULL);
sewardj60626642011-03-10 15:14:37 +00001553 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001554
sewardjf98e1c02008-10-25 16:22:41 +00001555 hbthr_c = libhb_create ( hbthr_p );
1556
1557 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001558 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001559 thr_c = mk_Thread( hbthr_c );
sewardj60626642011-03-10 15:14:37 +00001560 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1561 libhb_set_Thr_hgthread(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001562
1563 /* and bind it in the thread-map table */
1564 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001565 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1566 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001567
1568 /* Record where the parent is so we can later refer to this in
1569 error messages.
1570
1571 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1572 The stack snapshot is taken immediately after the parent has
1573 returned from its sys_clone call. Unfortunately there is no
1574 unwind info for the insn following "syscall" - reading the
1575 glibc sources confirms this. So we ask for a snapshot to be
1576 taken as if RIP was 3 bytes earlier, in a place where there
1577 is unwind info. Sigh.
1578 */
1579 { Word first_ip_delta = 0;
1580# if defined(VGP_amd64_linux)
1581 first_ip_delta = -3;
sewardj5a460f52014-08-30 19:24:05 +00001582# elif defined(VGP_arm64_linux)
1583 first_ip_delta = -1;
sewardjb4112022007-11-09 22:49:28 +00001584# endif
1585 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1586 }
sewardjb4112022007-11-09 22:49:28 +00001587 }
1588
sewardjf98e1c02008-10-25 16:22:41 +00001589 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001590 all__sanity_check("evh__pre_thread_create-post");
1591}
1592
1593static
1594void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1595{
1596 Int nHeld;
1597 Thread* thr_q;
1598 if (SHOW_EVENTS >= 1)
1599 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1600 (Int)quit_tid );
1601
1602 /* quit_tid has disappeared without joining to any other thread.
1603 Therefore there is no synchronisation event associated with its
1604 exit and so we have to pretty much treat it as if it was still
1605 alive but mysteriously making no progress. That is because, if
1606 we don't know when it really exited, then we can never say there
1607 is a point in time when we're sure the thread really has
1608 finished, and so we need to consider the possibility that it
1609 lingers indefinitely and continues to interact with other
1610 threads. */
1611 /* However, it might have rendezvous'd with a thread that called
1612 pthread_join with this one as arg, prior to this point (that's
1613 how NPTL works). In which case there has already been a prior
1614 sync event. So in any case, just let the thread exit. On NPTL,
1615 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001616 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001617 thr_q = map_threads_maybe_lookup( quit_tid );
1618 tl_assert(thr_q != NULL);
1619
1620 /* Complain if this thread holds any locks. */
1621 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1622 tl_assert(nHeld >= 0);
1623 if (nHeld > 0) {
1624 HChar buf[80];
1625 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1626 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001627 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001628 }
1629
sewardj23f12002009-07-24 08:45:08 +00001630 /* Not much to do here:
1631 - tell libhb the thread is gone
1632 - clear the map_threads entry, in order that the Valgrind core
1633 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001634 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1635 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001636 tl_assert(thr_q->hbthr);
1637 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001638 tl_assert(thr_q->coretid == quit_tid);
1639 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001640 map_threads_delete( quit_tid );
1641
sewardjf98e1c02008-10-25 16:22:41 +00001642 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001643 all__sanity_check("evh__pre_thread_ll_exit-post");
1644}
1645
sewardj61bc2c52011-02-09 10:34:00 +00001646/* This is called immediately after fork, for the child only. 'tid'
1647 is the only surviving thread (as per POSIX rules on fork() in
1648 threaded programs), so we have to clean up map_threads to remove
1649 entries for any other threads. */
1650static
1651void evh__atfork_child ( ThreadId tid )
1652{
1653 UInt i;
1654 Thread* thr;
1655 /* Slot 0 should never be used. */
1656 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1657 tl_assert(!thr);
1658 /* Clean up all other slots except 'tid'. */
1659 for (i = 1; i < VG_N_THREADS; i++) {
1660 if (i == tid)
1661 continue;
1662 thr = map_threads_maybe_lookup(i);
1663 if (!thr)
1664 continue;
1665 /* Cleanup actions (next 5 lines) copied from end of
1666 evh__pre_thread_ll_exit; keep in sync. */
1667 tl_assert(thr->hbthr);
1668 libhb_async_exit(thr->hbthr);
1669 tl_assert(thr->coretid == i);
1670 thr->coretid = VG_INVALID_THREADID;
1671 map_threads_delete(i);
1672 }
1673}
1674
philipped40aff52014-06-16 20:00:14 +00001675/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
sewardjb4112022007-11-09 22:49:28 +00001676static
philipped40aff52014-06-16 20:00:14 +00001677void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
sewardjb4112022007-11-09 22:49:28 +00001678{
sewardjf98e1c02008-10-25 16:22:41 +00001679 SO* so;
sewardjf98e1c02008-10-25 16:22:41 +00001680 /* Allocate a temporary synchronisation object and use it to send
1681 an imaginary message from the quitter to the stayer, the purpose
1682 being to generate a dependence from the quitter to the
1683 stayer. */
1684 so = libhb_so_alloc();
1685 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001686 /* Send last arg of _so_send as False, since the sending thread
1687 doesn't actually exist any more, so we don't want _so_send to
1688 try taking stack snapshots of it. */
sewardjffce8152011-06-24 10:09:41 +00001689 libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
sewardjf98e1c02008-10-25 16:22:41 +00001690 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1691 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001692
sewardjffce8152011-06-24 10:09:41 +00001693 /* Tell libhb that the quitter has been reaped. Note that we might
1694 have to be cleverer about this, to exclude 2nd and subsequent
1695 notifications for the same hbthr_q, in the case where the app is
1696 buggy (calls pthread_join twice or more on the same thread) AND
1697 where libpthread is also buggy and doesn't return ESRCH on
1698 subsequent calls. (If libpthread isn't thusly buggy, then the
1699 wrapper for pthread_join in hg_intercepts.c will stop us getting
1700 notified here multiple times for the same joinee.) See also
1701 comments in helgrind/tests/jointwice.c. */
1702 libhb_joinedwith_done(hbthr_q);
philipped40aff52014-06-16 20:00:14 +00001703}
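
/* The one-shot SO idiom used above, reduced to its essentials
   (illustrative sketch):

      SO* so = libhb_so_alloc();
      libhb_so_send( hbthr_q, so, True );  // "message" from quitter
      libhb_so_recv( hbthr_s, so, True );  // stayer happens-after it
      libhb_so_dealloc(so);

   i.e. a temporary channel whose only effect is to join the quitter's
   vector clock into the stayer's. */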
1704
1705
1706static
1707void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1708{
1709 Thread* thr_s;
1710 Thread* thr_q;
1711 Thr* hbthr_s;
1712 Thr* hbthr_q;
1713
1714 if (SHOW_EVENTS >= 1)
1715 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1716 (Int)stay_tid, quit_thr );
1717
1718 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
1719
1720 thr_s = map_threads_maybe_lookup( stay_tid );
1721 thr_q = quit_thr;
1722 tl_assert(thr_s != NULL);
1723 tl_assert(thr_q != NULL);
1724 tl_assert(thr_s != thr_q);
1725
1726 hbthr_s = thr_s->hbthr;
1727 hbthr_q = thr_q->hbthr;
1728 tl_assert(hbthr_s != hbthr_q);
1729 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1730 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
1731
1732 generate_quitter_stayer_dependence (hbthr_q, hbthr_s);
sewardjffce8152011-06-24 10:09:41 +00001733
sewardjf98e1c02008-10-25 16:22:41 +00001734 /* evh__pre_thread_ll_exit issues an error message if the exiting
1735 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001736
1737 /* This holds because, at least when using NPTL as the thread
1738 library, we should be notified the low level thread exit before
1739 we hear of any join event on it. The low level exit
1740 notification feeds through into evh__pre_thread_ll_exit,
1741 which should clear the map_threads entry for it. Hence we
1742 expect there to be no map_threads entry at this point. */
1743 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1744 == VG_INVALID_THREADID);
1745
sewardjf98e1c02008-10-25 16:22:41 +00001746 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001747 all__sanity_check("evh__post_thread_join-post");
1748}
1749
1750static
floriane543f302012-10-21 19:43:43 +00001751void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001752 Addr a, SizeT size) {
1753 if (SHOW_EVENTS >= 2
1754 || (SHOW_EVENTS >= 1 && size != 1))
1755 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1756 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001757 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001758 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001759 all__sanity_check("evh__pre_mem_read-post");
1760}
1761
1762static
1763void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
floriane543f302012-10-21 19:43:43 +00001764 const HChar* s, Addr a ) {
sewardjb4112022007-11-09 22:49:28 +00001765 Int len;
1766 if (SHOW_EVENTS >= 1)
1767 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1768 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001769 // Don't segfault if the string starts in an obviously stupid
1770 // place. Actually we should check the whole string, not just
1771 // the start address, but that's too much trouble. At least
1772 // checking the first byte is better than nothing. See #255009.
1773 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1774 return;
florian19f91bb2012-11-10 22:29:54 +00001775 len = VG_(strlen)( (HChar*) a );
sewardj23f12002009-07-24 08:45:08 +00001776 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001777 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001778 all__sanity_check("evh__pre_mem_read_asciiz-post");
1779}
1780
1781static
floriane543f302012-10-21 19:43:43 +00001782void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001783 Addr a, SizeT size ) {
1784 if (SHOW_EVENTS >= 1)
1785 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1786 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001787 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001788 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001789 all__sanity_check("evh__pre_mem_write-post");
1790}
1791
1792static
1793void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1794 if (SHOW_EVENTS >= 1)
1795 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1796 (void*)a, len, (Int)is_inited );
 1797   // FIXME: this is kinda stupid; both arms are identical because is_inited is currently ignored
1798 if (is_inited) {
1799 shadow_mem_make_New(get_current_Thread(), a, len);
1800 } else {
1801 shadow_mem_make_New(get_current_Thread(), a, len);
1802 }
sewardjf98e1c02008-10-25 16:22:41 +00001803 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001804      all__sanity_check("evh__new_mem_heap-post");
1805}
1806
1807static
1808void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001809 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001810 if (SHOW_EVENTS >= 1)
1811 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001812 thr = get_current_Thread();
1813 tl_assert(thr);
1814 if (HG_(clo_free_is_write)) {
1815 /* Treat frees as if the memory was written immediately prior to
1816 the free. This shakes out more races, specifically, cases
1817 where memory is referenced by one thread, and freed by
1818 another, and there's no observable synchronisation event to
1819 guarantee that the reference happens before the free. */
1820 shadow_mem_cwrite_range(thr, a, len);
1821 }
sewardjfd35d492011-03-17 19:39:55 +00001822 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001823 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001824      all__sanity_check("evh__die_mem_heap-post");
1825}
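
/* The kind of defect HG_(clo_free_is_write) targets, as client code
   (illustrative sketch, assuming no other synchronisation):

      // Thread 1                // Thread 2
      int v = p[0];              free(p);

   With no happens-before edge between the read and the free, treating
   free() as a write to p[0 .. len-1] turns this into an ordinary
   write/read race report instead of letting it pass unnoticed. */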
1826
sewardj23f12002009-07-24 08:45:08 +00001827/* --- Event handlers called from generated code --- */
1828
sewardjb4112022007-11-09 22:49:28 +00001829static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001830void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001831 Thread* thr = get_current_Thread_in_C_C();
1832 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001833 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001834}
sewardjf98e1c02008-10-25 16:22:41 +00001835
sewardjb4112022007-11-09 22:49:28 +00001836static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001837void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001838 Thread* thr = get_current_Thread_in_C_C();
1839 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001840 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001841}
sewardjf98e1c02008-10-25 16:22:41 +00001842
sewardjb4112022007-11-09 22:49:28 +00001843static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001844void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001845 Thread* thr = get_current_Thread_in_C_C();
1846 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001847 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001848}
sewardjf98e1c02008-10-25 16:22:41 +00001849
sewardjb4112022007-11-09 22:49:28 +00001850static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001851void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001852 Thread* thr = get_current_Thread_in_C_C();
1853 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001854 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001855}
sewardjf98e1c02008-10-25 16:22:41 +00001856
sewardjb4112022007-11-09 22:49:28 +00001857static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001858void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001859 Thread* thr = get_current_Thread_in_C_C();
1860 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001861 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001862}
1863
1864static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001865void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001866 Thread* thr = get_current_Thread_in_C_C();
1867 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001868 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001869}
sewardjf98e1c02008-10-25 16:22:41 +00001870
sewardjb4112022007-11-09 22:49:28 +00001871static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001872void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001873 Thread* thr = get_current_Thread_in_C_C();
1874 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001875 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001876}
sewardjf98e1c02008-10-25 16:22:41 +00001877
sewardjb4112022007-11-09 22:49:28 +00001878static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001879void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001880 Thread* thr = get_current_Thread_in_C_C();
1881 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001882 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001883}
sewardjf98e1c02008-10-25 16:22:41 +00001884
sewardjb4112022007-11-09 22:49:28 +00001885static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001886void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001887 Thread* thr = get_current_Thread_in_C_C();
1888 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001889 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001890}
sewardjf98e1c02008-10-25 16:22:41 +00001891
sewardjb4112022007-11-09 22:49:28 +00001892static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001893void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001894 Thread* thr = get_current_Thread_in_C_C();
1895 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001896 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001897}
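
/* How these helpers get reached, in sketch form: at instrumentation
   time each guest load/store of 1/2/4/8 bytes is given a call to the
   matching evh__mem_help_c{read,write}_<size>(addr), and other sizes
   fall back to the _N variants with an explicit size argument.
   (Hypothetical shape only -- 'emit_helper_call' is a stand-in for
   the real IR plumbing, which lives in the instrumenter, not here.)

      case 4:  emit_helper_call( evh__mem_help_cwrite_4, addr );
               break;
      default: emit_helper_call( evh__mem_help_cwrite_N, addr, size );
*/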
1898
sewardjb4112022007-11-09 22:49:28 +00001899
sewardj9f569b72008-11-13 13:33:09 +00001900/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001901/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001902/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001903
1904/* EXPOSITION only: by intercepting lock init events we can show the
1905 user where the lock was initialised, rather than only being able to
1906 show where it was first locked. Intercepting lock initialisations
1907 is not necessary for the basic operation of the race checker. */
1908static
1909void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1910 void* mutex, Word mbRec )
1911{
1912 if (SHOW_EVENTS >= 1)
1913 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1914 (Int)tid, mbRec, (void*)mutex );
1915 tl_assert(mbRec == 0 || mbRec == 1);
1916 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1917 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001918 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001919 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1920}
1921
1922static
sewardjc02f6c42013-10-14 13:51:25 +00001923void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
1924 Bool mutex_is_init )
sewardjb4112022007-11-09 22:49:28 +00001925{
1926 Thread* thr;
1927 Lock* lk;
1928 if (SHOW_EVENTS >= 1)
sewardjc02f6c42013-10-14 13:51:25 +00001929 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
1930 "(ctid=%d, %p, isInit=%d)\n",
1931 (Int)tid, (void*)mutex, (Int)mutex_is_init );
sewardjb4112022007-11-09 22:49:28 +00001932
1933 thr = map_threads_maybe_lookup( tid );
1934 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001935 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001936
1937 lk = map_locks_maybe_lookup( (Addr)mutex );
1938
sewardjc02f6c42013-10-14 13:51:25 +00001939 if (lk == NULL && mutex_is_init) {
1940 /* We're destroying a mutex which we don't have any record of,
1941 and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
1942 Assume it never got used, and so we don't need to do anything
1943 more. */
1944 goto out;
1945 }
1946
sewardjb4112022007-11-09 22:49:28 +00001947 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001948 HG_(record_error_Misc)(
1949 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001950 }
1951
1952 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001953 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001954 tl_assert( lk->guestaddr == (Addr)mutex );
1955 if (lk->heldBy) {
1956 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001957 HG_(record_error_Misc)(
1958 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001959 /* remove lock from locksets of all owning threads */
1960 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001961 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001962 lk->heldBy = NULL;
1963 lk->heldW = False;
1964 lk->acquired_at = NULL;
1965 }
1966 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001967 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001968
1969 if (HG_(clo_track_lockorders))
1970 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001971 map_locks_delete( lk->guestaddr );
1972 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001973 }
1974
sewardjc02f6c42013-10-14 13:51:25 +00001975 out:
sewardjf98e1c02008-10-25 16:22:41 +00001976 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001977 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1978}
1979
1980static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1981 void* mutex, Word isTryLock )
1982{
1983 /* Just check the mutex is sane; nothing else to do. */
1984 // 'mutex' may be invalid - not checked by wrapper
1985 Thread* thr;
1986 Lock* lk;
1987 if (SHOW_EVENTS >= 1)
1988 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1989 (Int)tid, (void*)mutex );
1990
1991 tl_assert(isTryLock == 0 || isTryLock == 1);
1992 thr = map_threads_maybe_lookup( tid );
1993 tl_assert(thr); /* cannot fail - Thread* must already exist */
1994
1995 lk = map_locks_maybe_lookup( (Addr)mutex );
1996
1997 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001998 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1999 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002000 }
2001
2002 if ( lk
2003 && isTryLock == 0
2004 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2005 && lk->heldBy
2006 && lk->heldW
florian6bf37262012-10-21 03:23:36 +00002007 && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00002008 /* uh, it's a non-recursive lock and we already w-hold it, and
2009 this is a real lock operation (not a speculative "tryLock"
2010 kind of thing). Duh. Deadlock coming up; but at least
2011 produce an error message. */
florian6bd9dc12012-11-23 16:17:43 +00002012 const HChar* errstr = "Attempt to re-lock a "
2013 "non-recursive lock I already hold";
2014 const HChar* auxstr = "Lock was previously acquired";
sewardj8fef6252010-07-29 05:28:02 +00002015 if (lk->acquired_at) {
2016 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2017 } else {
2018 HG_(record_error_Misc)( thr, errstr );
2019 }
sewardjb4112022007-11-09 22:49:28 +00002020 }
2021}
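
/* The self-deadlock caught just above, as client code (illustrative
   sketch):

      pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;  // non-recursive
      pthread_mutex_lock(&mu);
      pthread_mutex_lock(&mu);   // would block forever; flagged above

   A recursive mutex (LK_mbRec) or a trylock variant is exempt, hence
   the lk->kind and isTryLock tests. */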
2022
2023static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2024{
2025 // only called if the real library call succeeded - so mutex is sane
2026 Thread* thr;
2027 if (SHOW_EVENTS >= 1)
2028 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2029 (Int)tid, (void*)mutex );
2030
2031 thr = map_threads_maybe_lookup( tid );
2032 tl_assert(thr); /* cannot fail - Thread* must already exist */
2033
2034 evhH__post_thread_w_acquires_lock(
2035 thr,
2036 LK_mbRec, /* if not known, create new lock with this LockKind */
2037 (Addr)mutex
2038 );
2039}
2040
2041static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2042{
2043 // 'mutex' may be invalid - not checked by wrapper
2044 Thread* thr;
2045 if (SHOW_EVENTS >= 1)
2046 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2047 (Int)tid, (void*)mutex );
2048
2049 thr = map_threads_maybe_lookup( tid );
2050 tl_assert(thr); /* cannot fail - Thread* must already exist */
2051
2052 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2053}
2054
2055static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2056{
2057 // only called if the real library call succeeded - so mutex is sane
2058 Thread* thr;
2059 if (SHOW_EVENTS >= 1)
2060 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2061 (Int)tid, (void*)mutex );
2062 thr = map_threads_maybe_lookup( tid );
2063 tl_assert(thr); /* cannot fail - Thread* must already exist */
2064
2065 // anything we should do here?
2066}
2067
2068
sewardj5a644da2009-08-11 10:35:58 +00002069/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002070/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002071/* ------------------------------------------------------- */
2072
2073/* All a bit of a kludge. Pretend we're really dealing with ordinary
2074 pthread_mutex_t's instead, for the most part. */
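
/* The decision procedure the two handlers below implement, sketched
   as pseudocode (illustrative, not the control flow verbatim):

      PRE(slock):   lk = map_locks_maybe_lookup(slock);
                    if (lk && lk->heldBy)   // it was locked ..
                       treat as an unlock;  // .. so emit a release
      POST(slock):  if (!map_locks_maybe_lookup(slock))
                       treat as an init;    // first sighting: create it

   Neither handler needs to know which of init/unlock glibc actually
   performed; the lock's recorded state disambiguates. */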
2075
2076static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2077 void* slock )
2078{
2079 Thread* thr;
2080 Lock* lk;
2081 /* In glibc's kludgey world, we're either initialising or unlocking
2082 it. Since this is the pre-routine, if it is locked, unlock it
2083 and take a dependence edge. Otherwise, do nothing. */
2084
2085 if (SHOW_EVENTS >= 1)
2086 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2087 "(ctid=%d, slock=%p)\n",
2088 (Int)tid, (void*)slock );
2089
2090 thr = map_threads_maybe_lookup( tid );
 2091    /* cannot fail - Thread* must already exist */
2092 tl_assert( HG_(is_sane_Thread)(thr) );
2093
2094 lk = map_locks_maybe_lookup( (Addr)slock );
2095 if (lk && lk->heldBy) {
2096 /* it's held. So do the normal pre-unlock actions, as copied
2097 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2098 duplicates the map_locks_maybe_lookup. */
2099 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2100 False/*!isRDWR*/ );
2101 }
2102}
2103
2104static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2105 void* slock )
2106{
2107 Lock* lk;
2108 /* More kludgery. If the lock has never been seen before, do
2109 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2110 nothing. */
2111
2112 if (SHOW_EVENTS >= 1)
2113 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2114 "(ctid=%d, slock=%p)\n",
2115 (Int)tid, (void*)slock );
2116
2117 lk = map_locks_maybe_lookup( (Addr)slock );
2118 if (!lk) {
2119 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2120 }
2121}
2122
2123static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2124 void* slock, Word isTryLock )
2125{
2126 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2127}
2128
2129static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2130 void* slock )
2131{
2132 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2133}
2134
2135static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2136 void* slock )
2137{
sewardjc02f6c42013-10-14 13:51:25 +00002138 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
sewardj5a644da2009-08-11 10:35:58 +00002139}
2140
2141
sewardj9f569b72008-11-13 13:33:09 +00002142/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002143/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002144/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002145
sewardj02114542009-07-28 20:52:36 +00002146/* A mapping from CV to (the SO associated with it, plus some
2147 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002148 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2149 wait on it completes, we do a 'recv' from the SO. This is believed
2150 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002151 signallings/broadcasts.
2152*/
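
/* For example (client code, illustrative sketch):

      // Thread 1                       // Thread 2
      pthread_mutex_lock(&mu);          pthread_mutex_lock(&mu);
      ready = 1;                        while (!ready)
      pthread_cond_signal(&cv);            pthread_cond_wait(&cv, &mu);
      pthread_mutex_unlock(&mu);        pthread_mutex_unlock(&mu);

   The signal does a 'send' on cv's SO and the post-wait handler does
   a 'recv', so Thread 2's use of 'ready' happens-after Thread 1's
   write of it. */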
2153
sewardj02114542009-07-28 20:52:36 +00002154/* .so is the SO for this CV.
2155 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002156
sewardj02114542009-07-28 20:52:36 +00002157 POSIX says effectively that the first pthread_cond_{timed}wait call
2158 causes a dynamic binding between the CV and the mutex, and that
2159 lasts until such time as the waiter count falls to zero. Hence
2160 need to keep track of the number of waiters in order to do
2161 consistency tracking. */
2162typedef
2163 struct {
2164 SO* so; /* libhb-allocated SO */
2165 void* mx_ga; /* addr of associated mutex, if any */
2166 UWord nWaiters; /* # threads waiting on the CV */
2167 }
2168 CVInfo;
2169
2170
2171/* pthread_cond_t* -> CVInfo* */
2172static WordFM* map_cond_to_CVInfo = NULL;
2173
2174static void map_cond_to_CVInfo_INIT ( void ) {
2175 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2176 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2177 "hg.mctCI.1", HG_(free), NULL );
2178 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002179 }
2180}
2181
sewardj02114542009-07-28 20:52:36 +00002182static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002183 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002184 map_cond_to_CVInfo_INIT();
2185 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002186 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002187 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002188 } else {
sewardj02114542009-07-28 20:52:36 +00002189 SO* so = libhb_so_alloc();
2190 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2191 cvi->so = so;
2192 cvi->mx_ga = 0;
2193 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2194 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002195 }
2196}
2197
philippe8bfc2152012-07-06 23:38:24 +00002198static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2199 UWord key, val;
2200 map_cond_to_CVInfo_INIT();
2201 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2202 tl_assert(key == (UWord)cond);
2203 return (CVInfo*)val;
2204 } else {
2205 return NULL;
2206 }
2207}
2208
sewardjc02f6c42013-10-14 13:51:25 +00002209static void map_cond_to_CVInfo_delete ( ThreadId tid,
2210 void* cond, Bool cond_is_init ) {
philippe8bfc2152012-07-06 23:38:24 +00002211 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +00002212 UWord keyW, valW;
philippe8bfc2152012-07-06 23:38:24 +00002213
2214 thr = map_threads_maybe_lookup( tid );
2215 tl_assert(thr); /* cannot fail - Thread* must already exist */
2216
sewardj02114542009-07-28 20:52:36 +00002217 map_cond_to_CVInfo_INIT();
philippe24111972013-03-18 22:48:22 +00002218 if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
sewardj02114542009-07-28 20:52:36 +00002219 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002220 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002221 tl_assert(cvi);
2222 tl_assert(cvi->so);
philippe8bfc2152012-07-06 23:38:24 +00002223 if (cvi->nWaiters > 0) {
sewardjc02f6c42013-10-14 13:51:25 +00002224 HG_(record_error_Misc)(
2225 thr, "pthread_cond_destroy:"
2226 " destruction of condition variable being waited upon");
philippe24111972013-03-18 22:48:22 +00002227         /* Destroying a cond var that is being waited upon fails with
 2228            EBUSY; the variable is not destroyed. */
2229 return;
philippe8bfc2152012-07-06 23:38:24 +00002230 }
philippe24111972013-03-18 22:48:22 +00002231 if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
2232 tl_assert(0); // cond var found above, and not here ???
sewardj02114542009-07-28 20:52:36 +00002233 libhb_so_dealloc(cvi->so);
2234 cvi->mx_ga = 0;
2235 HG_(free)(cvi);
philippe8bfc2152012-07-06 23:38:24 +00002236 } else {
sewardjc02f6c42013-10-14 13:51:25 +00002237 /* We have no record of this CV. So complain about it
2238 .. except, don't bother to complain if it has exactly the
2239 value PTHREAD_COND_INITIALIZER, since it might be that the CV
2240 was initialised like that but never used. */
2241 if (!cond_is_init) {
2242 HG_(record_error_Misc)(
2243 thr, "pthread_cond_destroy: destruction of unknown cond var");
2244 }
sewardjb4112022007-11-09 22:49:28 +00002245 }
2246}
2247
2248static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2249{
sewardjf98e1c02008-10-25 16:22:41 +00002250 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2251 cond to a SO if it is not already so bound, and 'send' on the
2252 SO. This is later used by other thread(s) which successfully
2253 exit from a pthread_cond_wait on the same cv; then they 'recv'
2254 from the SO, thereby acquiring a dependency on this signalling
2255 event. */
sewardjb4112022007-11-09 22:49:28 +00002256 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002257 CVInfo* cvi;
2258 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002259
2260 if (SHOW_EVENTS >= 1)
2261 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2262 (Int)tid, (void*)cond );
2263
sewardjb4112022007-11-09 22:49:28 +00002264 thr = map_threads_maybe_lookup( tid );
2265 tl_assert(thr); /* cannot fail - Thread* must already exist */
2266
sewardj02114542009-07-28 20:52:36 +00002267 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2268 tl_assert(cvi);
2269 tl_assert(cvi->so);
2270
sewardjb4112022007-11-09 22:49:28 +00002271 // error-if: mutex is bogus
2272 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002273   // Hmm.  POSIX doesn't actually say that it's an error to call
 2274   // pthread_cond_signal with the associated mutex unlocked.  It does
 2275   // say, though, that the mutex should be held "if consistent scheduling
sewardjffce8152011-06-24 10:09:41 +00002276   // is desired."  For that reason, print "dubious" if the lock isn't
 2277   // held by any thread; if it is held by some other thread, that
 2278   // sounds straight-out wrong, so report it without the "dubious".
sewardj02114542009-07-28 20:52:36 +00002279 //
sewardjffce8152011-06-24 10:09:41 +00002280 // Anybody who writes code that signals on a CV without holding
2281 // the associated MX needs to be shipped off to a lunatic asylum
2282 // ASAP, even though POSIX doesn't actually declare such behaviour
2283 // illegal -- it makes code extremely difficult to understand/
2284 // reason about. In particular it puts the signalling thread in
2285 // a situation where it is racing against the released waiter
2286 // as soon as the signalling is done, and so there needs to be
2287 // some auxiliary synchronisation mechanism in the program that
2288 // makes this safe -- or the race(s) need to be harmless, or
2289 // probably nonexistent.
2290 //
2291 if (1) {
2292 Lock* lk = NULL;
2293 if (cvi->mx_ga != 0) {
2294 lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2295 }
2296 /* note: lk could be NULL. Be careful. */
2297 if (lk) {
2298 if (lk->kind == LK_rdwr) {
2299 HG_(record_error_Misc)(thr,
2300 "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2301 }
2302 if (lk->heldBy == NULL) {
2303 HG_(record_error_Misc)(thr,
2304 "pthread_cond_{signal,broadcast}: dubious: "
2305 "associated lock is not held by any thread");
2306 }
florian6bf37262012-10-21 03:23:36 +00002307 if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
sewardjffce8152011-06-24 10:09:41 +00002308 HG_(record_error_Misc)(thr,
2309 "pthread_cond_{signal,broadcast}: "
2310 "associated lock is not held by calling thread");
2311 }
2312 } else {
2313 /* Couldn't even find the damn thing. */
2314 // But actually .. that's not necessarily an error. We don't
2315 // know the (CV,MX) binding until a pthread_cond_wait or bcast
 2316         // know the (CV,MX) binding until a pthread_cond_wait or bcast
 2316         // shows us what it is, and that may not have happened yet.
2317 // So just keep quiet in this circumstance.
2318 //HG_(record_error_Misc)( thr,
2319 // "pthread_cond_{signal,broadcast}: "
2320 // "no or invalid mutex associated with cond");
2321 }
2322 }
sewardjb4112022007-11-09 22:49:28 +00002323
sewardj02114542009-07-28 20:52:36 +00002324 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002325}
2326
2327/* returns True if it reckons 'mutex' is valid and held by this
2328 thread, else False */
2329static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2330 void* cond, void* mutex )
2331{
2332 Thread* thr;
2333 Lock* lk;
2334 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002335 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002336
2337 if (SHOW_EVENTS >= 1)
2338 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2339 "(ctid=%d, cond=%p, mutex=%p)\n",
2340 (Int)tid, (void*)cond, (void*)mutex );
2341
sewardjb4112022007-11-09 22:49:28 +00002342 thr = map_threads_maybe_lookup( tid );
2343 tl_assert(thr); /* cannot fail - Thread* must already exist */
2344
2345 lk = map_locks_maybe_lookup( (Addr)mutex );
2346
2347 /* Check for stupid mutex arguments. There are various ways to be
2348 a bozo. Only complain once, though, even if more than one thing
2349 is wrong. */
2350 if (lk == NULL) {
2351 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002352 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002353 thr,
2354 "pthread_cond_{timed}wait called with invalid mutex" );
2355 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002356 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002357 if (lk->kind == LK_rdwr) {
2358 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002359 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002360 thr, "pthread_cond_{timed}wait called with mutex "
2361 "of type pthread_rwlock_t*" );
2362 } else
2363 if (lk->heldBy == NULL) {
2364 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002365 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002366 thr, "pthread_cond_{timed}wait called with un-held mutex");
2367 } else
2368 if (lk->heldBy != NULL
florian6bf37262012-10-21 03:23:36 +00002369 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002370 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002371 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002372 thr, "pthread_cond_{timed}wait called with mutex "
2373 "held by a different thread" );
2374 }
2375 }
2376
2377 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002378 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2379 tl_assert(cvi);
2380 tl_assert(cvi->so);
2381 if (cvi->nWaiters == 0) {
2382 /* form initial (CV,MX) binding */
2383 cvi->mx_ga = mutex;
2384 }
2385 else /* check existing (CV,MX) binding */
2386 if (cvi->mx_ga != mutex) {
2387 HG_(record_error_Misc)(
2388 thr, "pthread_cond_{timed}wait: cond is associated "
2389 "with a different mutex");
2390 }
2391 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002392
2393 return lk_valid;
2394}
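
/* The (CV,MX) binding check above, as client code (illustrative
   sketch):

      // Thread 1                       // Thread 2
      pthread_cond_wait(&cv, &mu1);     // .. still waiting on cv/mu1 ..
                                        pthread_cond_wait(&cv, &mu2);
                                        // flagged: cv stays bound to
                                        // mu1 while any waiter remains

   This matches POSIX's rule that the dynamic CV/mutex binding lasts
   until the waiter count falls to zero (tracked by cvi->nWaiters). */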
2395
2396static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
sewardjff427c92013-10-14 12:13:52 +00002397 void* cond, void* mutex,
2398 Bool timeout)
sewardjb4112022007-11-09 22:49:28 +00002399{
sewardjf98e1c02008-10-25 16:22:41 +00002400 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2401 the SO for this cond, and 'recv' from it so as to acquire a
2402 dependency edge back to the signaller/broadcaster. */
2403 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002404 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002405
2406 if (SHOW_EVENTS >= 1)
2407 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
sewardjff427c92013-10-14 12:13:52 +00002408                   "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
2409 (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
sewardjb4112022007-11-09 22:49:28 +00002410
sewardjb4112022007-11-09 22:49:28 +00002411 thr = map_threads_maybe_lookup( tid );
2412 tl_assert(thr); /* cannot fail - Thread* must already exist */
2413
2414 // error-if: cond is also associated with a different mutex
2415
philippe8bfc2152012-07-06 23:38:24 +00002416 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2417 if (!cvi) {
 2418       /* This is either a bug in helgrind or an error in the guest
 2419          application (e.g. the cond var was destroyed by another thread).
 2420          Let's assume helgrind is perfect ...
 2421          Note that this is similar to drd's behaviour. */
2422 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2423 " being waited upon");
2424 return;
2425 }
2426
sewardj02114542009-07-28 20:52:36 +00002427 tl_assert(cvi);
2428 tl_assert(cvi->so);
2429 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002430
sewardjff427c92013-10-14 12:13:52 +00002431 if (!timeout && !libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002432 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2433 it? If this happened it would surely be a bug in the threads
2434 library. Or one of those fabled "spurious wakeups". */
2435 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002436 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002437                               " without prior pthread_cond_signal");
sewardjb4112022007-11-09 22:49:28 +00002438 }
sewardjf98e1c02008-10-25 16:22:41 +00002439
2440 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002441 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2442
2443 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002444}
2445
static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
                                             void* cond, void* cond_attr )
{
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
                  "(ctid=%d, cond=%p, cond_attr=%p)\n",
                  (Int)tid, (void*)cond, (void*) cond_attr );

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert (cvi);
   tl_assert (cvi->so);
}


static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond, Bool cond_is_init )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p, cond_is_init=%d)\n",
                  (Int)tid, (void*)cond, (Int)cond_is_init );

   map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
}


/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh.  */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}

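/* Illustrative client-side sketch (not part of this file; names are
   invented).  LOCK_POST above dispatches on isW, so reader and writer
   acquisitions of the same pthread_rwlock_t are tracked differently:

      pthread_rwlock_rdlock(&rw);   // isW==0: r-acquire, may be shared
      ... read shared state ...
      pthread_rwlock_unlock(&rw);

      pthread_rwlock_wrlock(&rw);   // isW==1: w-acquire, exclusive
      ... modify shared state ...
      pthread_rwlock_unlock(&rw);

   Both paths funnel into evhH__post_thread_{r,w}_acquires_lock with
   kind LK_rdwr, and a rwlock erroneously driven through the mutex
   API is still caught by the kind check in LOCK_PRE. */
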

/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about.  We should.
*/

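/* Worked example of the scheme above (client-side sketch; names are
   invented).  With sem initialised to 0:

      // T1:                // T2:                // T3:
      x = 1;                y = 2;                sem_wait(&sem);
      sem_post(&sem);       sem_post(&sem);       sem_wait(&sem);
                                                  use(x); use(y);

   Each sem_post pushes a fresh SO carrying the poster's vector
   clock; each sem_wait pops one and does a strong recv from it.
   After both waits, T3's vector clock dominates both T1's and T2's
   post points, so its reads of 'x' and 'y' are not reported as
   races.  Which particular SO a given wait pops is arbitrary (LIFO
   here), but that does not affect the "T3 depends on both posters"
   conclusion. */
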
/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
      tl_assert(map_sem_to_SO_stack != NULL);
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}

static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}

static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}

static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped off the
      stack, they acquire dependencies on the posting thread
      segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002784
sewardjf98e1c02008-10-25 16:22:41 +00002785 Thread* thr;
2786 SO* so;
2787 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002788
2789 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002790 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002791 (Int)tid, (void*)sem );
2792
2793 thr = map_threads_maybe_lookup( tid );
2794 tl_assert(thr); /* cannot fail - Thread* must already exist */
2795
2796 // error-if: sem is bogus
2797
sewardjf98e1c02008-10-25 16:22:41 +00002798 hbthr = thr->hbthr;
2799 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002800
sewardjf98e1c02008-10-25 16:22:41 +00002801 so = libhb_so_alloc();
2802 libhb_so_send( hbthr, so, True/*strong send*/ );
2803 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002804}
2805
sewardj11e352f2007-11-30 11:11:02 +00002806static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002807{
sewardjf98e1c02008-10-25 16:22:41 +00002808 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2809 the 'sem' from this semaphore's SO-stack, and do a strong recv
2810 from it. This creates a dependency back to one of the post-ers
2811 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002812
sewardjf98e1c02008-10-25 16:22:41 +00002813 Thread* thr;
2814 SO* so;
2815 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002816
2817 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002818 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002819 (Int)tid, (void*)sem );
2820
2821 thr = map_threads_maybe_lookup( tid );
2822 tl_assert(thr); /* cannot fail - Thread* must already exist */
2823
2824 // error-if: sem is bogus
2825
sewardjf98e1c02008-10-25 16:22:41 +00002826 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002827
sewardjf98e1c02008-10-25 16:22:41 +00002828 if (so) {
2829 hbthr = thr->hbthr;
2830 tl_assert(hbthr);
2831
2832 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2833 libhb_so_dealloc(so);
2834 } else {
2835 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2836 If this happened it would surely be a bug in the threads
2837 library. */
2838 HG_(record_error_Misc)(
2839 thr, "Bug in libpthread: sem_wait succeeded on"
2840 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002841 }
2842}
2843
2844
sewardj9f569b72008-11-13 13:33:09 +00002845/* -------------------------------------------------------- */
2846/* -------------- events to do with barriers -------------- */
2847/* -------------------------------------------------------- */
2848
2849typedef
2850 struct {
2851 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002852 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002853 UWord size; /* declared size */
2854 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2855 }
2856 Bar;
2857
2858static Bar* new_Bar ( void ) {
2859 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2860 tl_assert(bar);
2861 /* all fields are zero */
2862 tl_assert(bar->initted == False);
2863 return bar;
2864}
2865
2866static void delete_Bar ( Bar* bar ) {
2867 tl_assert(bar);
2868 if (bar->waiting)
2869 VG_(deleteXA)(bar->waiting);
2870 HG_(free)(bar);
2871}
2872
2873/* A mapping which stores auxiliary data for barriers. */
2874
2875/* pthread_barrier_t* -> Bar* */
2876static WordFM* map_barrier_to_Bar = NULL;
2877
2878static void map_barrier_to_Bar_INIT ( void ) {
2879 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2880 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2881 "hg.mbtBI.1", HG_(free), NULL );
2882 tl_assert(map_barrier_to_Bar != NULL);
2883 }
2884}
2885
2886static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2887 UWord key, val;
2888 map_barrier_to_Bar_INIT();
2889 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2890 tl_assert(key == (UWord)barrier);
2891 return (Bar*)val;
2892 } else {
2893 Bar* bar = new_Bar();
2894 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2895 return bar;
2896 }
2897}
2898
2899static void map_barrier_to_Bar_delete ( void* barrier ) {
2900 UWord keyW, valW;
2901 map_barrier_to_Bar_INIT();
2902 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2903 Bar* bar = (Bar*)valW;
2904 tl_assert(keyW == (UWord)barrier);
2905 delete_Bar(bar);
2906 }
2907}
2908
2909
2910static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2911 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002912 UWord count,
2913 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002914{
2915 Thread* thr;
2916 Bar* bar;
2917
2918 if (SHOW_EVENTS >= 1)
2919 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002920 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2921 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002922
2923 thr = map_threads_maybe_lookup( tid );
2924 tl_assert(thr); /* cannot fail - Thread* must already exist */
2925
2926 if (count == 0) {
2927 HG_(record_error_Misc)(
2928 thr, "pthread_barrier_init: 'count' argument is zero"
2929 );
2930 }
2931
sewardj406bac82010-03-03 23:03:40 +00002932 if (resizable != 0 && resizable != 1) {
2933 HG_(record_error_Misc)(
2934 thr, "pthread_barrier_init: invalid 'resizable' argument"
2935 );
2936 }
2937
sewardj9f569b72008-11-13 13:33:09 +00002938 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2939 tl_assert(bar);
2940
2941 if (bar->initted) {
2942 HG_(record_error_Misc)(
2943 thr, "pthread_barrier_init: barrier is already initialised"
2944 );
2945 }
2946
2947 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2948 tl_assert(bar->initted);
2949 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002950 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002951 );
2952 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2953 }
2954 if (!bar->waiting) {
2955 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2956 sizeof(Thread*) );
2957 }
2958
2959 tl_assert(bar->waiting);
2960 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002961 bar->initted = True;
2962 bar->resizable = resizable == 1 ? True : False;
2963 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002964}
2965
2966
2967static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2968 void* barrier )
2969{
sewardj553655c2008-11-14 19:41:19 +00002970 Thread* thr;
2971 Bar* bar;
2972
sewardj9f569b72008-11-13 13:33:09 +00002973 /* Deal with destroy events. The only purpose is to free storage
2974 associated with the barrier, so as to avoid any possible
2975 resource leaks. */
2976 if (SHOW_EVENTS >= 1)
2977 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2978 "(tid=%d, barrier=%p)\n",
2979 (Int)tid, (void*)barrier );
2980
sewardj553655c2008-11-14 19:41:19 +00002981 thr = map_threads_maybe_lookup( tid );
2982 tl_assert(thr); /* cannot fail - Thread* must already exist */
2983
2984 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2985 tl_assert(bar);
2986
2987 if (!bar->initted) {
2988 HG_(record_error_Misc)(
2989 thr, "pthread_barrier_destroy: barrier was never initialised"
2990 );
2991 }
2992
2993 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2994 HG_(record_error_Misc)(
2995 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2996 );
2997 }
2998
sewardj9f569b72008-11-13 13:33:09 +00002999 /* Maybe we shouldn't do this; just let it persist, so that when it
3000 is reinitialised we don't need to do any dynamic memory
3001 allocation? The downside is a potentially unlimited space leak,
3002 if the client creates (in turn) a large number of barriers all
3003 at different locations. Note that if we do later move to the
3004 don't-delete-it scheme, we need to mark the barrier as
3005 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00003006 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00003007 map_barrier_to_Bar_delete( barrier );
3008}
3009
3010
sewardj406bac82010-03-03 23:03:40 +00003011/* All the threads have arrived. Now do the Interesting Bit. Get a
3012 new synchronisation object and do a weak send to it from all the
3013 participating threads. This makes its vector clocks be the join of
3014 all the individual threads' vector clocks. Then do a strong
3015 receive from it back to all threads, so that their VCs are a copy
3016 of it (hence are all equal to the join of their original VCs.) */
3017static void do_barrier_cross_sync_and_empty ( Bar* bar )
3018{
3019 /* XXX check bar->waiting has no duplicates */
3020 UWord i;
3021 SO* so = libhb_so_alloc();
3022
3023 tl_assert(bar->waiting);
3024 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3025
3026 /* compute the join ... */
3027 for (i = 0; i < bar->size; i++) {
3028 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3029 Thr* hbthr = t->hbthr;
3030 libhb_so_send( hbthr, so, False/*weak send*/ );
3031 }
3032 /* ... and distribute to all threads */
3033 for (i = 0; i < bar->size; i++) {
3034 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3035 Thr* hbthr = t->hbthr;
3036 libhb_so_recv( hbthr, so, True/*strong recv*/ );
3037 }
3038
3039 /* finally, we must empty out the waiting vector */
3040 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3041
3042 /* and we don't need this any more. Perhaps a stack-allocated
3043 SO would be better? */
3044 libhb_so_dealloc(so);
3045}
3046
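/* Numeric sketch of the weak-send / strong-recv sequence above
   (hypothetical vector clocks, three threads at a 3-way barrier):

      before:       T1 = [5,0,0]   T2 = [0,7,0]   T3 = [0,0,2]
      weak sends:   SO accumulates the join, [5,7,2]
      strong recvs: T1 = T2 = T3 = [5,7,2] (modulo each thread's
                    own local tick)

   That is, every participant leaves the barrier knowing everything
   every other participant did before arriving, which is exactly the
   happens-before semantics pthread_barrier_wait requires. */
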

static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      our data structures here so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}


static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity.  There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity.  If we decrease it to be equal or
         below the number of waiting threads, they will now move past
         the barrier, so need to mess with dep edges in the same way
         as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present >= 0 && present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      }
      bar->size = newcount;
   }
}


/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */



/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
      tl_assert(map_usertag_to_SO != NULL);
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

static void map_usertag_to_SO_delete ( UWord usertag ) {
   UWord keyW, valW;
   map_usertag_to_SO_INIT();
   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == usertag);
      tl_assert(so);
      libhb_so_dealloc(so);
   }
}


static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}

static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}

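/* Client-side sketch (variable names invented).  The annotation
   macros exported by helgrind.h route to the two handlers above,
   with the annotated address serving as the usertag; this lets
   lock-free code describe orderings Helgrind cannot infer from
   pthread calls alone:

      // producer:
      data = compute();
      ANNOTATE_HAPPENS_BEFORE(&flag);   // -> USERSO_SEND_PRE
      atomic_store(&flag, 1);

      // consumer:
      while (!atomic_load(&flag))
         ;
      ANNOTATE_HAPPENS_AFTER(&flag);    // -> USERSO_RECV_POST
      use(data);                        // no race reported
*/
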
static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}


/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
   laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/

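/* Client-side sketch (lock names invented) of the classic pattern
   the machinery below exists to catch:

      // Thread 1:                   // Thread 2:
      pthread_mutex_lock(&L1);       pthread_mutex_lock(&L2);
      pthread_mutex_lock(&L2);       pthread_mutex_lock(&L1);  // reported
      ...                            ...

   Thread 1 establishes the edge L1 --> L2 in laog.  When Thread 2
   later acquires L1 while holding L2, the DFS in
   laog__pre_thread_acquires_lock finds the path L1 --*--> L2 and a
   lock order error is reported, since this inconsistent ordering
   can deadlock. */
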
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */


__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}

static void laog__show ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}

static void univ_laog_do_GC ( void ) {
   Word i;
   LAOGLinks* links;
   Word seen = 0;
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);

   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                        * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   if (VG_(clo_stats))
      VG_(message)(Vg_DebugMsg,
                   "univ_laog_do_GC enter cardinality %'10d\n",
                   (Int)univ_laog_cardinality);

   VG_(initIterFM)( laog );
   links = NULL;
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links);
      tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
      links = NULL;
   }
   VG_(doneIterFM)( laog );

   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next GC threshold.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2.
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep laog WSs long enough for them to
   //   become useful again very soon.
   // Sol 2: garbage collect at a percentage increase of the current
   //   cardinality (with a min increase of 1).
   //   Trials on a small test program with 1%, 5% and 10% increases were done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each GC becomes a lot when many GCs
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to the cost introduced by GC: on the t2t perf test (doing
   // only lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection.  With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // performance difference is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.

   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or a fixed
   // increase), we should do it here, e.g.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
          (Int)seen, next_gc_univ_laog);
}


__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}

__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase the number of WSs, so check for gc. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   UWord      keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   UWord      keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}

/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   UWord     succs_size, i;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}


/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   UWord  ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking it.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;
3777
sewardjb4112022007-11-09 22:49:28 +00003778 /* First, the check. Complain if there is any path in laog from lk
3779 to any of the locks already held by thr, since if any such path
3780 existed, it would mean that previously lk was acquired before
3781 (rather than after, as we are doing here) at least one of those
3782 locks.
3783 */
3784 other = laog__do_dfs_from_to(lk, thr->locksetA);
3785 if (other) {
3786 LAOGLinkExposition key, *found;
3787 /* So we managed to find a path lk --*--> other in the graph,
3788 which implies that 'lk' should have been acquired before
3789 'other' but is in fact being acquired afterwards. We present
3790 the lk/other arguments to record_error_LockOrder in the order
3791 in which they should have been acquired. */
3792 /* Go look in the laog_exposition mapping, to find the allocation
3793 points for this edge, so we can show the user. */
3794 key.src_ga = lk->guestaddr;
3795 key.dst_ga = other->guestaddr;
3796 key.src_ec = NULL;
3797 key.dst_ec = NULL;
3798 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003799 if (VG_(lookupFM)( laog_exposition,
florian6bf37262012-10-21 03:23:36 +00003800 (UWord*)&found, NULL, (UWord)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003801 tl_assert(found != &key);
3802 tl_assert(found->src_ga == key.src_ga);
3803 tl_assert(found->dst_ga == key.dst_ga);
3804 tl_assert(found->src_ec);
3805 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003806 HG_(record_error_LockOrder)(
philippe46daf0d2014-07-29 20:08:15 +00003807 thr, lk, other,
sewardjffce8152011-06-24 10:09:41 +00003808 found->src_ec, found->dst_ec, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003809 } else {
3810 /* Hmm. This can't happen (can it?) */
philippeebe25802013-01-30 23:21:34 +00003811 /* Yes, it can happen: see tests/tc14_laog_dinphils.
3812 Imagine we have 3 philosophers A B C, and the forks
3813 between them:
3814
3815 C
3816
3817 fCA fBC
3818
3819 A fAB B
3820
3821 Let's have the following actions:
3822 A takes fCA,fAB
3823 A releases fCA,fAB
3824 B takes fAB,fBC
3825 B releases fAB,fBC
3826 C takes fBC,fCA
3827 C releases fBC,fCA
3828
3829 Helgrind will report a lock order error when C takes fCA.
3830 Effectively, we have a deadlock if the following
3831 sequence is done:
3832 A takes fCA
3833 B takes fAB
3834 C takes fBC
3835
3836 The error reported is:
3837 Observed (incorrect) order fBC followed by fCA
3838 but the stack traces that have established the required order
3839 are not given.
3840
            This is because there is no pair (fCA, fBC) in laog_exposition:
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */
         HG_(record_error_LockOrder)( 
            thr, lk, other,
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}

/* Allocates a duplicate of words.  Caller must HG_(free) the result. */
static UWord* UWordV_dup(UWord* words, Word words_size)
{
   UInt i;

   if (words_size == 0)
      return NULL;

   UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));

   for (i = 0; i < words_size; i++)
      dup[i] = words[i];

   return dup;
}

/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
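/* Observation on the preds x succs loop above: deleting lk this way
   preserves transitive ordering information.  For every path
   p -> lk -> s that existed before the deletion, a direct edge
   p -> s is added, so ordering constraints between the surviving
   locks are not lost. */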

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   UWordV_dup call needed here ...
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}


/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                                ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;

/* MallocMeta are small elements.  We use a pool to avoid
   the overhead of malloc for each MallocMeta. */
static PoolAlloc *MallocMeta_poolalloc = NULL;

static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
   VG_(memset)(md, 0, sizeof(MallocMeta));
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   VG_(freeEltPA)(MallocMeta_poolalloc, md);
}


/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid, 
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (its internal assertion
      cannot fail), since memory can only be allocated by currently
      alive threads, hence they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}
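/* Note: the nmemb*size1 product above is computed in (unsigned) SizeT
   arithmetic, so the two less-than-zero checks do not catch a
   multiplication that wraps around; a sufficiently large request could
   in principle under-allocate. */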


/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}


static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT      i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller; kill the tail before updating szB,
         otherwise the length passed to die_mem_heap would be zero */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}


/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/UInt*        tnr,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (tnr)     *tnr     = mm->thr->errmsg_index;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
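/* Illustration: a query with data_addr == payload + 3 * sizeof(UWord)
   hits on the fourth iteration (i == 3) of the fast loop above; the
   full-table scan is only paid for addresses more than 15 words into
   a block, or not inside any block at all. */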


/*--------------------------------------------------------------*/
/*--- Instrumentation                                         ---*/
/*--------------------------------------------------------------*/

#define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))

/* This takes and returns atoms, of course.  Not full IRExprs. */
static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
{
   tl_assert(arg1 && arg2);
   tl_assert(isIRAtom(arg1));
   tl_assert(isIRAtom(arg2));
   /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
      code, I know. */
   IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
   addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
   addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
   addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
                                            mkexpr(wide2))));
   addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
   return mkexpr(res);
}
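/* For example, mk_And1(sb, t1, t2) appends to 'sb'
      wide1 = 1Uto32(t1) ; wide2 = 1Uto32(t2)
      anded = And32(wide1, wide2) ; res = 32to1(anded)
   and hands back the atom 'res', ready for use as a guard. */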

static void instrument_mem_access ( IRSB*   sbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB,
                                    Int     goff_sp,
                                    IRExpr* guard ) /* NULL => True */
{
   IRType   tyAddr   = Ity_INVALID;
   const HChar* hName = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      /* guardA == "guard on the address" */
      IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guardA,
                tyAddr == Ity_I32
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guardA);
   }

   /* If there's a guard on the access itself (as supplied by the
      caller of this routine), we need to AND that in to any guard we
      might already have. */
   if (guard) {
      di->guard = mk_And1(sbOut, di->guard, guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
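/* Worked instance of the SP-window guard above (numbers purely for
   illustration): with SP == 0x8000, RZ == 128 and THRESH == 16384, an
   access at 0x8010 gives diff == 0x10 + 128 == 144; 16384 <u 144 is
   false, so the guard is false and the helper call is skipped.  An
   access at 0x20000 gives diff == 0x18080, the guard holds, and the
   access is race-checked as usual. */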


/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (Errs on the side of safety: False is the safe value.) */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const HChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3))         return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2))         return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2))  return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1))             return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_2))             return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1))               return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_AARCH64_SO_1)) return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}
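/* Note: hg_instrument below caches this predicate per 4K page, so the
   soname string comparisons are only paid at page transitions. */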

static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      VexArchInfo* archinfo_host,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   Addr64  cia; /* address of current insn */
   IRStmt* st;
   Bool    inLDSO = False;
   Addr64  inLDSOmask4K = 1; /* mismatches on first check */

   const Int goff_sp = layout->offset_SP;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;
   bbOut->offsIP   = bbIn->offsIP;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy), goff_sp,
                  NULL/*no-guard*/
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp,
                     NULL/*no-guard*/
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }

         case Ist_Store:
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy), goff_sp,
                  NULL/*no-guard*/
               );
            }
            break;

         case Ist_StoreG: {
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRExpr*   addr = sg->addr;
            IRType    type = typeOfIRExpr(bbIn->tyenv, data);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   True/*isStore*/,
                                   sizeofIRType(hWordTy),
                                   goff_sp, sg->guard );
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            IRExpr*  addr     = lg->addr;
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   False/*!isStore*/,
                                   sizeofIRType(hWordTy),
                                   goff_sp, lg->guard );
            break;
         }

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp,
                     NULL/*no-guard*/
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
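/* Summary of the instrumentation policy above: plain loads, guarded
   loads/stores, load-linked's, CAS cycles (treated as reads) and
   memory-touching dirty helpers all go through instrument_mem_access;
   store-conditionals are ignored, and nothing is instrumented while
   inside the dynamic linker. */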

#undef unop
#undef binop
#undef mkexpr
#undef mkU32
#undef mkU64
#undef assign


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}

/* A list of Ada dependent tasks and their masters.  Used for implementing
   the Ada task termination semantic as implemented by the
   gcc gnat Ada runtime. */
typedef
   struct {
      void* dependent; // Ada Task Control Block of the Dependent
      void* master;    // ATCB of the master
      Word  master_level; // level of dependency between master and dependent
      Thread* hg_dependent; // helgrind Thread* for dependent task.
   }
   GNAT_dmml;
static XArray* gnat_dmmls;   /* of GNAT_dmml */
static void gnat_dmmls_INIT (void)
{
   if (UNLIKELY(gnat_dmmls == NULL)) {
      gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
                               HG_(free),
                               sizeof(GNAT_dmml) );
   }
}
static void print_monitor_help ( void )
{
   VG_(gdb_printf)
      (
"\n"
"helgrind monitor commands:\n"
"  info locks : show list of locks and their status\n"
"\n");
}

/* return True if request recognised, False otherwise */
static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
{
   HChar* wcmd;
   HChar s[VG_(strlen(req)) + 1]; /* copy for strtok_r; +1 for the NUL */
   HChar *ssaveptr;
   Int   kwdid;

   VG_(strcpy) (s, req);

   wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
   /* NB: if possible, avoid introducing a new command below which
      starts with the same first letter(s) as an already existing
      command.  This ensures a shorter abbreviation for the user. */
   switch (VG_(keyword_id)
           ("help info",
            wcmd, kwd_report_duplicated_matches)) {
   case -2: /* multiple matches */
      return True;
   case -1: /* not found */
      return False;
   case  0: /* help */
      print_monitor_help();
      return True;
   case  1: /* info */
      wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
      switch (kwdid = VG_(keyword_id)
              ("locks",
               wcmd, kwd_report_all)) {
      case -2:
      case -1:
         break;
      case 0: // locks
         {
            Int i;
            Lock* lk;
            for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
               pp_Lock(0, lk,
                       True /* show_lock_addrdescr */,
                       False /* show_internal_data */);
            }
            if (i == 0)
               VG_(gdb_printf) ("no locks\n");
         }
         break;
      default:
         tl_assert(0);
      }
      return True;
   default:
      tl_assert(0);
      return False;
   }
}
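/* Example session (from gdb, connected via vgdb):
      (gdb) monitor help
      (gdb) monitor info locks
   The latter prints one pp_Lock line per lock on the admin_locks list,
   or "no locks" if the list is empty. */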
sewardjb4112022007-11-09 22:49:28 +00004840
4841static
4842Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4843{
philippef5774342014-05-03 11:12:50 +00004844 if (!VG_IS_TOOL_USERREQ('H','G',args[0])
4845 && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
sewardjb4112022007-11-09 22:49:28 +00004846 return False;
4847
4848 /* Anything that gets past the above check is one of ours, so we
4849 should be able to handle it. */
4850
4851 /* default, meaningless return value, unless otherwise set */
4852 *ret = 0;
4853
4854 switch (args[0]) {
4855
4856 /* --- --- User-visible client requests --- --- */
4857
4858 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004859 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004860 args[1], args[2]);
4861 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004862 are any held locks etc in the area. Calling evh__die_mem
4863 and then evh__new_mem is a bit inefficient; probably just
4864 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004865 if (args[2] > 0) { /* length */
4866 evh__die_mem(args[1], args[2]);
4867 /* and then set it to New */
4868 evh__new_mem(args[1], args[2]);
4869 }
4870 break;
4871
sewardjc8028ad2010-05-05 09:34:42 +00004872 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4873 Addr payload = 0;
4874 SizeT pszB = 0;
4875 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4876 args[1]);
philippe0c9ac8d2014-07-18 00:03:58 +00004877 if (HG_(mm_find_containing_block)(NULL, NULL,
4878 &payload, &pszB, args[1])) {
sewardjc8028ad2010-05-05 09:34:42 +00004879 if (pszB > 0) {
4880 evh__die_mem(payload, pszB);
4881 evh__new_mem(payload, pszB);
4882 }
4883 *ret = pszB;
4884 } else {
4885 *ret = (UWord)-1;
4886 }
4887 break;
4888 }
4889
sewardj406bac82010-03-03 23:03:40 +00004890 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4891 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4892 args[1], args[2]);
4893 if (args[2] > 0) { /* length */
4894 evh__untrack_mem(args[1], args[2]);
4895 }
4896 break;
4897
4898 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4899 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4900 args[1], args[2]);
4901 if (args[2] > 0) { /* length */
4902 evh__new_mem(args[1], args[2]);
4903 }
4904 break;
4905
sewardjb4112022007-11-09 22:49:28 +00004906 /* --- --- Client requests for Helgrind's use only --- --- */
4907
4908 /* Some thread is telling us its pthread_t value. Record the
4909 binding between that and the associated Thread*, so we can
4910 later find the Thread* again when notified of a join by the
4911 thread. */
4912 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4913 Thread* my_thr = NULL;
4914 if (0)
4915 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4916 (void*)args[1]);
4917 map_pthread_t_to_Thread_INIT();
4918 my_thr = map_threads_maybe_lookup( tid );
4919 /* This assertion should hold because the map_threads (tid to
4920 Thread*) binding should have been made at the point of
4921 low-level creation of this thread, which should have
4922 happened prior to us getting this client request for it.
4923 That's because this client request is sent from
4924 client-world from the 'thread_wrapper' function, which
4925 only runs once the thread has been low-level created. */
4926 tl_assert(my_thr != NULL);
4927 /* So now we know that (pthread_t)args[1] is associated with
4928 (Thread*)my_thr. Note that down. */
4929 if (0)
4930 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4931 (void*)args[1], (void*)my_thr );
florian6bf37262012-10-21 03:23:36 +00004932 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004933 break;
4934 }
4935
4936 case _VG_USERREQ__HG_PTH_API_ERROR: {
4937 Thread* my_thr = NULL;
4938 map_pthread_t_to_Thread_INIT();
4939 my_thr = map_threads_maybe_lookup( tid );
4940 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004941 HG_(record_error_PthAPIerror)(
florian6bf37262012-10-21 03:23:36 +00004942 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004943 break;
4944 }
4945
4946 /* This thread (tid) has completed a join with the quitting
4947 thread whose pthread_t is in args[1]. */
4948 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4949 Thread* thr_q = NULL; /* quitter Thread* */
4950 Bool found = False;
4951 if (0)
4952 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4953 (void*)args[1]);
4954 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004955 found = VG_(lookupFM)( map_pthread_t_to_Thread,
florian6bf37262012-10-21 03:23:36 +00004956 NULL, (UWord*)&thr_q, (UWord)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004957 /* Can this fail? It would mean that our pthread_join
4958 wrapper observed a successful join on args[1] yet that
4959 thread never existed (or at least, it never lodged an
4960 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4961 sounds like a bug in the threads library. */
4962 // FIXME: get rid of this assertion; handle properly
4963 tl_assert(found);
4964 if (found) {
4965 if (0)
4966 VG_(printf)(".................... quitter Thread* = %p\n",
4967 thr_q);
4968 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4969 }
4970 break;
4971 }
4972
philipped40aff52014-06-16 20:00:14 +00004973 /* This thread (tid) is informing us of its master. */
4974 case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
4975 GNAT_dmml dmml;
4976 dmml.dependent = (void*)args[1];
4977 dmml.master = (void*)args[2];
4978 dmml.master_level = (Word)args[3];
4979 dmml.hg_dependent = map_threads_maybe_lookup( tid );
4980 tl_assert(dmml.hg_dependent);
4981
4982 if (0)
4983 VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
4984 "dependent = %p master = %p master_level = %ld"
4985 " dependent Thread* = %p\n",
4986 (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
4987 dmml.hg_dependent);
4988 gnat_dmmls_INIT();
4989 VG_(addToXA) (gnat_dmmls, &dmml);
4990 break;
4991 }
4992
4993 /* This thread (tid) is informing us that it has completed a
4994 master. */
4995 case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
4996 Word n;
4997 const Thread *stayer = map_threads_maybe_lookup( tid );
4998 const void *master = (void*)args[1];
4999 const Word master_level = (Word) args[2];
5000 tl_assert(stayer);
5001
5002 if (0)
5003 VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5004 "self_id = %p master_level = %ld Thread* = %p\n",
5005 (Int)tid, master, master_level, stayer);
5006
5007 gnat_dmmls_INIT();
5008 /* Reverse loop on the array, simulating a pthread_join for
5009 the Dependent tasks of the completed master, and removing
5010 them from the array. */
5011 for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5012 GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5013 if (dmml->master == master
5014 && dmml->master_level == master_level) {
5015 if (0)
5016 VG_(printf)("quitter %p dependency to stayer %p\n",
5017 dmml->hg_dependent->hbthr, stayer->hbthr);
5018 tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5019 generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5020 stayer->hbthr);
5021 VG_(removeIndexXA) (gnat_dmmls, n);
5022 }
5023 }
5024 break;
5025 }
5026
sewardjb4112022007-11-09 22:49:28 +00005027 /* EXPOSITION only: by intercepting lock init events we can show
5028 the user where the lock was initialised, rather than only
5029 being able to show where it was first locked. Intercepting
5030 lock initialisations is not necessary for the basic operation
5031 of the race checker. */
5032 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5033 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5034 break;
5035
sewardjc02f6c42013-10-14 13:51:25 +00005036 /* mutex=arg[1], mutex_is_init=arg[2] */
sewardjb4112022007-11-09 22:49:28 +00005037 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00005038 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjb4112022007-11-09 22:49:28 +00005039 break;
5040
5041 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
5042 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
5043 break;
5044
5045 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
5046 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
5047 break;
5048
5049 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
5050 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5051 break;
5052
5053 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
5054 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5055 break;
5056
5057 /* This thread is about to do pthread_cond_signal on the
5058 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
5059 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
5060 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
5061 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
5062 break;
5063
5064 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5065 Returns a flag indicating whether or not the mutex is believed to be
5066 valid for this operation. */
5067 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
5068 Bool mutex_is_valid
5069 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
5070 (void*)args[2] );
5071 *ret = mutex_is_valid ? 1 : 0;
5072 break;
5073 }
5074
philippe19dfe032013-03-24 20:10:23 +00005075 /* Thread successfully completed pthread_cond_init:
5076 cond=arg[1], cond_attr=arg[2] */
5077 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
5078 evh__HG_PTHREAD_COND_INIT_POST( tid,
5079 (void*)args[1], (void*)args[2] );
5080 break;
5081
sewardjc02f6c42013-10-14 13:51:25 +00005082 /* cond=arg[1], cond_is_init=arg[2] */
sewardjf98e1c02008-10-25 16:22:41 +00005083 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00005084 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjf98e1c02008-10-25 16:22:41 +00005085 break;
5086
sewardjb4112022007-11-09 22:49:28 +00005087 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
5088 mutex=arg[2] */
5089 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
5090 evh__HG_PTHREAD_COND_WAIT_POST( tid,
sewardjff427c92013-10-14 12:13:52 +00005091 (void*)args[1], (void*)args[2],
5092 (Bool)args[3] );
sewardjb4112022007-11-09 22:49:28 +00005093 break;
5094
5095 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5096 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5097 break;
5098
5099 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5100 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5101 break;
5102
sewardj789c3c52008-02-25 12:10:07 +00005103 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00005104 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00005105 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5106 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00005107 break;
5108
5109 /* rwlock=arg[1], isW=arg[2] */
5110 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
5111 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5112 break;
5113
5114 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
5115 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5116 break;
5117
5118 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
5119 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5120 break;
5121
sewardj11e352f2007-11-30 11:11:02 +00005122 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5123 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00005124 break;
5125
sewardj11e352f2007-11-30 11:11:02 +00005126 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5127 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005128 break;
5129
sewardj11e352f2007-11-30 11:11:02 +00005130 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
5131 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5132 break;
5133
5134 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
5135 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005136 break;
5137
sewardj9f569b72008-11-13 13:33:09 +00005138 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00005139 /* pth_bar_t*, ulong count, ulong resizable */
5140 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5141 args[2], args[3] );
5142 break;
5143
5144 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5145 /* pth_bar_t*, ulong newcount */
5146 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5147 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00005148 break;
5149
5150 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5151 /* pth_bar_t* */
5152 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5153 break;
5154
5155 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5156 /* pth_bar_t* */
5157 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5158 break;
sewardjb4112022007-11-09 22:49:28 +00005159
sewardj5a644da2009-08-11 10:35:58 +00005160 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5161 /* pth_spinlock_t* */
5162 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5163 break;
5164
5165 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5166 /* pth_spinlock_t* */
5167 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5168 break;
5169
5170 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5171 /* pth_spinlock_t*, Word */
5172 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5173 break;
5174
5175 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5176 /* pth_spinlock_t* */
5177 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5178 break;
5179
5180 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5181 /* pth_spinlock_t* */
5182 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5183 break;
5184
sewardjed2e72e2009-08-14 11:08:24 +00005185 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
florian19f91bb2012-11-10 22:29:54 +00005186 /* HChar* who */
sewardjed2e72e2009-08-14 11:08:24 +00005187 HChar* who = (HChar*)args[1];
5188 HChar buf[50 + 50];
5189 Thread* thr = map_threads_maybe_lookup( tid );
5190 tl_assert( thr ); /* I must be mapped */
5191 tl_assert( who );
5192 tl_assert( VG_(strlen)(who) <= 50 );
5193 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5194 /* record_error_Misc strdup's buf, so this is safe: */
5195 HG_(record_error_Misc)( thr, buf );
5196 break;
5197 }
5198
5199 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5200 /* UWord arbitrary-SO-tag */
5201 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5202 break;
5203
5204 case _VG_USERREQ__HG_USERSO_RECV_POST:
5205 /* UWord arbitrary-SO-tag */
5206 evh__HG_USERSO_RECV_POST( tid, args[1] );
5207 break;
5208
sewardj6015d0e2011-03-11 19:10:48 +00005209 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5210 /* UWord arbitrary-SO-tag */
5211 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5212 break;
5213
philippef5774342014-05-03 11:12:50 +00005214 case VG_USERREQ__GDB_MONITOR_COMMAND: {
5215 Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
5216 if (handled)
5217 *ret = 1;
5218 else
5219 *ret = 0;
5220 return handled;
5221 }
5222
sewardjb4112022007-11-09 22:49:28 +00005223 default:
5224 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00005225 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
5226 args[0]);
sewardjb4112022007-11-09 22:49:28 +00005227 }
5228
5229 return True;
5230}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                  HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                       HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                       HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                       HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                       HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else if VG_BOOL_CLO(arg, "--free-is-write",
                       HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                       HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                       HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                       HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                       HG_(clo_check_stack_refs)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
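
/* Worked example of the --hg-sanity-flags parsing above (a standalone
   sketch, not tool code; "parse_sanity_flags" is a hypothetical
   rendering of the same loop): digit j of the 6-character string sets
   bit (6-1-j), so the leftmost digit is the most significant bit, and
   "010000" yields 0x10 -- the lock-order-graph check listed in
   hg_print_debug_usage below. */
#if 0
static unsigned long parse_sanity_flags ( const char* s /* exactly 6 digits */ )
{
   unsigned long flags = 0;
   int j;
   for (j = 0; j < 6; j++)
      if (s[j] == '1')
         flags |= (1UL << (6-1-j));   /* leftmost digit -> bit 5 */
   return flags;
}
/* parse_sanity_flags("010000") == 0x10 */
#endif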

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:  is never done (may cause big space leaks in Helgrind)\n"
"       auto:   done just often enough to keep space usage under control\n"
"       always: done after every VTS GC (mostly just a big time waster)\n"
   );
}
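
/* Example invocation combining the options documented above (for
   illustration; "my_threaded_prog" is a hypothetical program):

      valgrind --tool=helgrind --history-level=approx \
               --conflict-cache-size=2000000 --hg-sanity-flags=000010 \
               ./my_threaded_prog
*/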

static void hg_print_stats (void)
{

   if (1) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog, "univ_laog" );
      }
   }

   //zz VG_(printf)("\n");
   //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
   //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
   //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
   //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz             stats__hbefore_stk_hwm);
   //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
   //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)(" locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)(" LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)(" locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases
              );
   VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // This in fact only prints stats.
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
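
/* Sketch of this callback's contract (illustrative, not tool code):
   exactly nRequest slots of 'frames' are always written, with unused
   trailing slots zero-filled, so a consumer may treat zero as an
   end-of-trace marker.  "count_frames" is a hypothetical consumer: */
#if 0
static UWord count_frames ( const Addr* frames, UWord nRequest )
{
   UWord i;
   for (i = 0; i < nRequest; i++)
      if (frames[i] == 0)
         break;   /* zero padding marks the end of the useful frames */
   return i;
}
#endif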

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_info_location (Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (a);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2013, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_print_stats)         (hg_print_stats);
   VG_(needs_info_location)       (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}
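
/* Sketch of how the MallocMeta pool allocator set up in hg_pre_clo_init
   is typically used (illustrative, not a definitive excerpt of this
   file): fixed-size elements are obtained and returned through the
   pub_tool_poolalloc.h API. */
#if 0
   MallocMeta* md = VG_(allocEltPA)( MallocMeta_poolalloc );
   /* ... fill in *md and insert it into hg_mallocmeta_table ... */
   VG_(freeEltPA)( MallocMeta_poolalloc, md );
#endif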

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/