/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2013 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

39#include "pub_tool_basics.h"
philippef5774342014-05-03 11:12:50 +000040#include "pub_tool_gdbserver.h"
sewardjb4112022007-11-09 22:49:28 +000041#include "pub_tool_libcassert.h"
42#include "pub_tool_libcbase.h"
43#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000044#include "pub_tool_threadstate.h"
45#include "pub_tool_tooliface.h"
46#include "pub_tool_hashtable.h"
47#include "pub_tool_replacemalloc.h"
48#include "pub_tool_machine.h"
49#include "pub_tool_options.h"
50#include "pub_tool_xarray.h"
51#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000052#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000053#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
54#include "pub_tool_redir.h" // sonames for the dynamic linkers
55#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj8eb8bab2015-07-21 14:44:28 +000056#include "pub_tool_libcproc.h"
sewardj234e5582011-02-09 12:47:23 +000057#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
philippe5fbc9762013-12-01 19:28:48 +000058#include "pub_tool_poolalloc.h"
philippe07c08522014-05-14 20:39:27 +000059#include "pub_tool_addrinfo.h"
sewardjb4112022007-11-09 22:49:28 +000060
#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when the client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/

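/* Illustrative sketch (not part of the original source): the kind of
   UWord <-> pointer punning that motivates -fno-strict-aliasing.  A
   WordFM stores keys and values as UWords, so retrieving a Lock*
   means writing through a (UWord*) cast of a Lock**, which -O2's
   type-based alias analysis could otherwise miscompile.  'ga' is a
   hypothetical guest address. */
#if 0
   Lock* lk    = NULL;
   /* The same object is accessed as both Lock* and UWord here. */
   Bool  found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
#endif
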
// FIXME: what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME: put a referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME: accesses to NoAccess areas: change state to Excl?

// FIXME: report errors for accesses of NoAccess memory?

// FIXME: pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.

/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle del_LockN. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage-collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }

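/* Illustrative sketch (not part of the original source): how the
   primary structures fit together.  admin_threads/admin_locks
   enumerate every Thread/Lock ever created (admin_locks doubly
   linked so del_LockN can unlink in O(1)), map_threads gives O(1)
   access by core ThreadId, and map_locks maps a guest lock address
   to its Lock*.  'tid' and 'ga' are hypothetical inputs. */
#if 0
   Thread* t;
   Lock*   lk = NULL;
   /* Walk every thread Helgrind knows about ... */
   for (t = admin_threads; t; t = t->admin)
      tl_assert(HG_(is_sane_Thread)(t));
   /* ... fetch the Thread for core ThreadId 'tid' (may be NULL) ... */
   Thread* thr = map_threads[tid];
   /* ... and the Lock, if any, at guest address 'ga'. */
   VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
#endif
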

/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

#if defined(VGO_solaris)
Bool HG_(clo_ignore_thread_creation) = True;
#else
Bool HG_(clo_ignore_thread_creation) = False;
#endif /* VGO_solaris */

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx      = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   thread->synchr_nesting = 0;
   thread->pthread_create_nesting_level = 0;
#if defined(VGO_solaris)
   thread->bind_guard_flag = 0;
#endif /* VGO_solaris */

   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert it into the admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock          = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next  = admin_locks;
   lock->admin_prev  = NULL;
   admin_locks       = lock;
   /* end: add */
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from the admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
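
/* Illustrative sketch (not part of the original source): the lock
   state transitions the switch above permits.  An LK_mbRec lock may
   be re-acquired by its owner (the heldBy bag then holds 'thr' more
   than once); LK_nonRec and LK_rdwr locks must be unheld at
   w-acquisition time.  'ga' and 'thr' are hypothetical. */
#if 0
   Lock* lk = mk_LockN( LK_mbRec, ga );
   lockN_acquire_writer( lk, thr );  /* heldBy = { thr }       */
   lockN_acquire_writer( lk, thr );  /* heldBy = { thr, thr }  */
   lockN_release( lk, thr );         /* heldBy = { thr }       */
   lockN_release( lk, thr );         /* heldBy = NULL, unheld  */
#endif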

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int   i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

/* Pretty-print lock lk.
   If show_lock_addrdescr, describe the (guest) lock address
   (this description is more complete with --read-var-info=yes).
   If show_internal_data, also show Helgrind-internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data)
{
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %u ", thr->coretid);
         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True  /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                                      (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}

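/* Illustrative sketch (not part of the original source): the lookup
   contracts above.  map_threads_maybe_lookup may return NULL for a
   tid with no associated Thread, whereas map_threads_lookup asserts,
   so callers use the latter only where a Thread must already exist. */
#if 0
   Thread* maybe = map_threads_maybe_lookup( tid );  /* NULL is ok       */
   Thread* must  = map_threads_lookup( tid );        /* asserts non-NULL */
#endif
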
static void HG_(thread_enter_synchr)(Thread *thr) {
   tl_assert(thr->synchr_nesting >= 0);
#if defined(VGO_solaris)
   thr->synchr_nesting += 1;
#endif /* VGO_solaris */
}

static void HG_(thread_leave_synchr)(Thread *thr) {
#if defined(VGO_solaris)
   thr->synchr_nesting -= 1;
#endif /* VGO_solaris */
   tl_assert(thr->synchr_nesting >= 0);
}

static void HG_(thread_enter_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level >= 0);
   thr->pthread_create_nesting_level += 1;
}

static void HG_(thread_leave_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level > 0);
   thr->pthread_create_nesting_level -= 1;
}

static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
   Thread *thr = map_threads_maybe_lookup(tid);
   return thr->pthread_create_nesting_level;
}

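/* Illustrative sketch (not part of the original source): the intended
   bracketing of the nesting helpers above around a wrapped library
   operation.  On Solaris, a non-zero synchr_nesting marks accesses
   made from inside the libc synchronisation routines themselves. */
#if 0
   HG_(thread_enter_synchr)( thr );
   /* ... process the guts of an intercepted synchronisation call ... */
   HG_(thread_leave_synchr)( thr );
#endif
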
/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}

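/* Illustrative sketch (not part of the original source): typical use
   of map_locks_lookup_or_create from a lock-acquire event handler,
   exactly as the w-acquire handler later in this file does.  'lkk'
   comes from the wrapper context (mutex vs rwlock) and 'lock_ga' is
   the guest address of the lock object. */
#if 0
   Lock* lk = map_locks_lookup_or_create(
                 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
#endif
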

/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/

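/* Illustrative sketch (not part of the original source): one of the
   invariants above in executable form -- every thread's w-held
   lockset must be a subset of its full held lockset, which is what
   threads__sanity_check below verifies. */
#if 0
   Thread* thr;
   for (thr = admin_threads; thr; thr = thr->admin)
      tl_assert( HG_(isSubsetOf)( univ_lsets,
                                  thr->locksetW, thr->locksetA ) );
#endif
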

/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP   = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }

1091
1092/* The lock at 'lock_ga' has acquired a writer. Make all necessary
1093 updates, and also do all possible error checks. */
1094static
1095void evhH__post_thread_w_acquires_lock ( Thread* thr,
1096 LockKind lkk, Addr lock_ga )
1097{
1098 Lock* lk;
1099
1100 /* Basically what we need to do is call lockN_acquire_writer.
1101 However, that will barf if any 'invalid' lock states would
1102 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001103 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001104 routine.
1105
1106 Because this routine is only called after successful lock
1107 acquisition, we should not be asked to move the lock into any
1108 invalid states. Requests to do so are bugs in libpthread, since
1109 that should have rejected any such requests. */
1110
sewardjf98e1c02008-10-25 16:22:41 +00001111 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001112 /* Try to find the lock. If we can't, then create a new one with
1113 kind 'lkk'. */
1114 lk = map_locks_lookup_or_create(
1115 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001116 tl_assert( HG_(is_sane_LockN)(lk) );
1117
1118 /* check libhb level entities exist */
1119 tl_assert(thr->hbthr);
1120 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001121
1122 if (lk->heldBy == NULL) {
1123 /* the lock isn't held. Simple. */
1124 tl_assert(!lk->heldW);
1125 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001126 /* acquire a dependency from the lock's VCs */
1127 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001128 goto noerror;
1129 }
1130
1131 /* So the lock is already held. If held as a r-lock then
1132 libpthread must be buggy. */
1133 tl_assert(lk->heldBy);
1134 if (!lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001135 HG_(record_error_Misc)(
1136 thr, "Bug in libpthread: write lock "
1137 "granted on rwlock which is currently rd-held");
sewardjb4112022007-11-09 22:49:28 +00001138 goto error;
1139 }
1140
1141 /* So the lock is held in w-mode. If it's held by some other
1142 thread, then libpthread must be buggy. */
sewardj896f6f92008-08-19 08:38:52 +00001143 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
sewardjb4112022007-11-09 22:49:28 +00001144
sewardj896f6f92008-08-19 08:38:52 +00001145 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
sewardjf98e1c02008-10-25 16:22:41 +00001146 HG_(record_error_Misc)(
1147 thr, "Bug in libpthread: write lock "
1148 "granted on mutex/rwlock which is currently "
1149 "wr-held by a different thread");
sewardjb4112022007-11-09 22:49:28 +00001150 goto error;
1151 }
1152
1153 /* So the lock is already held in w-mode by 'thr'. That means this
1154 is an attempt to lock it recursively, which is only allowable
1155 for LK_mbRec kinded locks. Since this routine is called only
1156 once the lock has been acquired, this must also be a libpthread
1157 bug. */
1158 if (lk->kind != LK_mbRec) {
sewardjf98e1c02008-10-25 16:22:41 +00001159 HG_(record_error_Misc)(
1160 thr, "Bug in libpthread: recursive write lock "
1161 "granted on mutex/wrlock which does not "
1162 "support recursion");
sewardjb4112022007-11-09 22:49:28 +00001163 goto error;
1164 }
1165
1166 /* So we are recursively re-locking a lock we already w-hold. */
1167 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001168 /* acquire a dependency from the lock's VC. Probably pointless,
1169 but also harmless. */
1170 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001171 goto noerror;
1172
1173 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001174 if (HG_(clo_track_lockorders)) {
1175 /* check lock order acquisition graph, and update. This has to
1176 happen before the lock is added to the thread's locksetA/W. */
1177 laog__pre_thread_acquires_lock( thr, lk );
1178 }
sewardjb4112022007-11-09 22:49:28 +00001179 /* update the thread's held-locks set */
florian6bf37262012-10-21 03:23:36 +00001180 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1181 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +00001182 /* fall through */
1183
1184 error:
sewardjf98e1c02008-10-25 16:22:41 +00001185 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001186}
1187
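/* Illustrative sketch (not part of the original source): a client
   sequence corresponding to the "recursive write lock granted on
   mutex ... which does not support recursion" complaint above.  It
   is only reachable if the lock operation actually reports success,
   i.e. if libpthread itself misbehaves. */
#if 0
   pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;  /* mapped to LK_nonRec */
   pthread_mutex_lock(&mx);
   pthread_mutex_lock(&mx);  /* if this "succeeds", Helgrind complains */
#endif
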
1188
1189/* The lock at 'lock_ga' has acquired a reader. Make all necessary
1190 updates, and also do all possible error checks. */
1191static
1192void evhH__post_thread_r_acquires_lock ( Thread* thr,
1193 LockKind lkk, Addr lock_ga )
1194{
1195 Lock* lk;
1196
1197 /* Basically what we need to do is call lockN_acquire_reader.
1198 However, that will barf if any 'invalid' lock states would
1199 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001200 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001201 routine.
1202
1203 Because this routine is only called after successful lock
1204 acquisition, we should not be asked to move the lock into any
1205 invalid states. Requests to do so are bugs in libpthread, since
1206 that should have rejected any such requests. */
1207
sewardjf98e1c02008-10-25 16:22:41 +00001208 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001209 /* Try to find the lock. If we can't, then create a new one with
1210 kind 'lkk'. Only a reader-writer lock can be read-locked,
1211 hence the first assertion. */
1212 tl_assert(lkk == LK_rdwr);
1213 lk = map_locks_lookup_or_create(
1214 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001215 tl_assert( HG_(is_sane_LockN)(lk) );
1216
1217 /* check libhb level entities exist */
1218 tl_assert(thr->hbthr);
1219 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001220
1221 if (lk->heldBy == NULL) {
1222 /* the lock isn't held. Simple. */
1223 tl_assert(!lk->heldW);
1224 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001225 /* acquire a dependency from the lock's VC */
1226 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001227 goto noerror;
1228 }
1229
1230 /* So the lock is already held. If held as a w-lock then
1231 libpthread must be buggy. */
1232 tl_assert(lk->heldBy);
1233 if (lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001234 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1235 "granted on rwlock which is "
1236 "currently wr-held");
sewardjb4112022007-11-09 22:49:28 +00001237 goto error;
1238 }
1239
1240 /* Easy enough. In short anybody can get a read-lock on a rwlock
1241 provided it is either unlocked or already in rd-held. */
1242 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001243 /* acquire a dependency from the lock's VC. Probably pointless,
1244 but also harmless. */
1245 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001246 goto noerror;
1247
1248 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001249 if (HG_(clo_track_lockorders)) {
1250 /* check lock order acquisition graph, and update. This has to
1251 happen before the lock is added to the thread's locksetA/W. */
1252 laog__pre_thread_acquires_lock( thr, lk );
1253 }
sewardjb4112022007-11-09 22:49:28 +00001254 /* update the thread's held-locks set */
florian6bf37262012-10-21 03:23:36 +00001255 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +00001256 /* but don't update thr->locksetW, since lk is only rd-held */
1257 /* fall through */
1258
1259 error:
sewardjf98e1c02008-10-25 16:22:41 +00001260 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001261}
1262
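/* Illustrative sketch (not part of the original source): the client
   behaviour behind the "read lock granted on rwlock which is
   currently wr-held" complaint above -- again only reachable if the
   rdlock operation wrongly reports success. */
#if 0
   pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
   pthread_rwlock_wrlock(&rw);  /* rw now wr-held by this thread     */
   pthread_rwlock_rdlock(&rw);  /* a bogus grant here -> Misc report */
#endif
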
1263
1264/* The lock at 'lock_ga' is just about to be unlocked. Make all
1265 necessary updates, and also do all possible error checks. */
1266static
1267void evhH__pre_thread_releases_lock ( Thread* thr,
1268 Addr lock_ga, Bool isRDWR )
1269{
1270 Lock* lock;
1271 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001272 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001273
1274 /* This routine is called prior to a lock release, before
1275 libpthread has had a chance to validate the call. Hence we need
1276 to detect and reject any attempts to move the lock into an
1277 invalid state. Such attempts are bugs in the client.
1278
1279 isRDWR is True if we know from the wrapper context that lock_ga
1280 should refer to a reader-writer lock, and is False if [ditto]
1281 lock_ga should refer to a standard mutex. */
1282
sewardjf98e1c02008-10-25 16:22:41 +00001283 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001284 lock = map_locks_maybe_lookup( lock_ga );
1285
1286 if (!lock) {
1287 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1288 the client is trying to unlock it. So complain, then ignore
1289 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001290 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001291 return;
1292 }
1293
1294 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001295 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001296
1297 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001298 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1299 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001300 }
1301 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001302 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1303 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001304 }
1305
1306 if (!lock->heldBy) {
1307 /* The lock is not held. This indicates a serious bug in the
1308 client. */
1309 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001310 HG_(record_error_UnlockUnlocked)( thr, lock );
florian6bf37262012-10-21 03:23:36 +00001311 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1312 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001313 goto error;
1314 }
1315
sewardjf98e1c02008-10-25 16:22:41 +00001316 /* test just above dominates */
1317 tl_assert(lock->heldBy);
1318 was_heldW = lock->heldW;
1319
sewardjb4112022007-11-09 22:49:28 +00001320 /* The lock is held. Is this thread one of the holders? If not,
1321 report a bug in the client. */
florian6bf37262012-10-21 03:23:36 +00001322 n = VG_(elemBag)( lock->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +00001323 tl_assert(n >= 0);
1324 if (n == 0) {
1325 /* We are not a current holder of the lock. This is a bug in
1326 the guest, and (per POSIX pthread rules) the unlock
1327 attempt will fail. So just complain and do nothing
1328 else. */
sewardj896f6f92008-08-19 08:38:52 +00001329 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001330 tl_assert(HG_(is_sane_Thread)(realOwner));
sewardjb4112022007-11-09 22:49:28 +00001331 tl_assert(realOwner != thr);
florian6bf37262012-10-21 03:23:36 +00001332 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1333 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjf98e1c02008-10-25 16:22:41 +00001334 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
sewardjb4112022007-11-09 22:49:28 +00001335 goto error;
1336 }
1337
1338 /* Ok, we hold the lock 'n' times. */
1339 tl_assert(n >= 1);
1340
1341 lockN_release( lock, thr );
1342
1343 n--;
1344 tl_assert(n >= 0);
1345
1346 if (n > 0) {
1347 tl_assert(lock->heldBy);
florian6bf37262012-10-21 03:23:36 +00001348 tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjb4112022007-11-09 22:49:28 +00001349 /* We still hold the lock. So either it's a recursive lock
1350 or a rwlock which is currently r-held. */
1351 tl_assert(lock->kind == LK_mbRec
1352 || (lock->kind == LK_rdwr && !lock->heldW));
florian6bf37262012-10-21 03:23:36 +00001353 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001354 if (lock->heldW)
florian6bf37262012-10-21 03:23:36 +00001355 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001356 else
florian6bf37262012-10-21 03:23:36 +00001357 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
sewardjb4112022007-11-09 22:49:28 +00001358 } else {
sewardj983f3022009-05-21 14:49:55 +00001359 /* n is zero. This means we don't hold the lock any more. But
1360 if it's a rwlock held in r-mode, someone else could still
1361 hold it. Just do whatever sanity checks we can. */
1362 if (lock->kind == LK_rdwr && lock->heldBy) {
1363 /* It's a rwlock. We no longer hold it but we used to;
1364 nevertheless it still appears to be held by someone else.
1365 The implication is that, prior to this release, it must
1366 have been shared by us and and whoever else is holding it;
1367 which in turn implies it must be r-held, since a lock
1368 can't be w-held by more than one thread. */
1369 /* The lock is now R-held by somebody else: */
1370 tl_assert(lock->heldW == False);
1371 } else {
1372 /* Normal case. It's either not a rwlock, or it's a rwlock
1373 that we used to hold in w-mode (which is pretty much the
1374 same thing as a non-rwlock.) Since this transaction is
1375 atomic (V does not allow multiple threads to run
1376 simultaneously), it must mean the lock is now not held by
1377 anybody. Hence assert for it. */
1378 /* The lock is now not held by anybody: */
1379 tl_assert(!lock->heldBy);
1380 tl_assert(lock->heldW == False);
1381 }
sewardjf98e1c02008-10-25 16:22:41 +00001382 //if (lock->heldBy) {
florian6bf37262012-10-21 03:23:36 +00001383 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjf98e1c02008-10-25 16:22:41 +00001384 //}
sewardjb4112022007-11-09 22:49:28 +00001385 /* update this thread's lockset accordingly. */
1386 thr->locksetA
florian6bf37262012-10-21 03:23:36 +00001387 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +00001388 thr->locksetW
florian6bf37262012-10-21 03:23:36 +00001389 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001390 /* push our VC into the lock */
1391 tl_assert(thr->hbthr);
1392 tl_assert(lock->hbso);
1393 /* If the lock was previously W-held, then we want to do a
1394 strong send, and if previously R-held, then a weak send. */
1395 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001396 }
1397 /* fall through */
1398
1399 error:
sewardjf98e1c02008-10-25 16:22:41 +00001400 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001401}
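
/* Illustration, not part of the original code: a sketch of why the
   send strength chosen just above matters, assuming libhb's usual SO
   semantics (a strong send overwrites the SO's clock, a weak send
   joins into it). A w-mode release comes from the single holder, so
   its clock can simply replace the SO's. An r-mode release is one of
   possibly several readers unlocking in turn; weak sends let their
   clocks accumulate in the SO, so a later w-acquirer picks up a
   dependency on all of them, e.g.

      T1: rdlock; read x; unlock  --\  (weak sends join in the SO)
                                      --> T3: wrlock
      T2: rdlock; read x; unlock  --/      (now depends on T1 AND T2)
*/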


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread? This is
   absolutely performance critical. We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'. Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault. Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/
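
/* (Added note, an observation rather than anything new: caching into
   a single global like this is only coherent because Valgrind
   serialises client execution -- at most one guest thread runs at any
   instant, the same property the lock-release code above relies on
   when it calls the state transitions "atomic".) */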

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code. Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge. It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx) {
      shadow_mem_make_New( thr, a, len );
      if (UNLIKELY(thr->pthread_create_nesting_level > 0))
         shadow_mem_make_Untracked( thr, a, len );
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests. If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm. What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this. If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs. If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb. So force them to NoAccess, so that all
   // VTS references in the affected area are dropped. Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   Thread *thr = get_current_Thread();
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_scopy_range( thr, src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On x86/amd64-linux, this entails a nasty glibc specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
        first_ip_delta = -3;
#       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
        first_ip_delta = -1;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }

      if (HG_(clo_ignore_thread_creation)) {
         HG_(thread_enter_pthread_create)(thr_c);
         tl_assert(thr_c->synchr_nesting == 0);
         HG_(thread_enter_synchr)(thr_c);
         /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress. That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works). In which case there has already been a prior
      sync event. So in any case, just let the thread exit. On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}

/* This is called immediately after fork, for the child only. 'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}

/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
static
void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
{
   SO* so;
   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Send last arg of _so_send as False, since the sending thread
      doesn't actually exist any more, so we don't want _so_send to
      try taking stack snapshots of it. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped. Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls. (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.) See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);
}


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread* thr_s;
   Thread* thr_q;
   Thr*    hbthr_s;
   Thr*    hbthr_q;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   generate_quitter_stayer_dependence (hbthr_q, hbthr_s);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks. No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit
      before we hear of any join event on it. The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it. Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                const HChar* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_read_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place. Actually we should check the whole string, not just
   // the start address, but that's too much trouble. At least
   // checking the first byte is better than nothing. See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   Thread *thr = map_threads_lookup(tid);
   len = VG_(strlen)( (HChar*) a );
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range( thr, a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cwrite_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // We ignore the initialisation state (is_inited); that's ok.
   shadow_mem_make_New(get_current_Thread(), a, len);
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free. This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      if (LIKELY(thr->synchr_nesting == 0))
         shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_AHAE( thr, a, len );
   /* We used to call instead
         shadow_mem_make_NoAccess_NoFX( thr, a, len );
      A non-buggy application will no longer access the freed memory,
      so marking it no-access is in theory useless.
      Not marking freed memory would avoid the overhead for applications
      doing mostly malloc/free, as the freed memory should then be recycled
      very quickly after marking.
      We nevertheless mark it noaccess, for the following reasons:
        * the accessibility bits then always correctly represent the memory
          status (e.g. for the client request VALGRIND_HG_GET_ABITS).
        * the overhead is reasonable (about 5 seconds per Gb in 1000-byte
          blocks, on a ppc64le, for an unrealistic workload of an application
          doing only malloc/free).
        * marking no access allows the SecMap to be GCd, which might improve
          performance and/or memory usage.
        * we might detect more application bugs when memory is marked
          noaccess.
      If needed, we could support here an option --free-is-noaccess=yes|no
      to avoid marking freed memory as no access, for applications that
      need to avoid the marking-noaccess overhead. */

   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_N(hbthr, a, size);
}

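/* (Added note: the ten helpers above are the race-detection hot path.
   Helgrind's instrumentation emits a call to one of them for each
   instrumented client memory access; the fixed-size 1/2/4/8-byte
   variants avoid passing a size argument, and VG_REGPARM passes the
   address in a register, keeping per-access overhead down.) */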

/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked. Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing). Duh. Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
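
/* Illustration (hypothetical client code, not from the original): the
   check above catches self-deadlock patterns such as

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);  // non-recursive and already w-held:
                                // "Attempt to re-lock ..." is reported

   pthread_mutex_trylock on the same mutex is deliberately exempt
   (isTryLock != 0), since merely failing a trylock is legitimate. */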

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge. Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */
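
/* (Added note, a plausible reading of the "kludge" comment above
   rather than anything stated in the original: in glibc, both
   pthread_spin_init and pthread_spin_unlock amount to storing the
   "unlocked" value into the lock word, so the two operations look
   alike at intercept time and are funnelled through one
   INIT_OR_UNLOCK pair, disambiguated at runtime by whether we
   currently believe the lock to be held.) */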

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it. Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge. Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held. So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery. If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}


/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking). When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO. This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero. Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;
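
/* Illustration (hypothetical client code, not from the original): the
   send-on-signal / recv-on-wait-return scheme described above creates
   a happens-before edge from A's writes to B's reads, so no race is
   reported on 'data':

      int data = 0, ready = 0;

      Thread A                        Thread B
      pthread_mutex_lock(&mx);        pthread_mutex_lock(&mx);
      data = 42; ready = 1;           while (!ready)
      pthread_cond_signal(&cv);          pthread_cond_wait(&cv, &mx);
      pthread_mutex_unlock(&mx);      /* ... use data ... */
                                      pthread_mutex_unlock(&mx);
*/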


/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so     = so;
      cvi->mx_ga  = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      return NULL;
   }
}

static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread* thr;
   UWord keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
            " destruction of condition variable being waited upon");
         /* Destroying a cond var that is being waited upon has
            outcome EBUSY, and the variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV. So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'. As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO. This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm. POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that the mutex should be held "if
   // consistent scheduling is desired." For that reason, print
   // "dubious" if the lock isn't held by any thread. Skip the
   // "dubious" if it is held by some other thread; that sounds
   // straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about. In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL. Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error. We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr,
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2418
2419/* returns True if it reckons 'mutex' is valid and held by this
2420 thread, else False */
2421static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2422 void* cond, void* mutex )
2423{
2424 Thread* thr;
2425 Lock* lk;
2426 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002427 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002428
2429 if (SHOW_EVENTS >= 1)
2430 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2431 "(ctid=%d, cond=%p, mutex=%p)\n",
2432 (Int)tid, (void*)cond, (void*)mutex );
2433
sewardjb4112022007-11-09 22:49:28 +00002434 thr = map_threads_maybe_lookup( tid );
2435 tl_assert(thr); /* cannot fail - Thread* must already exist */
2436
2437 lk = map_locks_maybe_lookup( (Addr)mutex );
2438
2439 /* Check for stupid mutex arguments. There are various ways to be
2440 a bozo. Only complain once, though, even if more than one thing
2441 is wrong. */
2442 if (lk == NULL) {
2443 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002444 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002445 thr,
2446 "pthread_cond_{timed}wait called with invalid mutex" );
2447 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002448 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002449 if (lk->kind == LK_rdwr) {
2450 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002451 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002452 thr, "pthread_cond_{timed}wait called with mutex "
2453 "of type pthread_rwlock_t*" );
2454 } else
2455 if (lk->heldBy == NULL) {
2456 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002457 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002458 thr, "pthread_cond_{timed}wait called with un-held mutex");
2459 } else
2460 if (lk->heldBy != NULL
florian6bf37262012-10-21 03:23:36 +00002461 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002462 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002463 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002464 thr, "pthread_cond_{timed}wait called with mutex "
2465 "held by a different thread" );
2466 }
2467 }
2468
2469 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002470 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2471 tl_assert(cvi);
2472 tl_assert(cvi->so);
2473 if (cvi->nWaiters == 0) {
2474 /* form initial (CV,MX) binding */
2475 cvi->mx_ga = mutex;
2476 }
2477 else /* check existing (CV,MX) binding */
2478 if (cvi->mx_ga != mutex) {
2479 HG_(record_error_Misc)(
2480 thr, "pthread_cond_{timed}wait: cond is associated "
2481 "with a different mutex");
2482 }
2483 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002484
2485 return lk_valid;
2486}
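
/* Illustrative (hypothetical client code, not part of Helgrind): the
   first wait on a CV establishes its (CV,MX) binding, so

      pthread_cond_wait(&cv, &mx1);   // binds (cv,mx1)
      ...
      pthread_cond_wait(&cv, &mx2);   // flagged: "cond is associated
                                      // with a different mutex"

   as per the nWaiters == 0 logic in the function above. */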
2487
2488static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
sewardjff427c92013-10-14 12:13:52 +00002489 void* cond, void* mutex,
2490 Bool timeout)
sewardjb4112022007-11-09 22:49:28 +00002491{
sewardjf98e1c02008-10-25 16:22:41 +00002492 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2493 the SO for this cond, and 'recv' from it so as to acquire a
2494 dependency edge back to the signaller/broadcaster. */
2495 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002496 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002497
2498 if (SHOW_EVENTS >= 1)
2499 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
sewardjff427c92013-10-14 12:13:52 +00002500 "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
2501 (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
sewardjb4112022007-11-09 22:49:28 +00002502
sewardjb4112022007-11-09 22:49:28 +00002503 thr = map_threads_maybe_lookup( tid );
2504 tl_assert(thr); /* cannot fail - Thread* must already exist */
2505
2506 // error-if: cond is also associated with a different mutex
2507
philippe8bfc2152012-07-06 23:38:24 +00002508 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2509 if (!cvi) {
2510 /* This could be either a bug in helgrind or an error in the guest
2511 application (e.g. the cond var was destroyed by another thread).
2512 Let's assume helgrind is perfect ...
2513 Note that this is similar to drd behaviour. */
2514 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2515 " being waited upon");
2516 return;
2517 }
2518
sewardj02114542009-07-28 20:52:36 +00002519 tl_assert(cvi);
2520 tl_assert(cvi->so);
2521 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002522
sewardjff427c92013-10-14 12:13:52 +00002523 if (!timeout && !libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002524 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2525 it? If this happened it would surely be a bug in the threads
2526 library. Or one of those fabled "spurious wakeups". */
2527 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002528 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002529 " without prior pthread_cond_signal");
sewardjb4112022007-11-09 22:49:28 +00002530 }
sewardjf98e1c02008-10-25 16:22:41 +00002531
2532 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002533 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2534
2535 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002536}
2537
philippe19dfe032013-03-24 20:10:23 +00002538static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2539 void* cond, void* cond_attr )
2540{
2541 CVInfo* cvi;
2542
2543 if (SHOW_EVENTS >= 1)
2544 VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2545 "(ctid=%d, cond=%p, cond_attr=%p)\n",
2546 (Int)tid, (void*)cond, (void*) cond_attr );
2547
2548 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2549 tl_assert (cvi);
2550 tl_assert (cvi->so);
2551}
2552
2553
sewardjf98e1c02008-10-25 16:22:41 +00002554static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
sewardjc02f6c42013-10-14 13:51:25 +00002555 void* cond, Bool cond_is_init )
sewardjf98e1c02008-10-25 16:22:41 +00002556{
2557 /* Deal with destroy events. The only purpose is to free storage
2558 associated with the CV, so as to avoid any possible resource
2559 leaks. */
2560 if (SHOW_EVENTS >= 1)
2561 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
sewardjc02f6c42013-10-14 13:51:25 +00002562 "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2563 (Int)tid, (void*)cond, (Int)cond_is_init );
sewardjf98e1c02008-10-25 16:22:41 +00002564
sewardjc02f6c42013-10-14 13:51:25 +00002565 map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
sewardjb4112022007-11-09 22:49:28 +00002566}
2567
2568
sewardj9f569b72008-11-13 13:33:09 +00002569/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002570/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002571/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002572
2573/* EXPOSITION only */
2574static
2575void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2576{
2577 if (SHOW_EVENTS >= 1)
2578 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2579 (Int)tid, (void*)rwl );
2580 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002581 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002582 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2583}
2584
2585static
2586void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2587{
2588 Thread* thr;
2589 Lock* lk;
2590 if (SHOW_EVENTS >= 1)
2591 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2592 (Int)tid, (void*)rwl );
2593
2594 thr = map_threads_maybe_lookup( tid );
2595 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002596 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002597
2598 lk = map_locks_maybe_lookup( (Addr)rwl );
2599
2600 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002601 HG_(record_error_Misc)(
2602 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002603 }
2604
2605 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002606 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002607 tl_assert( lk->guestaddr == (Addr)rwl );
2608 if (lk->heldBy) {
2609 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002610 HG_(record_error_Misc)(
2611 thr, "pthread_rwlock_destroy of a locked rwlock" );
sewardjb4112022007-11-09 22:49:28 +00002612 /* remove lock from locksets of all owning threads */
2613 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002614 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002615 lk->heldBy = NULL;
2616 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002617 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002618 }
2619 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002620 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002621
2622 if (HG_(clo_track_lockorders))
2623 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002624 map_locks_delete( lk->guestaddr );
2625 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002626 }
2627
sewardjf98e1c02008-10-25 16:22:41 +00002628 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002629 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2630}
2631
2632static
sewardj789c3c52008-02-25 12:10:07 +00002633void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2634 void* rwl,
2635 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002636{
2637 /* Just check the rwl is sane; nothing else to do. */
2638 // 'rwl' may be invalid - not checked by wrapper
2639 Thread* thr;
2640 Lock* lk;
2641 if (SHOW_EVENTS >= 1)
2642 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2643 (Int)tid, (Int)isW, (void*)rwl );
2644
2645 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002646 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002647 thr = map_threads_maybe_lookup( tid );
2648 tl_assert(thr); /* cannot fail - Thread* must already exist */
2649
2650 lk = map_locks_maybe_lookup( (Addr)rwl );
2651 if ( lk
2652 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2653 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002654 HG_(record_error_Misc)(
2655 thr, "pthread_rwlock_{rd,wr}lock with a "
2656 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002657 }
2658}
2659
2660static
2661void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2662{
2663 // only called if the real library call succeeded - so mutex is sane
2664 Thread* thr;
2665 if (SHOW_EVENTS >= 1)
2666 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2667 (Int)tid, (Int)isW, (void*)rwl );
2668
2669 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2670 thr = map_threads_maybe_lookup( tid );
2671 tl_assert(thr); /* cannot fail - Thread* must already exist */
2672
2673 (isW ? evhH__post_thread_w_acquires_lock
2674 : evhH__post_thread_r_acquires_lock)(
2675 thr,
2676 LK_rdwr, /* if not known, create new lock with this LockKind */
2677 (Addr)rwl
2678 );
2679}
2680
2681static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2682{
2683 // 'rwl' may be invalid - not checked by wrapper
2684 Thread* thr;
2685 if (SHOW_EVENTS >= 1)
2686 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2687 (Int)tid, (void*)rwl );
2688
2689 thr = map_threads_maybe_lookup( tid );
2690 tl_assert(thr); /* cannot fail - Thread* must already exist */
2691
2692 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2693}
2694
2695static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2696{
2697 // only called if the real library call succeeded - so mutex is sane
2698 Thread* thr;
2699 if (SHOW_EVENTS >= 1)
2700 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2701 (Int)tid, (void*)rwl );
2702 thr = map_threads_maybe_lookup( tid );
2703 tl_assert(thr); /* cannot fail - Thread* must already exist */
2704
2705 // anything we should do here?
2706}
2707
2708
sewardj9f569b72008-11-13 13:33:09 +00002709/* ---------------------------------------------------------- */
2710/* -------------- events to do with semaphores -------------- */
2711/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002712
sewardj11e352f2007-11-30 11:11:02 +00002713/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002714 variables. */
2715
sewardjf98e1c02008-10-25 16:22:41 +00002716/* For each semaphore, we maintain a stack of SOs. When a 'post'
2717 operation is done on a semaphore (unlocking, essentially), a new SO
2718 is created for the posting thread, the posting thread does a strong
2719 send to it (which merely installs the posting thread's VC in the
2720 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002721
2722 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002723 semaphore, we pop a SO off the semaphore's stack (which should be
2724 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002725 dependencies between posters and waiters of the semaphore.
2726
sewardjf98e1c02008-10-25 16:22:41 +00002727 It may not be necessary to use a stack - perhaps a bag of SOs would
2728 do. But we do need to keep track of how many unused-up posts have
2729 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002730
sewardjf98e1c02008-10-25 16:22:41 +00002731 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002732 twice on S. T3 cannot complete its waits without both T1 and T2
2733 posting. The above mechanism will ensure that T3 acquires
2734 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002735
sewardjf98e1c02008-10-25 16:22:41 +00002736 When a semaphore is initialised with value N, we do as if we'd
2737 posted N times on the semaphore: basically create N SOs and do a
2738 strong send to all of them. This allows up to N waits on the
2739 semaphore to acquire a dependency on the initialisation point,
2740 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002741
2742 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2743 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002744*/
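
/* Illustrative timeline for the T1/T2/T3 scenario above (hypothetical
   client code, not part of Helgrind):

      T1:  x = 1;  sem_post(&s);   // SO1 pushed, carrying T1's VC
      T2:  y = 2;  sem_post(&s);   // SO2 pushed, carrying T2's VC
      T3:  sem_wait(&s);           // strong recv from SO2 (or SO1)
           sem_wait(&s);           // strong recv from the other SO
           use(x); use(y);         // ordered after both posts

   After both waits T3's VC dominates both post points, so its reads
   of x and y are not reported as races. */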
2745
sewardjf98e1c02008-10-25 16:22:41 +00002746/* sem_t* -> XArray* SO* */
2747static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002748
sewardjf98e1c02008-10-25 16:22:41 +00002749static void map_sem_to_SO_stack_INIT ( void ) {
2750 if (map_sem_to_SO_stack == NULL) {
2751 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2752 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00002753 }
2754}
2755
sewardjf98e1c02008-10-25 16:22:41 +00002756static void push_SO_for_sem ( void* sem, SO* so ) {
2757 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002758 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002759 tl_assert(so);
2760 map_sem_to_SO_stack_INIT();
2761 if (VG_(lookupFM)( map_sem_to_SO_stack,
2762 &keyW, (UWord*)&xa, (UWord)sem )) {
2763 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002764 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002765 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002766 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002767 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2768 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002769 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002770 }
2771}
2772
sewardjf98e1c02008-10-25 16:22:41 +00002773static SO* mb_pop_SO_for_sem ( void* sem ) {
2774 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002775 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002776 SO* so;
2777 map_sem_to_SO_stack_INIT();
2778 if (VG_(lookupFM)( map_sem_to_SO_stack,
2779 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002780 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002781 Word sz;
2782 tl_assert(keyW == (UWord)sem);
2783 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002784 tl_assert(sz >= 0);
2785 if (sz == 0)
2786 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002787 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2788 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002789 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002790 return so;
sewardjb4112022007-11-09 22:49:28 +00002791 } else {
2792 /* hmm, that's odd. No stack for this semaphore. */
2793 return NULL;
2794 }
2795}
2796
sewardj11e352f2007-11-30 11:11:02 +00002797static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002798{
sewardjf98e1c02008-10-25 16:22:41 +00002799 UWord keyW, valW;
2800 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002801
sewardjb4112022007-11-09 22:49:28 +00002802 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002803 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002804 (Int)tid, (void*)sem );
2805
sewardjf98e1c02008-10-25 16:22:41 +00002806 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002807
sewardjf98e1c02008-10-25 16:22:41 +00002808 /* Empty out the semaphore's SO stack. This way of doing it is
2809 stupid, but at least it's easy. */
2810 while (1) {
2811 so = mb_pop_SO_for_sem( sem );
2812 if (!so) break;
2813 libhb_so_dealloc(so);
2814 }
2815
2816 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2817 XArray* xa = (XArray*)valW;
2818 tl_assert(keyW == (UWord)sem);
2819 tl_assert(xa);
2820 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2821 VG_(deleteXA)(xa);
2822 }
sewardjb4112022007-11-09 22:49:28 +00002823}
2824
sewardj11e352f2007-11-30 11:11:02 +00002825static
2826void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2827{
sewardjf98e1c02008-10-25 16:22:41 +00002828 SO* so;
2829 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002830
2831 if (SHOW_EVENTS >= 1)
2832 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2833 (Int)tid, (void*)sem, value );
2834
sewardjf98e1c02008-10-25 16:22:41 +00002835 thr = map_threads_maybe_lookup( tid );
2836 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002837
sewardjf98e1c02008-10-25 16:22:41 +00002838 /* Empty out the semaphore's SO stack. This way of doing it is
2839 stupid, but at least it's easy. */
2840 while (1) {
2841 so = mb_pop_SO_for_sem( sem );
2842 if (!so) break;
2843 libhb_so_dealloc(so);
2844 }
sewardj11e352f2007-11-30 11:11:02 +00002845
sewardjf98e1c02008-10-25 16:22:41 +00002846 /* If we don't do this check, the following loop runs us out
2847 of memory for stupid initial values of 'value'. */
2848 if (value > 10000) {
2849 HG_(record_error_Misc)(
2850 thr, "sem_init: initial value exceeds 10000; using 10000" );
2851 value = 10000;
2852 }
sewardj11e352f2007-11-30 11:11:02 +00002853
sewardjf98e1c02008-10-25 16:22:41 +00002854 /* Now create 'value' new SOs for the thread, do a strong send to
2855 each of them, and push them all on the stack. */
2856 for (; value > 0; value--) {
2857 Thr* hbthr = thr->hbthr;
2858 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002859
sewardjf98e1c02008-10-25 16:22:41 +00002860 so = libhb_so_alloc();
2861 libhb_so_send( hbthr, so, True/*strong send*/ );
2862 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002863 }
2864}
2865
2866static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002867{
sewardjf98e1c02008-10-25 16:22:41 +00002868 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2869 it (iow, write our VC into it, then tick ours), and push the SO
2870 on a stack of SOs associated with 'sem'. This is later used
2871 by other thread(s) which successfully exit from a sem_wait on
2872 the same sem; by doing a strong recv from SOs popped of the
2873 stack, they acquire dependencies on the posting thread
2874 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002875
sewardjf98e1c02008-10-25 16:22:41 +00002876 Thread* thr;
2877 SO* so;
2878 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002879
2880 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002881 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002882 (Int)tid, (void*)sem );
2883
2884 thr = map_threads_maybe_lookup( tid );
2885 tl_assert(thr); /* cannot fail - Thread* must already exist */
2886
2887 // error-if: sem is bogus
2888
sewardjf98e1c02008-10-25 16:22:41 +00002889 hbthr = thr->hbthr;
2890 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002891
sewardjf98e1c02008-10-25 16:22:41 +00002892 so = libhb_so_alloc();
2893 libhb_so_send( hbthr, so, True/*strong send*/ );
2894 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002895}
2896
sewardj11e352f2007-11-30 11:11:02 +00002897static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002898{
sewardjf98e1c02008-10-25 16:22:41 +00002899 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2900 the 'sem' from this semaphore's SO-stack, and do a strong recv
2901 from it. This creates a dependency back to one of the post-ers
2902 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002903
sewardjf98e1c02008-10-25 16:22:41 +00002904 Thread* thr;
2905 SO* so;
2906 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002907
2908 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002909 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002910 (Int)tid, (void*)sem );
2911
2912 thr = map_threads_maybe_lookup( tid );
2913 tl_assert(thr); /* cannot fail - Thread* must already exist */
2914
2915 // error-if: sem is bogus
2916
sewardjf98e1c02008-10-25 16:22:41 +00002917 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002918
sewardjf98e1c02008-10-25 16:22:41 +00002919 if (so) {
2920 hbthr = thr->hbthr;
2921 tl_assert(hbthr);
2922
2923 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2924 libhb_so_dealloc(so);
2925 } else {
2926 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2927 If this happened it would surely be a bug in the threads
2928 library. */
2929 HG_(record_error_Misc)(
2930 thr, "Bug in libpthread: sem_wait succeeded on"
2931 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002932 }
2933}
2934
2935
sewardj9f569b72008-11-13 13:33:09 +00002936/* -------------------------------------------------------- */
2937/* -------------- events to do with barriers -------------- */
2938/* -------------------------------------------------------- */
2939
2940typedef
2941 struct {
2942 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002943 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002944 UWord size; /* declared size */
2945 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2946 }
2947 Bar;
2948
2949static Bar* new_Bar ( void ) {
2950 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
sewardj9f569b72008-11-13 13:33:09 +00002951 /* all fields are zero */
2952 tl_assert(bar->initted == False);
2953 return bar;
2954}
2955
2956static void delete_Bar ( Bar* bar ) {
2957 tl_assert(bar);
2958 if (bar->waiting)
2959 VG_(deleteXA)(bar->waiting);
2960 HG_(free)(bar);
2961}
2962
2963/* A mapping which stores auxiliary data for barriers. */
2964
2965/* pthread_barrier_t* -> Bar* */
2966static WordFM* map_barrier_to_Bar = NULL;
2967
2968static void map_barrier_to_Bar_INIT ( void ) {
2969 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2970 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2971 "hg.mbtBI.1", HG_(free), NULL );
sewardj9f569b72008-11-13 13:33:09 +00002972 }
2973}
2974
2975static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2976 UWord key, val;
2977 map_barrier_to_Bar_INIT();
2978 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2979 tl_assert(key == (UWord)barrier);
2980 return (Bar*)val;
2981 } else {
2982 Bar* bar = new_Bar();
2983 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2984 return bar;
2985 }
2986}
2987
2988static void map_barrier_to_Bar_delete ( void* barrier ) {
2989 UWord keyW, valW;
2990 map_barrier_to_Bar_INIT();
2991 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2992 Bar* bar = (Bar*)valW;
2993 tl_assert(keyW == (UWord)barrier);
2994 delete_Bar(bar);
2995 }
2996}
2997
2998
2999static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
3000 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00003001 UWord count,
3002 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00003003{
3004 Thread* thr;
3005 Bar* bar;
3006
3007 if (SHOW_EVENTS >= 1)
3008 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00003009 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
3010 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00003011
3012 thr = map_threads_maybe_lookup( tid );
3013 tl_assert(thr); /* cannot fail - Thread* must already exist */
3014
3015 if (count == 0) {
3016 HG_(record_error_Misc)(
3017 thr, "pthread_barrier_init: 'count' argument is zero"
3018 );
3019 }
3020
sewardj406bac82010-03-03 23:03:40 +00003021 if (resizable != 0 && resizable != 1) {
3022 HG_(record_error_Misc)(
3023 thr, "pthread_barrier_init: invalid 'resizable' argument"
3024 );
3025 }
3026
sewardj9f569b72008-11-13 13:33:09 +00003027 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3028 tl_assert(bar);
3029
3030 if (bar->initted) {
3031 HG_(record_error_Misc)(
3032 thr, "pthread_barrier_init: barrier is already initialised"
3033 );
3034 }
3035
3036 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3037 tl_assert(bar->initted);
3038 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00003039 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00003040 );
3041 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3042 }
3043 if (!bar->waiting) {
3044 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
3045 sizeof(Thread*) );
3046 }
3047
sewardj9f569b72008-11-13 13:33:09 +00003048 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00003049 bar->initted = True;
3050 bar->resizable = resizable == 1 ? True : False;
3051 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00003052}
3053
3054
3055static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
3056 void* barrier )
3057{
sewardj553655c2008-11-14 19:41:19 +00003058 Thread* thr;
3059 Bar* bar;
3060
sewardj9f569b72008-11-13 13:33:09 +00003061 /* Deal with destroy events. The only purpose is to free storage
3062 associated with the barrier, so as to avoid any possible
3063 resource leaks. */
3064 if (SHOW_EVENTS >= 1)
3065 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
3066 "(tid=%d, barrier=%p)\n",
3067 (Int)tid, (void*)barrier );
3068
sewardj553655c2008-11-14 19:41:19 +00003069 thr = map_threads_maybe_lookup( tid );
3070 tl_assert(thr); /* cannot fail - Thread* must already exist */
3071
3072 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3073 tl_assert(bar);
3074
3075 if (!bar->initted) {
3076 HG_(record_error_Misc)(
3077 thr, "pthread_barrier_destroy: barrier was never initialised"
3078 );
3079 }
3080
3081 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3082 HG_(record_error_Misc)(
3083 thr, "pthread_barrier_destroy: threads are waiting at barrier"
3084 );
3085 }
3086
sewardj9f569b72008-11-13 13:33:09 +00003087 /* Maybe we shouldn't do this; just let it persist, so that when it
3088 is reinitialised we don't need to do any dynamic memory
3089 allocation? The downside is a potentially unlimited space leak,
3090 if the client creates (in turn) a large number of barriers all
3091 at different locations. Note that if we do later move to the
3092 don't-delete-it scheme, we need to mark the barrier as
3093 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00003094 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00003095 map_barrier_to_Bar_delete( barrier );
3096}
3097
3098
sewardj406bac82010-03-03 23:03:40 +00003099/* All the threads have arrived. Now do the Interesting Bit. Get a
3100 new synchronisation object and do a weak send to it from all the
3101 participating threads. This makes its vector clocks be the join of
3102 all the individual threads' vector clocks. Then do a strong
3103 receive from it back to all threads, so that their VCs are a copy
3104 of it (hence are all equal to the join of their original VCs.) */
3105static void do_barrier_cross_sync_and_empty ( Bar* bar )
3106{
3107 /* XXX check bar->waiting has no duplicates */
3108 UWord i;
3109 SO* so = libhb_so_alloc();
3110
3111 tl_assert(bar->waiting);
3112 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3113
3114 /* compute the join ... */
3115 for (i = 0; i < bar->size; i++) {
3116 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3117 Thr* hbthr = t->hbthr;
3118 libhb_so_send( hbthr, so, False/*weak send*/ );
3119 }
3120 /* ... and distribute to all threads */
3121 for (i = 0; i < bar->size; i++) {
3122 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3123 Thr* hbthr = t->hbthr;
3124 libhb_so_recv( hbthr, so, True/*strong recv*/ );
3125 }
3126
3127 /* finally, we must empty out the waiting vector */
3128 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3129
3130 /* and we don't need this any more. Perhaps a stack-allocated
3131 SO would be better? */
3132 libhb_so_dealloc(so);
3133}
3134
3135
sewardj9f569b72008-11-13 13:33:09 +00003136static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3137 void* barrier )
3138{
sewardj1c466b72008-11-19 11:52:14 +00003139 /* This function gets called after a client thread calls
3140 pthread_barrier_wait but before it arrives at the real
3141 pthread_barrier_wait.
3142
3143 Why is the following correct? It's a bit subtle.
3144
3145 If this is not the last thread arriving at the barrier, we simply
3146 note its presence and return. Because valgrind (at least as of
3147 Nov 08) is single threaded, we are guaranteed safe from any race
3148 conditions when in this function -- no other client threads are
3149 running.
3150
3151 If this is the last thread, then we are again the only running
3152 thread. All the other threads will have either arrived at the
3153 real pthread_barrier_wait or are on their way to it, but in any
3154 case are guaranteed not to be able to move past it, because this
3155 thread is currently in this function and so has not yet arrived
3156 at the real pthread_barrier_wait. That means that:
3157
3158 1. While we are in this function, none of the other threads
3159 waiting at the barrier can move past it.
3160
3161 2. When this function returns (and simulated execution resumes),
3162 this thread and all other waiting threads will be able to move
3163 past the real barrier.
3164
3165 Because of this, it is now safe to update the vector clocks of
3166 all threads, to represent the fact that they all arrived at the
3167 barrier and have all moved on. There is no danger of any
3168 complications to do with some threads leaving the barrier and
3169 racing back round to the front, whilst others are still leaving
3170 (which is the primary source of complication in correct handling/
3171 implementation of barriers). That can't happen because here we
3172 update our data structures so as to indicate that the threads have
3173 passed the barrier, even though, as per (2) above, they are
3174 guaranteed not to pass the barrier until we return.
3175
3176 This relies crucially on Valgrind being single threaded. If that
3177 changes, this will need to be reconsidered.
3178 */
sewardj9f569b72008-11-13 13:33:09 +00003179 Thread* thr;
3180 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003181 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003182
3183 if (SHOW_EVENTS >= 1)
3184 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3185 "(tid=%d, barrier=%p)\n",
3186 (Int)tid, (void*)barrier );
3187
3188 thr = map_threads_maybe_lookup( tid );
3189 tl_assert(thr); /* cannot fail - Thread* must already exist */
3190
3191 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3192 tl_assert(bar);
3193
3194 if (!bar->initted) {
3195 HG_(record_error_Misc)(
3196 thr, "pthread_barrier_wait: barrier is uninitialised"
3197 );
3198 return; /* client is broken .. avoid assertions below */
3199 }
3200
3201 /* guaranteed by _INIT_PRE above */
3202 tl_assert(bar->size > 0);
3203 tl_assert(bar->waiting);
3204
3205 VG_(addToXA)( bar->waiting, &thr );
3206
3207 /* guaranteed by this function */
3208 present = VG_(sizeXA)(bar->waiting);
3209 tl_assert(present > 0 && present <= bar->size);
3210
3211 if (present < bar->size)
3212 return;
3213
sewardj406bac82010-03-03 23:03:40 +00003214 do_barrier_cross_sync_and_empty(bar);
3215}
sewardj9f569b72008-11-13 13:33:09 +00003216
sewardj9f569b72008-11-13 13:33:09 +00003217
sewardj406bac82010-03-03 23:03:40 +00003218static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3219 void* barrier,
3220 UWord newcount )
3221{
3222 Thread* thr;
3223 Bar* bar;
3224 UWord present;
3225
3226 if (SHOW_EVENTS >= 1)
3227 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3228 "(tid=%d, barrier=%p, newcount=%lu)\n",
3229 (Int)tid, (void*)barrier, newcount );
3230
3231 thr = map_threads_maybe_lookup( tid );
3232 tl_assert(thr); /* cannot fail - Thread* must already exist */
3233
3234 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3235 tl_assert(bar);
3236
3237 if (!bar->initted) {
3238 HG_(record_error_Misc)(
3239 thr, "pthread_barrier_resize: barrier is uninitialised"
3240 );
3241 return; /* client is broken .. avoid assertions below */
3242 }
3243
3244 if (!bar->resizable) {
3245 HG_(record_error_Misc)(
3246 thr, "pthread_barrier_resize: barrier may not be resized"
3247 );
3248 return; /* client is broken .. avoid assertions below */
3249 }
3250
3251 if (newcount == 0) {
3252 HG_(record_error_Misc)(
3253 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3254 );
3255 return; /* client is broken .. avoid assertions below */
3256 }
3257
3258 /* guaranteed by _INIT_PRE above */
3259 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003260 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003261 /* Guaranteed by this fn */
3262 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003263
sewardj406bac82010-03-03 23:03:40 +00003264 if (newcount >= bar->size) {
3265 /* Increasing the capacity. There's no possibility of threads
3266 moving on from the barrier in this situation, so just note
3267 the fact and do nothing more. */
3268 bar->size = newcount;
3269 } else {
3270 /* Decreasing the capacity. If we decrease it to be equal or
3271 below the number of waiting threads, they will now move past
3272 the barrier, so need to mess with dep edges in the same way
3273 as if the barrier had filled up normally. */
3274 present = VG_(sizeXA)(bar->waiting);
3275 tl_assert(present >= 0 && present <= bar->size);
3276 if (newcount <= present) {
3277 bar->size = present; /* keep the cross_sync call happy */
3278 do_barrier_cross_sync_and_empty(bar);
3279 }
3280 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003281 }
sewardj9f569b72008-11-13 13:33:09 +00003282}
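
/* Illustrative: with bar->size == 4 and 3 threads already waiting, a
   resize to newcount == 2 releases the waiters (2 <= 3), exactly as if
   the barrier had filled up normally; a resize to newcount == 5 merely
   records the new size, since an enlargement can never release
   anybody. */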
3283
3284
sewardjed2e72e2009-08-14 11:08:24 +00003285/* ----------------------------------------------------- */
3286/* ----- events to do with user-specified HB edges ----- */
3287/* ----------------------------------------------------- */
3288
3289/* A mapping from arbitrary UWord tag to the SO associated with it.
3290 The UWord tags are meaningless to us, interpreted only by the
3291 user. */
3292
3293
3294
3295/* UWord -> SO* */
3296static WordFM* map_usertag_to_SO = NULL;
3297
3298static void map_usertag_to_SO_INIT ( void ) {
3299 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3300 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3301 "hg.mutS.1", HG_(free), NULL );
sewardjed2e72e2009-08-14 11:08:24 +00003302 }
3303}
3304
3305static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3306 UWord key, val;
3307 map_usertag_to_SO_INIT();
3308 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3309 tl_assert(key == (UWord)usertag);
3310 return (SO*)val;
3311 } else {
3312 SO* so = libhb_so_alloc();
3313 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3314 return so;
3315 }
3316}
3317
sewardj6015d0e2011-03-11 19:10:48 +00003318static void map_usertag_to_SO_delete ( UWord usertag ) {
3319 UWord keyW, valW;
3320 map_usertag_to_SO_INIT();
3321 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3322 SO* so = (SO*)valW;
3323 tl_assert(keyW == usertag);
3324 tl_assert(so);
3325 libhb_so_dealloc(so);
3326 }
3327}
sewardjed2e72e2009-08-14 11:08:24 +00003328
3329
3330static
3331void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3332{
3333 /* TID is just about to notionally send a message on a notional
3334 abstract synchronisation object whose identity is given by
3335 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003336 bound, and do a 'weak send' on the SO. This joins the vector
3337 clocks from this thread into any vector clocks already present
3338 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003339 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003340 thereby acquiring a dependency on all the events that have
3341 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003342 Thread* thr;
3343 SO* so;
3344
3345 if (SHOW_EVENTS >= 1)
3346 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3347 (Int)tid, usertag );
3348
3349 thr = map_threads_maybe_lookup( tid );
3350 tl_assert(thr); /* cannot fail - Thread* must already exist */
3351
3352 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3353 tl_assert(so);
3354
sewardj8c50d3c2011-03-11 18:38:12 +00003355 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003356}
3357
3358static
3359void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3360{
3361 /* TID has just notionally received a message from a notional
3362 abstract synchronisation object whose identity is given by
3363 USERTAG. Bind USERTAG to a real SO if it is not already so
3364 bound. If the SO has at some point in the past been 'sent' on,
3365 do a 'strong receive' on it, thereby acquiring a dependency on
3366 the sender. */
3367 Thread* thr;
3368 SO* so;
3369
3370 if (SHOW_EVENTS >= 1)
3371 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3372 (Int)tid, usertag );
3373
3374 thr = map_threads_maybe_lookup( tid );
3375 tl_assert(thr); /* cannot fail - Thread* must already exist */
3376
3377 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3378 tl_assert(so);
3379
3380 /* Acquire a dependency on it. If the SO has never so far been
3381 sent on, then libhb_so_recv will do nothing. So we're safe
3382 regardless of SO's history. */
3383 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3384}
3385
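/* Illustrative client-side usage (hypothetical client code; the
   ANNOTATE_* macros are the helgrind.h wrappers believed to map onto
   these two requests):

      // Producer thread:
      data = compute();
      ANNOTATE_HAPPENS_BEFORE(&data);   // -> evh__HG_USERSO_SEND_PRE

      // Consumer thread:
      ANNOTATE_HAPPENS_AFTER(&data);    // -> evh__HG_USERSO_RECV_POST
      use(data);                        // not reported as a race

   The usertag here is the address &data, but any UWord tag works. */
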
sewardj6015d0e2011-03-11 19:10:48 +00003386static
3387void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3388{
3389 /* TID declares that any happens-before edges notionally stored in
3390 USERTAG can be deleted. If (as would normally be the case) a
florianad4e9792015-07-05 21:53:33 +00003391 SO is associated with USERTAG, then the association is removed
sewardj6015d0e2011-03-11 19:10:48 +00003392 and all resources associated with SO are freed. Importantly,
3393 that frees up any VTSs stored in SO. */
3394 if (SHOW_EVENTS >= 1)
3395 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3396 (Int)tid, usertag );
3397
3398 map_usertag_to_SO_delete( usertag );
3399}
3400
sewardjed2e72e2009-08-14 11:08:24 +00003401
sewardj8eb8bab2015-07-21 14:44:28 +00003402#if defined(VGO_solaris)
3403/* ----------------------------------------------------- */
3404/* --- events to do with bind guard/clear intercepts --- */
3405/* ----------------------------------------------------- */
3406
3407static
3408void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
3409{
3410 if (SHOW_EVENTS >= 1)
3411 VG_(printf)("evh__HG_RTLD_BIND_GUARD"
3412 "(tid=%d, flags=%d)\n",
3413 (Int)tid, flags);
3414
3415 Thread *thr = map_threads_maybe_lookup(tid);
3416 tl_assert(thr != NULL);
3417
3418 Int bindflag = (flags & VKI_THR_FLG_RTLD);
3419 if ((bindflag & thr->bind_guard_flag) == 0) {
3420 thr->bind_guard_flag |= bindflag;
3421 HG_(thread_enter_synchr)(thr);
3422 /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
3423 HG_(thread_enter_pthread_create)(thr);
3424 }
3425}
3426
3427static
3428void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
3429{
3430 if (SHOW_EVENTS >= 1)
3431 VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
3432 "(tid=%d, flags=%d)\n",
3433 (Int)tid, flags);
3434
3435 Thread *thr = map_threads_maybe_lookup(tid);
3436 tl_assert(thr != NULL);
3437
3438 Int bindflag = (flags & VKI_THR_FLG_RTLD);
3439 if ((thr->bind_guard_flag & bindflag) != 0) {
3440 thr->bind_guard_flag &= ~bindflag;
3441 HG_(thread_leave_synchr)(thr);
3442 HG_(thread_leave_pthread_create)(thr);
3443 }
3444}
3445#endif /* VGO_solaris */
3446
3447
sewardjb4112022007-11-09 22:49:28 +00003448/*--------------------------------------------------------------*/
3449/*--- Lock acquisition order monitoring ---*/
3450/*--------------------------------------------------------------*/
3451
3452/* FIXME: here are some optimisations still to do in
3453 laog__pre_thread_acquires_lock.
3454
3455 The graph is structured so that if L1 --*--> L2 then L1 must be
3456 acquired before L2.
3457
3458 The common case is that some thread T holds (eg) L1 L2 and L3 and
3459 is repeatedly acquiring and releasing Ln, and there is no ordering
3460 error in what it is doing. Hence it repeatedly:
3461
3462 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3463 produces the answer No (because there is no error).
3464
3465 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3466 (because they already got added the first time T acquired Ln).
3467
3468 Hence cache these two events:
3469
3470 (1) Cache result of the query from last time. Invalidate the cache
3471 any time any edges are added to or deleted from laog.
3472
3473 (2) Cache these add-edge requests and ignore them if said edges
3474 have already been added to laog. Invalidate the cache any time
3475 any edges are deleted from laog.
3476*/
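
/* A minimal sketch of the caching the FIXME above envisages
   (hypothetical -- not implemented here; all names are made up):

      static Lock*     cache_lk   = NULL;  // last Ln queried
      static WordSetID cache_lset = 0;     // lockset it was checked against
      static Bool      cache_ok   = False; // result of that query
      // set cache_lk = NULL whenever laog edges are added or deleted

   laog__pre_thread_acquires_lock could then return early when
   lk == cache_lk && lset == cache_lset && cache_ok. */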
3477
3478typedef
3479 struct {
3480 WordSetID inns; /* in univ_laog */
3481 WordSetID outs; /* in univ_laog */
3482 }
3483 LAOGLinks;
3484
3485/* lock order acquisition graph */
3486static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3487
3488/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3489 where that edge was created, so that we can show the user later if
3490 we need to. */
3491typedef
3492 struct {
3493 Addr src_ga; /* Lock guest addresses for */
3494 Addr dst_ga; /* src/dst of the edge */
3495 ExeContext* src_ec; /* And corresponding places where that */
3496 ExeContext* dst_ec; /* ordering was established */
3497 }
3498 LAOGLinkExposition;
3499
sewardj250ec2e2008-02-15 22:02:30 +00003500static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003501 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3502 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3503 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3504 if (llx1->src_ga < llx2->src_ga) return -1;
3505 if (llx1->src_ga > llx2->src_ga) return 1;
3506 if (llx1->dst_ga < llx2->dst_ga) return -1;
3507 if (llx1->dst_ga > llx2->dst_ga) return 1;
3508 return 0;
3509}
3510
3511static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3512/* end EXPOSITION ONLY */
3513
3514
sewardja65db102009-01-26 10:45:16 +00003515__attribute__((noinline))
3516static void laog__init ( void )
3517{
3518 tl_assert(!laog);
3519 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003520 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003521
3522 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3523 HG_(free), NULL/*unboxedcmp*/ );
3524
3525 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3526 cmp_LAOGLinkExposition );
sewardja65db102009-01-26 10:45:16 +00003527}
3528
florian6bf37262012-10-21 03:23:36 +00003529static void laog__show ( const HChar* who ) {
3530 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003531 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003532 Lock* me;
3533 LAOGLinks* links;
3534 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003535 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003536 me = NULL;
3537 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003538 while (VG_(nextIterFM)( laog, (UWord*)&me,
3539 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003540 tl_assert(me);
3541 tl_assert(links);
3542 VG_(printf)(" node %p:\n", me);
3543 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3544 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003545 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003546 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3547 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003548 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003549 me = NULL;
3550 links = NULL;
3551 }
sewardj896f6f92008-08-19 08:38:52 +00003552 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003553 VG_(printf)("}\n");
3554}
3555
sewardj866c80c2011-10-22 19:29:51 +00003556static void univ_laog_do_GC ( void ) {
3557 Word i;
3558 LAOGLinks* links;
3559 Word seen = 0;
3560 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3561 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3562
3563 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3564 (Int) univ_laog_cardinality
3565 * sizeof(Bool) );
3566 // univ_laog_seen[*] set to 0 (False) by zalloc.
3567
sewardj866c80c2011-10-22 19:29:51 +00003568 VG_(initIterFM)( laog );
3569 links = NULL;
3570 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3571 tl_assert(links);
3572 tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3573 univ_laog_seen[links->inns] = True;
3574 tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3575 univ_laog_seen[links->outs] = True;
3576 links = NULL;
3577 }
3578 VG_(doneIterFM)( laog );
3579
3580 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3581 if (univ_laog_seen[i])
3582 seen++;
3583 else
3584 HG_(dieWS) ( univ_laog, (WordSet)i );
3585 }
3586
3587 HG_(free) (univ_laog_seen);
3588
3589 // We need to decide the value of the next_gc.
3590 // 3 solutions were looked at:
3591 // Sol 1: garbage collect at seen * 2
3592 // This solution was a lot slower, probably because we both do a lot of
3593 // garbage collection and do not keep the laog WSs long enough for them
3594 // to become useful again very soon.
3595 // Sol 2: garbage collect at a percentage increase of the current cardinality
3596 // (with a min increase of 1)
3597 // Trials on a small test program with 1%, 5% and 10% increases were done.
3598 // 1% is slightly faster than 5%, which is slightly slower than 10%.
3599 // However, on a big application, this caused the memory to be exhausted,
3600 // as even a 1% increase of size at each gc becomes a lot, when many gc
3601 // are done.
3602 // Sol 3: always garbage collect at current cardinality + 1.
3603 // This solution was the fastest of the 3 solutions, and caused no memory
3604 // exhaustion in the big application.
3605 //
3606 // With regards to cost introduced by gc: on the t2t perf test (doing only
3607 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3608 // version with garbage collection. With t2t 50 20 2, my machine started
3609 // to page out, and so the garbage collected version was much faster.
3610 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3611 // performance difference is insignificant (~ 0.1 s).
3612 // Of course, it might be that real life programs are not well represented
3613 // by t2t.
3614
3615 // If ever we want to have a more sophisticated control
3616 // (e.g. clo options to control the percentage increase or a fixed increase),
3617 // we should do it here, eg.
3618 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3619 // Currently, we just hard-code the solution 3 above.
3620 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3621
3622 if (VG_(clo_stats))
3623 VG_(message)
3624 (Vg_DebugMsg,
philippebf37ae82015-05-03 10:56:16 +00003625 "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
3626 (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
sewardj866c80c2011-10-22 19:29:51 +00003627}
3628
3629
sewardjb4112022007-11-09 22:49:28 +00003630__attribute__((noinline))
3631static void laog__add_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003632 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003633 LAOGLinks* links;
3634 Bool presentF, presentR;
3635 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3636
3637 /* Take the opportunity to sanity check the graph. Record in
3638 presentF if there is already a src->dst mapping in this node's
3639 forwards links, and presentR if there is already a src->dst
3640 mapping in this node's backwards links. They should agree!
3641 Also, we need to know whether the edge was already present so as
3642 to decide whether or not to update the link details mapping. We
3643 can compute presentF and presentR essentially for free, so may
3644 as well do this always. */
3645 presentF = presentR = False;
3646
3647 /* Update the out edges for src */
3648 keyW = 0;
3649 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003650 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003651 WordSetID outs_new;
3652 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003653 tl_assert(keyW == (UWord)src);
3654 outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003655 presentF = outs_new == links->outs;
3656 links->outs = outs_new;
3657 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003658 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003659 links->inns = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003660 links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3661 VG_(addToFM)( laog, (UWord)src, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003662 }
3663 /* Update the in edges for dst */
3664 keyW = 0;
3665 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003666 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003667 WordSetID inns_new;
3668 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003669 tl_assert(keyW == (UWord)dst);
3670 inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003671 presentR = inns_new == links->inns;
3672 links->inns = inns_new;
3673 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003674 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
florian6bf37262012-10-21 03:23:36 +00003675 links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003676 links->outs = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003677 VG_(addToFM)( laog, (UWord)dst, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003678 }
3679
3680 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3681
3682 if (!presentF && src->acquired_at && dst->acquired_at) {
3683 LAOGLinkExposition expo;
3684 /* If this edge is entering the graph, and we have acquired_at
3685 information for both src and dst, record those acquisition
3686 points. Hence, if there is later a violation of this
3687 ordering, we can show the user the two places in which the
3688 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003689 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003690 src->guestaddr, dst->guestaddr);
3691 expo.src_ga = src->guestaddr;
3692 expo.dst_ga = dst->guestaddr;
3693 expo.src_ec = NULL;
3694 expo.dst_ec = NULL;
3695 tl_assert(laog_exposition);
florian6bf37262012-10-21 03:23:36 +00003696 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003697 /* we already have it; do nothing */
3698 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003699 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3700 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003701 expo2->src_ga = src->guestaddr;
3702 expo2->dst_ga = dst->guestaddr;
3703 expo2->src_ec = src->acquired_at;
3704 expo2->dst_ec = dst->acquired_at;
florian6bf37262012-10-21 03:23:36 +00003705 VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
sewardjb4112022007-11-09 22:49:28 +00003706 }
3707 }
sewardj866c80c2011-10-22 19:29:51 +00003708
3709 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3710 univ_laog_do_GC();
sewardjb4112022007-11-09 22:49:28 +00003711}
3712
3713__attribute__((noinline))
3714static void laog__del_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003715 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003716 LAOGLinks* links;
sewardj866c80c2011-10-22 19:29:51 +00003717 if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
sewardjb4112022007-11-09 22:49:28 +00003718 /* Update the out edges for src */
3719 keyW = 0;
3720 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003721 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003722 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003723 tl_assert(keyW == (UWord)src);
3724 links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003725 }
3726 /* Update the in edges for dst */
3727 keyW = 0;
3728 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003729 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003730 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003731 tl_assert(keyW == (UWord)dst);
3732 links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003733 }
sewardj866c80c2011-10-22 19:29:51 +00003734
3735 /* Remove the exposition of src,dst (if present) */
3736 {
3737 LAOGLinkExposition *fm_expo;
3738
3739 LAOGLinkExposition expo;
3740 expo.src_ga = src->guestaddr;
3741 expo.dst_ga = dst->guestaddr;
3742 expo.src_ec = NULL;
3743 expo.dst_ec = NULL;
3744
3745 if (VG_(delFromFM) (laog_exposition,
3746 (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3747 HG_(free) (fm_expo);
3748 }
3749 }
3750
3751 /* deleting edges can increase the nr of WS so check for gc. */
3752 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3753 univ_laog_do_GC();
3754 if (0) VG_(printf)("laog__del_edge exit\n");
sewardjb4112022007-11-09 22:49:28 +00003755}
3756
3757__attribute__((noinline))
3758static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003759 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003760 LAOGLinks* links;
3761 keyW = 0;
3762 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003763 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003764 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003765 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003766 return links->outs;
3767 } else {
3768 return HG_(emptyWS)( univ_laog );
3769 }
3770}
3771
3772__attribute__((noinline))
3773static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003774 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003775 LAOGLinks* links;
3776 keyW = 0;
3777 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003778 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003779 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003780 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003781 return links->inns;
3782 } else {
3783 return HG_(emptyWS)( univ_laog );
3784 }
3785}
3786
3787__attribute__((noinline))
florian6bf37262012-10-21 03:23:36 +00003788static void laog__sanity_check ( const HChar* who ) {
3789 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003790 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003791 Lock* me;
3792 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003793 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003794 me = NULL;
3795 links = NULL;
3796 if (0) VG_(printf)("laog sanity check\n");
florian6bf37262012-10-21 03:23:36 +00003797 while (VG_(nextIterFM)( laog, (UWord*)&me,
3798 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003799 tl_assert(me);
3800 tl_assert(links);
3801 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3802 for (i = 0; i < ws_size; i++) {
3803 if ( ! HG_(elemWS)( univ_laog,
3804 laog__succs( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003805 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003806 goto bad;
3807 }
3808 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3809 for (i = 0; i < ws_size; i++) {
3810 if ( ! HG_(elemWS)( univ_laog,
3811 laog__preds( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003812 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003813 goto bad;
3814 }
3815 me = NULL;
3816 links = NULL;
3817 }
sewardj896f6f92008-08-19 08:38:52 +00003818 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003819 return;
3820
3821 bad:
3822 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3823 laog__show(who);
3824 tl_assert(0);
3825}
3826
3827/* If there is a path in laog from 'src' to any of the elements in
3828 'dst', return an arbitrarily chosen element of 'dst' reachable from
3829 'src'. If no path exist from 'src' to any element in 'dst', return
3830 NULL. */
3831__attribute__((noinline))
3832static
3833Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3834{
3835 Lock* ret;
florian6bf37262012-10-21 03:23:36 +00003836 Word ssz;
sewardjb4112022007-11-09 22:49:28 +00003837 XArray* stack; /* of Lock* */
3838 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3839 Lock* here;
3840 WordSetID succs;
florian6bf37262012-10-21 03:23:36 +00003841 UWord succs_size, i;
sewardj250ec2e2008-02-15 22:02:30 +00003842 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003843 //laog__sanity_check();
3844
3845 /* If the destination set is empty, we can never get there from
3846 'src' :-), so don't bother to try */
3847 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3848 return NULL;
3849
3850 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003851 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3852 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003853
3854 (void) VG_(addToXA)( stack, &src );
3855
3856 while (True) {
3857
3858 ssz = VG_(sizeXA)( stack );
3859
3860 if (ssz == 0) { ret = NULL; break; }
3861
3862 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3863 VG_(dropTailXA)( stack, 1 );
3864
florian6bf37262012-10-21 03:23:36 +00003865 if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
sewardjb4112022007-11-09 22:49:28 +00003866
florian6bf37262012-10-21 03:23:36 +00003867 if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
sewardjb4112022007-11-09 22:49:28 +00003868 continue;
3869
florian6bf37262012-10-21 03:23:36 +00003870 VG_(addToFM)( visited, (UWord)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003871
3872 succs = laog__succs( here );
3873 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3874 for (i = 0; i < succs_size; i++)
3875 (void) VG_(addToXA)( stack, &succs_words[i] );
3876 }
3877
sewardj896f6f92008-08-19 08:38:52 +00003878 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003879 VG_(deleteXA)( stack );
3880 return ret;
3881}


/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   UWord  ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking it.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk, other,
            found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks
            between them:

                              C

                          fCA     fBC

                        A     fAB     B

            Let's have the following actions:
                   A takes    fCA,fAB
                   A releases fCA,fAB
                   B takes    fAB,fBC
                   B releases fAB,fBC
                   C takes    fBC,fCA
                   C releases fBC,fCA

            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            sequence is done:
                A takes fCA
                B takes fAB
                C takes fBC

            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            are not given.

            This is because there is no pair (fCA, fBC) in laog exposition:
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */

         HG_(record_error_LockOrder)(
            thr, lk, other,
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
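
/* A minimal client-side sketch of the three-lock cycle described
   above (an assumed example, not part of Helgrind itself; it mirrors
   what tests/tc14_laog_dinphils exercises, and the names are
   illustrative only).  Run sequentially, no thread ever takes fCA
   before fBC, so the (fCA, fBC) exposition pair is absent and the
   report at C's second lock carries no "required order" stacks. */
#if 0
#include <pthread.h>

static pthread_mutex_t fAB = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fBC = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fCA = PTHREAD_MUTEX_INITIALIZER;

static void* philosopher_A ( void* v ) {
   pthread_mutex_lock(&fCA);   pthread_mutex_lock(&fAB);
   pthread_mutex_unlock(&fAB); pthread_mutex_unlock(&fCA);
   return NULL;
}
static void* philosopher_B ( void* v ) {
   pthread_mutex_lock(&fAB);   pthread_mutex_lock(&fBC);
   pthread_mutex_unlock(&fBC); pthread_mutex_unlock(&fAB);
   return NULL;
}
static void* philosopher_C ( void* v ) {
   pthread_mutex_lock(&fBC);
   pthread_mutex_lock(&fCA);   /* Helgrind reports the cycle here */
   pthread_mutex_unlock(&fCA); pthread_mutex_unlock(&fBC);
   return NULL;
}

int main ( void ) {
   pthread_t t;
   /* Run the philosophers one after another, so each pair of edges
      enters laog without an actual deadlock occurring. */
   pthread_create(&t, NULL, philosopher_A, NULL); pthread_join(t, NULL);
   pthread_create(&t, NULL, philosopher_B, NULL); pthread_join(t, NULL);
   pthread_create(&t, NULL, philosopher_C, NULL); pthread_join(t, NULL);
   return 0;
}
#endif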

/* Allocates a duplicate of words.  Caller must HG_(free) the result. */
static UWord* UWordV_dup(UWord* words, Word words_size)
{
   UInt i;

   if (words_size == 0)
      return NULL;

   UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));

   for (i = 0; i < words_size; i++)
      dup[i] = words[i];

   return dup;
}

/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
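
/* Worked example: if laog holds A --> lk --> C and lk is deleted, the
   loops above first remove (A,lk) and (lk,C) and then add the
   bridging edge (A,C), so the ordering constraint "A before C" that
   was previously implied transitively through lk is preserved for
   the DFS in laog__do_dfs_from_to. */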

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   UWordV_dup call needed here ...
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}


/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable *hg_mallocmeta_table = NULL;

/* MallocMeta are small elements.  We use a pool to avoid
   the overhead of malloc for each MallocMeta. */
static PoolAlloc *MallocMeta_poolalloc = NULL;

static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
   VG_(memset)(md, 0, sizeof(MallocMeta));
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   VG_(freeEltPA)(MallocMeta_poolalloc, md);
}


/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (its internal
      assertion cannot fail), since memory can only be allocated by
      currently alive threads, hence they must have an entry in
      map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}


/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT       szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}


static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT       i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller; retire the dead tail before updating the
         recorded size, so the length passed to die_mem_heap is the
         actual shrinkage rather than zero. */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}


/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/UInt*        tnr,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (tnr)     *tnr     = mm->thr->errmsg_index;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
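
/* Worked example: on a 64-bit target, a query at payload+40 hits the
   fast path at i == 5, since the probe at data_addr - 5*sizeof(UWord)
   lands exactly on the block's hash key (assuming the block is big
   enough to contain that offset); a query 128 or more bytes into a
   large block misses all 16 probes and falls back to the full linear
   scan. */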


/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

#define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))

/* This takes and returns atoms, of course.  Not full IRExprs. */
static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
{
   tl_assert(arg1 && arg2);
   tl_assert(isIRAtom(arg1));
   tl_assert(isIRAtom(arg2));
   /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
      code, I know. */
   IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
   addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
   addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
   addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
                                                       mkexpr(wide2))));
   addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
   return mkexpr(res);
}

static void instrument_mem_access ( IRSB*   sbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB,
                                    Int     goff_sp,
                                    IRExpr* guard ) /* NULL => True */
{
   IRType   tyAddr   = Ity_INVALID;
   const HChar* hName = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      /* guardA == "guard on the address" */
      IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guardA,
                tyAddr == Ity_I32
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guardA);
   }

   /* If there's a guard on the access itself (as supplied by the
      caller of this routine), we need to AND that in to any guard we
      might already have. */
   if (guard) {
      di->guard = mk_And1(sbOut, di->guard, guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
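
/* As a sketch (not verbatim VEX output), with --check-stack-refs=no
   on a 64-bit guest whose redzone is 128 bytes (the amd64-linux value
   of VG_STACK_REDZONE_SZB), an 8-byte store comes out instrumented
   roughly as:

      t_sp   = GET:I64(offset_SP)
      t_dsp  = Sub64(addr, t_sp)
      t_diff = Add64(t_dsp, 0x80:I64)        // + redzone
      t_g    = CmpLT64U(0x4000:I64, t_diff)  // THRESH == 4096 * 4
      if (t_g) call evh__mem_help_cwrite_8(addr)

   so the helper is only invoked when addr lies outside the
   THRESH-sized window around SP. */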


/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (Errs on the safe side: False is the safe value.) */
static Bool is_in_dynamic_linker_shared_object( Addr ga )
{
   DebugInfo* dinfo;
   const HChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3))         return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2))         return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2))  return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1))             return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_2))             return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1))               return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_AARCH64_SO_1)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_ARMHF_SO_3))   return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  elif defined(VGO_solaris)
   if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}

static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      const VexGuestLayout* layout,
                      const VexGuestExtents* vge,
                      const VexArchInfo* archinfo_host,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   Addr    cia; /* address of current insn */
   IRStmt* st;
   Bool    inLDSO = False;
   Addr    inLDSOmask4K = 1; /* mismatches on first check */

   const Int goff_sp = layout->offset_SP;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;
   bbOut->offsIP   = bbIn->offsIP;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", cia);
               inLDSOmask4K = cia & ~(Addr)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
               case Imbe_CancelReservation:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy), goff_sp,
                  NULL/*no-guard*/
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linkeds are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp,
                     NULL/*no-guard*/
                  );
               }
            } else {
               /* SC */
               /*ignore*/
            }
            break;
         }

         case Ist_Store:
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy), goff_sp,
                  NULL/*no-guard*/
               );
            }
            break;

         case Ist_StoreG: {
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRExpr*   addr = sg->addr;
            IRType    type = typeOfIRExpr(bbIn->tyenv, data);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   True/*isStore*/,
                                   sizeofIRType(hWordTy),
                                   goff_sp, sg->guard );
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            IRExpr*  addr     = lg->addr;
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   False/*!isStore*/,
                                   sizeofIRType(hWordTy),
                                   goff_sp, lg->guard );
            break;
         }

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp,
                     NULL/*no-guard*/
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}

#undef unop
#undef binop
#undef mkexpr
#undef mkU32
#undef mkU64
#undef assign


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
   }
}

/* A list of Ada dependent tasks and their masters.  Used for
   implementing the Ada task termination semantic as implemented by
   the gcc gnat Ada runtime. */
typedef
   struct {
      void*   dependent;    // Ada Task Control Block of the Dependent
      void*   master;       // ATCB of the master
      Word    master_level; // level of dependency between master and dependent
      Thread* hg_dependent; // helgrind Thread* for dependent task.
   }
   GNAT_dmml;
static XArray* gnat_dmmls;   /* of GNAT_dmml */
static void gnat_dmmls_INIT (void)
{
   if (UNLIKELY(gnat_dmmls == NULL)) {
      gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
                               HG_(free),
                               sizeof(GNAT_dmml) );
   }
}
static void print_monitor_help ( void )
{
   VG_(gdb_printf)
      (
"\n"
"helgrind monitor commands:\n"
"  info locks [lock_addr]       : show status of lock at addr lock_addr\n"
"                                 with no lock_addr, show status of all locks\n"
"  accesshistory <addr> [<len>] : show access history recorded\n"
"                                 for <len> (or 1) bytes at <addr>\n"
"\n");
}
4917
4918/* return True if request recognised, False otherwise */
4919static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4920{
philippef5774342014-05-03 11:12:50 +00004921 HChar* wcmd;
4922 HChar s[VG_(strlen(req))]; /* copy for strtok_r */
4923 HChar *ssaveptr;
4924 Int kwdid;
4925
4926 VG_(strcpy) (s, req);
4927
4928 wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4929 /* NB: if possible, avoid introducing a new command below which
4930 starts with the same first letter(s) as an already existing
4931 command. This ensures a shorter abbreviation for the user. */
4932 switch (VG_(keyword_id)
philippe328d6622015-05-25 17:24:27 +00004933 ("help info accesshistory",
philippef5774342014-05-03 11:12:50 +00004934 wcmd, kwd_report_duplicated_matches)) {
4935 case -2: /* multiple matches */
4936 return True;
4937 case -1: /* not found */
4938 return False;
4939 case 0: /* help */
4940 print_monitor_help();
4941 return True;
4942 case 1: /* info */
philippef5774342014-05-03 11:12:50 +00004943 wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4944 switch (kwdid = VG_(keyword_id)
4945 ("locks",
4946 wcmd, kwd_report_all)) {
4947 case -2:
4948 case -1:
4949 break;
4950 case 0: // locks
4951 {
philippe328d6622015-05-25 17:24:27 +00004952 const HChar* wa;
4953 Addr lk_addr = 0;
4954 Bool lk_shown = False;
4955 Bool all_locks = True;
philippef5774342014-05-03 11:12:50 +00004956 Int i;
4957 Lock* lk;
philippe328d6622015-05-25 17:24:27 +00004958
4959 wa = VG_(strtok_r) (NULL, " ", &ssaveptr);
4960 if (wa != NULL) {
4961 if (VG_(parse_Addr) (&wa, &lk_addr) )
4962 all_locks = False;
4963 else {
4964 VG_(gdb_printf) ("missing or malformed address\n");
4965 }
4966 }
philippef5774342014-05-03 11:12:50 +00004967 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
philippe328d6622015-05-25 17:24:27 +00004968 if (all_locks || lk_addr == lk->guestaddr) {
4969 pp_Lock(0, lk,
4970 True /* show_lock_addrdescr */,
4971 False /* show_internal_data */);
4972 lk_shown = True;
4973 }
philippef5774342014-05-03 11:12:50 +00004974 }
4975 if (i == 0)
4976 VG_(gdb_printf) ("no locks\n");
philippe328d6622015-05-25 17:24:27 +00004977 if (!all_locks && !lk_shown)
4978 VG_(gdb_printf) ("lock with address %p not found\n",
4979 (void*)lk_addr);
philippef5774342014-05-03 11:12:50 +00004980 }
4981 break;
4982 default:
4983 tl_assert(0);
4984 }
4985 return True;
philippe328d6622015-05-25 17:24:27 +00004986
4987 case 2: /* accesshistory */
4988 {
4989 Addr address;
4990 SizeT szB = 1;
4991 if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
4992 if (szB >= 1)
4993 libhb_event_map_access_history (address, szB, HG_(print_access));
4994 else
4995 VG_(gdb_printf) ("len must be >=1\n");
4996 }
4997 return True;
4998 }
4999
philippef5774342014-05-03 11:12:50 +00005000 default:
5001 tl_assert(0);
5002 return False;
5003 }
5004}
sewardjb4112022007-11-09 22:49:28 +00005005
5006static
5007Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
5008{
philippef5774342014-05-03 11:12:50 +00005009 if (!VG_IS_TOOL_USERREQ('H','G',args[0])
5010 && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
sewardjb4112022007-11-09 22:49:28 +00005011 return False;
5012
5013 /* Anything that gets past the above check is one of ours, so we
5014 should be able to handle it. */
5015
5016 /* default, meaningless return value, unless otherwise set */
5017 *ret = 0;
5018
5019 switch (args[0]) {
5020
5021 /* --- --- User-visible client requests --- --- */
5022
5023 case VG_USERREQ__HG_CLEAN_MEMORY:
florian5e5cb002015-08-03 21:21:42 +00005024 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
sewardjb4112022007-11-09 22:49:28 +00005025 args[1], args[2]);
5026 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00005027 are any held locks etc in the area. Calling evh__die_mem
5028 and then evh__new_mem is a bit inefficient; probably just
5029 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00005030 if (args[2] > 0) { /* length */
5031 evh__die_mem(args[1], args[2]);
5032 /* and then set it to New */
5033 evh__new_mem(args[1], args[2]);
5034 }
5035 break;
5036
sewardjc8028ad2010-05-05 09:34:42 +00005037 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
5038 Addr payload = 0;
5039 SizeT pszB = 0;
5040 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5041 args[1]);
philippe0c9ac8d2014-07-18 00:03:58 +00005042 if (HG_(mm_find_containing_block)(NULL, NULL,
5043 &payload, &pszB, args[1])) {
sewardjc8028ad2010-05-05 09:34:42 +00005044 if (pszB > 0) {
5045 evh__die_mem(payload, pszB);
5046 evh__new_mem(payload, pszB);
5047 }
5048 *ret = pszB;
5049 } else {
5050 *ret = (UWord)-1;
5051 }
5052 break;
5053 }
5054
sewardj406bac82010-03-03 23:03:40 +00005055 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
florian5e5cb002015-08-03 21:21:42 +00005056 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
sewardj406bac82010-03-03 23:03:40 +00005057 args[1], args[2]);
5058 if (args[2] > 0) { /* length */
5059 evh__untrack_mem(args[1], args[2]);
5060 }
5061 break;
5062
5063 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
florian5e5cb002015-08-03 21:21:42 +00005064 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
sewardj406bac82010-03-03 23:03:40 +00005065 args[1], args[2]);
5066 if (args[2] > 0) { /* length */
5067 evh__new_mem(args[1], args[2]);
5068 }
5069 break;
5070
philippef54cb662015-05-10 22:19:31 +00005071 case _VG_USERREQ__HG_GET_ABITS:
florian5e5cb002015-08-03 21:21:42 +00005072 if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
philippef54cb662015-05-10 22:19:31 +00005073 args[1], args[2], args[3]);
5074 UChar *zzabit = (UChar *) args[2];
5075 if (zzabit == NULL
5076 || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
5077 VKI_PROT_READ|VKI_PROT_WRITE))
5078 *ret = (UWord) libhb_srange_get_abits ((Addr) args[1],
5079 (UChar*) args[2],
5080 (SizeT) args[3]);
5081 else
5082 *ret = -1;
5083 break;
5084
sewardjb4112022007-11-09 22:49:28 +00005085 /* --- --- Client requests for Helgrind's use only --- --- */
5086
5087 /* Some thread is telling us its pthread_t value. Record the
5088 binding between that and the associated Thread*, so we can
5089 later find the Thread* again when notified of a join by the
5090 thread. */
5091 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
5092 Thread* my_thr = NULL;
5093 if (0)
5094 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
5095 (void*)args[1]);
5096 map_pthread_t_to_Thread_INIT();
5097 my_thr = map_threads_maybe_lookup( tid );
5098 /* This assertion should hold because the map_threads (tid to
5099 Thread*) binding should have been made at the point of
5100 low-level creation of this thread, which should have
5101 happened prior to us getting this client request for it.
5102 That's because this client request is sent from
5103 client-world from the 'thread_wrapper' function, which
5104 only runs once the thread has been low-level created. */
5105 tl_assert(my_thr != NULL);
5106 /* So now we know that (pthread_t)args[1] is associated with
5107 (Thread*)my_thr. Note that down. */
5108 if (0)
5109 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
5110 (void*)args[1], (void*)my_thr );
florian6bf37262012-10-21 03:23:36 +00005111 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
sewardj8eb8bab2015-07-21 14:44:28 +00005112
5113 if (my_thr->coretid != 1) {
5114 /* FIXME: hardwires assumption about identity of the root thread. */
5115 if (HG_(clo_ignore_thread_creation)) {
5116 HG_(thread_leave_pthread_create)(my_thr);
5117 HG_(thread_leave_synchr)(my_thr);
5118 tl_assert(my_thr->synchr_nesting == 0);
5119 }
5120 }
sewardjb4112022007-11-09 22:49:28 +00005121 break;
5122 }
5123
5124 case _VG_USERREQ__HG_PTH_API_ERROR: {
5125 Thread* my_thr = NULL;
5126 map_pthread_t_to_Thread_INIT();
5127 my_thr = map_threads_maybe_lookup( tid );
5128 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00005129 HG_(record_error_PthAPIerror)(
florian6bf37262012-10-21 03:23:36 +00005130 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00005131 break;
5132 }
5133
5134 /* This thread (tid) has completed a join with the quitting
5135 thread whose pthread_t is in args[1]. */
5136 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
5137 Thread* thr_q = NULL; /* quitter Thread* */
5138 Bool found = False;
5139 if (0)
5140 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
5141 (void*)args[1]);
5142 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00005143 found = VG_(lookupFM)( map_pthread_t_to_Thread,
florian6bf37262012-10-21 03:23:36 +00005144 NULL, (UWord*)&thr_q, (UWord)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005145 /* Can this fail? It would mean that our pthread_join
5146 wrapper observed a successful join on args[1] yet that
5147 thread never existed (or at least, it never lodged an
5148 entry in the mapping (via SET_MY_PTHREAD_T)). Which
5149 sounds like a bug in the threads library. */
5150 // FIXME: get rid of this assertion; handle properly
5151 tl_assert(found);
5152 if (found) {
5153 if (0)
5154 VG_(printf)(".................... quitter Thread* = %p\n",
5155 thr_q);
5156 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
5157 }
5158 break;
5159 }
5160
philipped40aff52014-06-16 20:00:14 +00005161 /* This thread (tid) is informing us of its master. */
5162 case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
5163 GNAT_dmml dmml;
5164 dmml.dependent = (void*)args[1];
5165 dmml.master = (void*)args[2];
5166 dmml.master_level = (Word)args[3];
5167 dmml.hg_dependent = map_threads_maybe_lookup( tid );
5168 tl_assert(dmml.hg_dependent);
5169
5170 if (0)
5171 VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
5172 "dependent = %p master = %p master_level = %ld"
5173 " dependent Thread* = %p\n",
5174 (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
5175 dmml.hg_dependent);
5176 gnat_dmmls_INIT();
5177 VG_(addToXA) (gnat_dmmls, &dmml);
5178 break;
5179 }
5180
5181 /* This thread (tid) is informing us that it has completed a
5182 master. */
5183 case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
5184 Word n;
5185 const Thread *stayer = map_threads_maybe_lookup( tid );
5186 const void *master = (void*)args[1];
5187 const Word master_level = (Word) args[2];
5188 tl_assert(stayer);
5189
5190 if (0)
5191 VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5192 "self_id = %p master_level = %ld Thread* = %p\n",
5193 (Int)tid, master, master_level, stayer);
5194
5195 gnat_dmmls_INIT();
5196 /* Reverse loop on the array, simulating a pthread_join for
5197 the Dependent tasks of the completed master, and removing
5198 them from the array. */
5199 for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5200 GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5201 if (dmml->master == master
5202 && dmml->master_level == master_level) {
5203 if (0)
5204 VG_(printf)("quitter %p dependency to stayer %p\n",
5205 dmml->hg_dependent->hbthr, stayer->hbthr);
5206 tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5207 generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5208 stayer->hbthr);
5209 VG_(removeIndexXA) (gnat_dmmls, n);
5210 }
5211 }
5212 break;
5213 }
5214
sewardjb4112022007-11-09 22:49:28 +00005215 /* EXPOSITION only: by intercepting lock init events we can show
5216 the user where the lock was initialised, rather than only
5217 being able to show where it was first locked. Intercepting
5218 lock initialisations is not necessary for the basic operation
5219 of the race checker. */
5220 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5221 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5222 break;

      /* mutex=arg[1], mutex_is_init=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*, long
         if ((args[2] == True) // lock actually taken
             && (HG_(get_pthread_create_nesting_level)(tid) == 0))
            evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST:
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;
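      /* Note the bracketing pattern in the cases above and below: a
         *_PRE request does HG_(thread_enter_synchr) and its matching
         *_POST does HG_(thread_leave_synchr), schematically:

            PRE:   enter_synchr(thr);  [fire evh__..._PRE  if wanted]
            POST:  [fire evh__..._POST if wanted];  leave_synchr(thr)

         so the tool always knows when a thread is inside a pthread
         synchronisation call, e.g. so that events generated
         internally by an ignored thread-creation sequence can be
         skipped. */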

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* Thread successfully completed pthread_cond_init:
         cond=arg[1], cond_attr=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
         evh__HG_PTHREAD_COND_INIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      /* cond=arg[1], cond_is_init=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
         break;

      /* Thread completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2], timeout=arg[3], successful=arg[4] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         if (args[4] == True)
            evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                            (void*)args[1], (void*)args[2],
                                            (Bool)args[3] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                             args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         if ((args[3] == True)
             && (HG_(get_pthread_create_nesting_level)(tid) == 0))
            evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_POST: /* sem_t* */
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE: /* sem_t* */
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t*, long tookLock */
         if (args[2] == True)
            evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;
sewardj9f569b72008-11-13 13:33:09 +00005364 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00005365 /* pth_bar_t*, ulong count, ulong resizable */
5366 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5367 args[2], args[3] );
5368 break;
5369
5370 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5371 /* pth_bar_t*, ulong newcount */
5372 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5373 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00005374 break;
5375
5376 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5377 /* pth_bar_t* */
5378 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5379 break;
5380
5381 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5382 /* pth_bar_t* */
5383 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5384 break;
sewardjb4112022007-11-09 22:49:28 +00005385
sewardj5a644da2009-08-11 10:35:58 +00005386 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5387 /* pth_spinlock_t* */
5388 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5389 break;
5390
5391 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5392 /* pth_spinlock_t* */
5393 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5394 break;
5395
5396 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5397 /* pth_spinlock_t*, Word */
5398 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5399 break;
5400
5401 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5402 /* pth_spinlock_t* */
5403 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5404 break;
5405
5406 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5407 /* pth_spinlock_t* */
5408 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5409 break;
5410
sewardjed2e72e2009-08-14 11:08:24 +00005411 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
florian19f91bb2012-11-10 22:29:54 +00005412 /* HChar* who */
sewardjed2e72e2009-08-14 11:08:24 +00005413 HChar* who = (HChar*)args[1];
5414 HChar buf[50 + 50];
5415 Thread* thr = map_threads_maybe_lookup( tid );
5416 tl_assert( thr ); /* I must be mapped */
5417 tl_assert( who );
5418 tl_assert( VG_(strlen)(who) <= 50 );
5419 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5420 /* record_error_Misc strdup's buf, so this is safe: */
5421 HG_(record_error_Misc)( thr, buf );
5422 break;
5423 }
5424
5425 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5426 /* UWord arbitrary-SO-tag */
5427 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5428 break;
5429
5430 case _VG_USERREQ__HG_USERSO_RECV_POST:
5431 /* UWord arbitrary-SO-tag */
5432 evh__HG_USERSO_RECV_POST( tid, args[1] );
5433 break;
5434
sewardj6015d0e2011-03-11 19:10:48 +00005435 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5436 /* UWord arbitrary-SO-tag */
5437 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5438 break;
5439
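      /* The three USERSO cases above back the user-level annotations
         in helgrind.h.  Typical client-side use (any word-sized value
         works as the tag; 'flag' and 'use' are just examples):

            int data, flag;
            // producer:
            data = 42;
            ANNOTATE_HAPPENS_BEFORE(&flag);
            // consumer, once it observes the producer is done:
            ANNOTATE_HAPPENS_AFTER(&flag);
            use(data);

         These arrive here as ..._USERSO_SEND_PRE and
         ..._USERSO_RECV_POST with args[1] == (UWord)&flag. */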
      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
         if (handled)
            *ret = 1;
         else
            *ret = 0;
         return handled;
      }
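      /* E.g. from GDB attached to the embedded gdbserver:

            (gdb) monitor info locks

         arrives here with the command string in args[1].  Returning
         False for an unrecognised command lets the Valgrind core try
         its own monitor-command handlers. */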

      case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_enter_pthread_create)(thr);
            HG_(thread_enter_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_leave_pthread_create)(thr);
            HG_(thread_leave_synchr)(thr);
         }
         break;
      }
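      /* With --ignore-thread-creation=yes, the two cases above
         bracket the client's entire pthread_create call:

            CREATE_BEGIN     // creation nesting level 0 -> 1
               ... guest libc sets up the new thread ...
            CREATE_END       // creation nesting level 1 -> 0

         and the LOCK/UNLOCK/RWLOCK cases earlier consult
         HG_(get_pthread_create_nesting_level) so that lock events
         issued internally by thread creation are not processed. */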

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE:  // pth_mx_t*, long tryLock
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED:    // void*, long isW
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED:    // void*
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

#if defined(VGO_solaris)
      case _VG_USERREQ__HG_RTLD_BIND_GUARD:
         evh__HG_RTLD_BIND_GUARD(tid, args[1]);
         break;

      case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
         evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
         break;
#endif /* VGO_solaris */

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                            HG_(clo_conflict_cache_size),
                            10*1000, 150*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
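   /* Worked example: --hg-sanity-flags=010101 sets bits 4, 2 and 0
      (reading left to right), giving HG_(clo_sanity_flags) == 0x15:
      check after lock-order-graph changes, at big mem-permission
      settings, and at thread create/join events; see
      hg_print_debug_usage below for the bit meanings. */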

   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                            HG_(clo_ignore_thread_creation)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
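/* Illustrative invocations exercising the options parsed above
   (the program name is a placeholder):

      valgrind --tool=helgrind --history-level=approx ./a.out
      valgrind --tool=helgrind --conflict-cache-size=5000000 ./a.out
      valgrind --tool=helgrind --hg-sanity-flags=000001 ./a.out
*/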

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [2000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread\n"
"                              creation [%s]\n",
HG_(clo_ignore_thread_creation) ? "yes" : "no"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:  is never done (may cause big space leaks in Helgrind)\n"
"       auto:   done just often enough to keep space usage under control\n"
"       always: done after every VTS GC (mostly just a big time waster)\n"
   );
}

static void hg_print_stats (void)
{

   if (1) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }
   }

   //zz       VG_(printf)("\n");
   //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
   //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
   //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
   //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz                   stats__hbefore_stk_hwm);
   //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
   //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)("        locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("client malloc-ed blocks: %'8u\n",
               VG_(HT_count_nodes)(hg_mallocmeta_table));

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases
              );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // This in fact only prints stats.
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
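/* For instance, if nRequest == 8 but the stack is only 3 frames
   deep, frames[0..2] receive the PCs and frames[3..7] are zeroed,
   so libhb always sees a fully-initialised, fixed-size buffer. */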

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_info_location (Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (a);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2013, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)    (hg_cheap_sanity_check,
   //                             hg_expensive_sanity_check);

   VG_(needs_print_stats) (hg_print_stats);
   VG_(needs_info_location) (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );

   /* At its end, evh__die_mem calls libhb_srange_noaccess_NoFX,
      which has no effect.  We do not use VG_(track_die_mem_stack),
      as that would be an expensive way to do nothing. */
   // VG_(track_die_mem_stack)      ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/