blob: 5c302e56f81810d5238077b69c16bbf619fd8b29 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
Elliott Hughesed398002017-06-21 14:41:24 -070011 Copyright (C) 2007-2017 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
Elliott Hughesed398002017-06-21 14:41:24 -070014 Copyright (C) 2007-2017 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
philippef5774342014-05-03 11:12:50 +000040#include "pub_tool_gdbserver.h"
sewardjb4112022007-11-09 22:49:28 +000041#include "pub_tool_libcassert.h"
42#include "pub_tool_libcbase.h"
43#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000044#include "pub_tool_threadstate.h"
45#include "pub_tool_tooliface.h"
46#include "pub_tool_hashtable.h"
47#include "pub_tool_replacemalloc.h"
48#include "pub_tool_machine.h"
49#include "pub_tool_options.h"
50#include "pub_tool_xarray.h"
51#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000052#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000053#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
54#include "pub_tool_redir.h" // sonames for the dynamic linkers
55#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj8eb8bab2015-07-21 14:44:28 +000056#include "pub_tool_libcproc.h"
sewardj234e5582011-02-09 12:47:23 +000057#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
philippe5fbc9762013-12-01 19:28:48 +000058#include "pub_tool_poolalloc.h"
philippe07c08522014-05-14 20:39:27 +000059#include "pub_tool_addrinfo.h"
Elliott Hughesed398002017-06-21 14:41:24 -070060#include "pub_tool_xtree.h"
61#include "pub_tool_xtmemory.h"
sewardjb4112022007-11-09 22:49:28 +000062
sewardjf98e1c02008-10-25 16:22:41 +000063#include "hg_basics.h"
64#include "hg_wordset.h"
philippef5774342014-05-03 11:12:50 +000065#include "hg_addrdescr.h"
sewardjf98e1c02008-10-25 16:22:41 +000066#include "hg_lock_n_thread.h"
67#include "hg_errors.h"
68
69#include "libhb.h"
70
sewardjb4112022007-11-09 22:49:28 +000071#include "helgrind.h"
72
sewardjf98e1c02008-10-25 16:22:41 +000073
// FIXME: new_mem_w_tid ignores the supplied tid; establish why, and
// either use the tid or drop the parameter.
75
76// FIXME: when client destroys a lock or a CV, remove these
77// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000078
79/*----------------------------------------------------------------*/
80/*--- ---*/
81/*----------------------------------------------------------------*/
82
sewardj11e352f2007-11-30 11:11:02 +000083/* Note this needs to be compiled with -fno-strict-aliasing, since it
84 contains a whole bunch of calls to lookupFM etc which cast between
85 Word and pointer types. gcc rightly complains this breaks ANSI C
86 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
87 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000088*/
sewardjb4112022007-11-09 22:49:28 +000089
90// FIXME what is supposed to happen to locks in memory which
91// is relocated as a result of client realloc?
92
sewardjb4112022007-11-09 22:49:28 +000093// FIXME put referencing ThreadId into Thread and get
94// rid of the slow reverse mapping function.
95
96// FIXME accesses to NoAccess areas: change state to Excl?
97
98// FIXME report errors for accesses of NoAccess memory?
99
100// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
101// the thread still holds the lock.
102
103/* ------------ Debug/trace options ------------ */
104
sewardjb4112022007-11-09 22:49:28 +0000105// 0 for silent, 1 for some stuff, 2 for lots of stuff
106#define SHOW_EVENTS 0
107
sewardjb4112022007-11-09 22:49:28 +0000108
florian6bf37262012-10-21 03:23:36 +0000109static void all__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000110
philipped99c26a2012-07-31 22:17:28 +0000111#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
sewardjb4112022007-11-09 22:49:28 +0000112
113// 0 for none, 1 for dump at end of run
114#define SHOW_DATA_STRUCTURES 0
115
116
sewardjb4112022007-11-09 22:49:28 +0000117/* ------------ Misc comments ------------ */
118
119// FIXME: don't hardwire initial entries for root thread.
120// Instead, let the pre_thread_ll_create handler do this.
121
sewardjb4112022007-11-09 22:49:28 +0000122
123/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000124/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000125/*----------------------------------------------------------------*/
126
sewardjb4112022007-11-09 22:49:28 +0000127/* Admin linked list of Threads */
128static Thread* admin_threads = NULL;
sewardjffce8152011-06-24 10:09:41 +0000129Thread* get_admin_threads ( void ) { return admin_threads; }
sewardjb4112022007-11-09 22:49:28 +0000130
sewardj1d7c3322011-02-28 09:22:51 +0000131/* Admin double linked list of Locks */
132/* We need a double linked list to properly and efficiently
133 handle del_LockN. */
sewardjb4112022007-11-09 22:49:28 +0000134static Lock* admin_locks = NULL;
135
sewardjb4112022007-11-09 22:49:28 +0000136/* Mapping table for core ThreadIds to Thread* */
137static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
138
sewardjb4112022007-11-09 22:49:28 +0000139/* Mapping table for lock guest addresses to Lock* */
140static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
141
sewardj0f64c9e2011-03-10 17:40:22 +0000142/* The word-set universes for lock sets. */
sewardjb4112022007-11-09 22:49:28 +0000143static WordSetU* univ_lsets = NULL; /* sets of Lock* */
144static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
sewardj866c80c2011-10-22 19:29:51 +0000145static Int next_gc_univ_laog = 1;
146/* univ_laog will be garbaged collected when the nr of element in univ_laog is
147 >= next_gc_univ_laog. */
sewardjb4112022007-11-09 22:49:28 +0000148
sewardjffce8152011-06-24 10:09:41 +0000149/* Allow libhb to get at the universe of locksets stored
150 here. Sigh. */
151WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
152
153/* Allow libhb to get at the list of locks stored here. Ditto
154 sigh. */
155Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
156
sewardjb4112022007-11-09 22:49:28 +0000157
158/*----------------------------------------------------------------*/
159/*--- Simple helpers for the data structures ---*/
160/*----------------------------------------------------------------*/
161
162static UWord stats__lockN_acquires = 0;
163static UWord stats__lockN_releases = 0;
164
sewardj8eb8bab2015-07-21 14:44:28 +0000165#if defined(VGO_solaris)
166Bool HG_(clo_ignore_thread_creation) = True;
167#else
168Bool HG_(clo_ignore_thread_creation) = False;
169#endif /* VGO_solaris */
170
sewardjf98e1c02008-10-25 16:22:41 +0000171static
172ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000173
174/* --------- Constructors --------- */
175
sewardjf98e1c02008-10-25 16:22:41 +0000176static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000177 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000178 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000179 thread->locksetA = HG_(emptyWS)( univ_lsets );
180 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000181 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000182 thread->hbthr = hbthr;
183 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000184 thread->created_at = NULL;
185 thread->announced = False;
186 thread->errmsg_index = indx++;
187 thread->admin = admin_threads;
sewardj8eb8bab2015-07-21 14:44:28 +0000188 thread->synchr_nesting = 0;
189 thread->pthread_create_nesting_level = 0;
190#if defined(VGO_solaris)
191 thread->bind_guard_flag = 0;
192#endif /* VGO_solaris */
193
sewardjb4112022007-11-09 22:49:28 +0000194 admin_threads = thread;
195 return thread;
196}
sewardjf98e1c02008-10-25 16:22:41 +0000197
sewardjb4112022007-11-09 22:49:28 +0000198// Make a new lock which is unlocked (hence ownerless)
sewardj1d7c3322011-02-28 09:22:51 +0000199// and insert the new lock in admin_locks double linked list.
sewardjb4112022007-11-09 22:49:28 +0000200static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
201 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000202 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardj0f64c9e2011-03-10 17:40:22 +0000203 /* begin: add to double linked list */
sewardj1d7c3322011-02-28 09:22:51 +0000204 if (admin_locks)
205 admin_locks->admin_prev = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000206 lock->admin_next = admin_locks;
207 lock->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000208 admin_locks = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000209 /* end: add */
sewardjb4112022007-11-09 22:49:28 +0000210 lock->unique = unique++;
211 lock->magic = LockN_MAGIC;
212 lock->appeared_at = NULL;
213 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000214 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000215 lock->guestaddr = guestaddr;
216 lock->kind = kind;
217 lock->heldW = False;
218 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000219 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000220 return lock;
221}
sewardjb4112022007-11-09 22:49:28 +0000222
223/* Release storage for a Lock. Also release storage in .heldBy, if
sewardj1d7c3322011-02-28 09:22:51 +0000224 any. Removes from admin_locks double linked list. */
sewardjb4112022007-11-09 22:49:28 +0000225static void del_LockN ( Lock* lk )
226{
sewardjf98e1c02008-10-25 16:22:41 +0000227 tl_assert(HG_(is_sane_LockN)(lk));
228 tl_assert(lk->hbso);
229 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000230 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000231 VG_(deleteBag)( lk->heldBy );
sewardj0f64c9e2011-03-10 17:40:22 +0000232 /* begin: del lock from double linked list */
233 if (lk == admin_locks) {
234 tl_assert(lk->admin_prev == NULL);
235 if (lk->admin_next)
236 lk->admin_next->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000237 admin_locks = lk->admin_next;
sewardj1d7c3322011-02-28 09:22:51 +0000238 }
239 else {
sewardj0f64c9e2011-03-10 17:40:22 +0000240 tl_assert(lk->admin_prev != NULL);
sewardj1d7c3322011-02-28 09:22:51 +0000241 lk->admin_prev->admin_next = lk->admin_next;
sewardj0f64c9e2011-03-10 17:40:22 +0000242 if (lk->admin_next)
243 lk->admin_next->admin_prev = lk->admin_prev;
sewardj1d7c3322011-02-28 09:22:51 +0000244 }
sewardj0f64c9e2011-03-10 17:40:22 +0000245 /* end: del */
sewardjb4112022007-11-09 22:49:28 +0000246 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000247 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000248}
249
250/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
251 it. This is done strictly: only combinations resulting from
252 correct program and libpthread behaviour are allowed. */
253static void lockN_acquire_writer ( Lock* lk, Thread* thr )
254{
sewardjf98e1c02008-10-25 16:22:41 +0000255 tl_assert(HG_(is_sane_LockN)(lk));
256 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000257
258 stats__lockN_acquires++;
259
260 /* EXPOSITION only */
261 /* We need to keep recording snapshots of where the lock was
262 acquired, so as to produce better lock-order error messages. */
263 if (lk->acquired_at == NULL) {
264 ThreadId tid;
265 tl_assert(lk->heldBy == NULL);
266 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
267 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000268 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000269 } else {
270 tl_assert(lk->heldBy != NULL);
271 }
272 /* end EXPOSITION only */
273
274 switch (lk->kind) {
275 case LK_nonRec:
276 case_LK_nonRec:
277 tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
278 tl_assert(!lk->heldW);
279 lk->heldW = True;
sewardjf98e1c02008-10-25 16:22:41 +0000280 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
florian6bf37262012-10-21 03:23:36 +0000281 VG_(addToBag)( lk->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +0000282 break;
283 case LK_mbRec:
284 if (lk->heldBy == NULL)
285 goto case_LK_nonRec;
286 /* 2nd and subsequent locking of a lock by its owner */
287 tl_assert(lk->heldW);
288 /* assert: lk is only held by one thread .. */
Elliott Hughesa0664b92017-04-18 17:46:52 -0700289 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
sewardjb4112022007-11-09 22:49:28 +0000290 /* assert: .. and that thread is 'thr'. */
florian6bf37262012-10-21 03:23:36 +0000291 tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
sewardj896f6f92008-08-19 08:38:52 +0000292 == VG_(sizeTotalBag)(lk->heldBy));
florian6bf37262012-10-21 03:23:36 +0000293 VG_(addToBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000294 break;
295 case LK_rdwr:
296 tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
297 goto case_LK_nonRec;
298 default:
299 tl_assert(0);
300 }
sewardjf98e1c02008-10-25 16:22:41 +0000301 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000302}
303
304static void lockN_acquire_reader ( Lock* lk, Thread* thr )
305{
sewardjf98e1c02008-10-25 16:22:41 +0000306 tl_assert(HG_(is_sane_LockN)(lk));
307 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000308 /* can only add reader to a reader-writer lock. */
309 tl_assert(lk->kind == LK_rdwr);
310 /* lk must be free or already r-held. */
311 tl_assert(lk->heldBy == NULL
312 || (lk->heldBy != NULL && !lk->heldW));
313
314 stats__lockN_acquires++;
315
316 /* EXPOSITION only */
317 /* We need to keep recording snapshots of where the lock was
318 acquired, so as to produce better lock-order error messages. */
319 if (lk->acquired_at == NULL) {
320 ThreadId tid;
321 tl_assert(lk->heldBy == NULL);
322 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
323 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000324 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000325 } else {
326 tl_assert(lk->heldBy != NULL);
327 }
328 /* end EXPOSITION only */
329
330 if (lk->heldBy) {
florian6bf37262012-10-21 03:23:36 +0000331 VG_(addToBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000332 } else {
333 lk->heldW = False;
sewardjf98e1c02008-10-25 16:22:41 +0000334 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
florian6bf37262012-10-21 03:23:36 +0000335 VG_(addToBag)( lk->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +0000336 }
337 tl_assert(!lk->heldW);
sewardjf98e1c02008-10-25 16:22:41 +0000338 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000339}
340
341/* Update 'lk' to reflect a release of it by 'thr'. This is done
342 strictly: only combinations resulting from correct program and
343 libpthread behaviour are allowed. */
344
345static void lockN_release ( Lock* lk, Thread* thr )
346{
347 Bool b;
sewardjf98e1c02008-10-25 16:22:41 +0000348 tl_assert(HG_(is_sane_LockN)(lk));
349 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000350 /* lock must be held by someone */
351 tl_assert(lk->heldBy);
352 stats__lockN_releases++;
353 /* Remove it from the holder set */
florian6bf37262012-10-21 03:23:36 +0000354 b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000355 /* thr must actually have been a holder of lk */
356 tl_assert(b);
357 /* normalise */
358 tl_assert(lk->acquired_at);
sewardj896f6f92008-08-19 08:38:52 +0000359 if (VG_(isEmptyBag)(lk->heldBy)) {
360 VG_(deleteBag)(lk->heldBy);
sewardjb4112022007-11-09 22:49:28 +0000361 lk->heldBy = NULL;
362 lk->heldW = False;
363 lk->acquired_at = NULL;
364 }
sewardjf98e1c02008-10-25 16:22:41 +0000365 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000366}
367
368static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
369{
370 Thread* thr;
371 if (!lk->heldBy) {
372 tl_assert(!lk->heldW);
373 return;
374 }
375 /* for each thread that holds this lock do ... */
sewardj896f6f92008-08-19 08:38:52 +0000376 VG_(initIterBag)( lk->heldBy );
florian6bf37262012-10-21 03:23:36 +0000377 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
sewardjf98e1c02008-10-25 16:22:41 +0000378 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000379 tl_assert(HG_(elemWS)( univ_lsets,
florian6bf37262012-10-21 03:23:36 +0000380 thr->locksetA, (UWord)lk ));
sewardjb4112022007-11-09 22:49:28 +0000381 thr->locksetA
florian6bf37262012-10-21 03:23:36 +0000382 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +0000383
384 if (lk->heldW) {
385 tl_assert(HG_(elemWS)( univ_lsets,
florian6bf37262012-10-21 03:23:36 +0000386 thr->locksetW, (UWord)lk ));
sewardjb4112022007-11-09 22:49:28 +0000387 thr->locksetW
florian6bf37262012-10-21 03:23:36 +0000388 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +0000389 }
390 }
sewardj896f6f92008-08-19 08:38:52 +0000391 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000392}
393
sewardjb4112022007-11-09 22:49:28 +0000394
395/*----------------------------------------------------------------*/
396/*--- Print out the primary data structures ---*/
397/*----------------------------------------------------------------*/
398
sewardjb4112022007-11-09 22:49:28 +0000399#define PP_THREADS (1<<1)
400#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000401#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000402
403
404static const Int sHOW_ADMIN = 0;
405
406static void space ( Int n )
407{
408 Int i;
florian6bf37262012-10-21 03:23:36 +0000409 HChar spaces[128+1];
sewardjb4112022007-11-09 22:49:28 +0000410 tl_assert(n >= 0 && n < 128);
411 if (n == 0)
412 return;
413 for (i = 0; i < n; i++)
414 spaces[i] = ' ';
415 spaces[i] = 0;
416 tl_assert(i < 128+1);
417 VG_(printf)("%s", spaces);
418}
419
420static void pp_Thread ( Int d, Thread* t )
421{
422 space(d+0); VG_(printf)("Thread %p {\n", t);
423 if (sHOW_ADMIN) {
424 space(d+3); VG_(printf)("admin %p\n", t->admin);
425 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
426 }
427 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
428 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
sewardjb4112022007-11-09 22:49:28 +0000429 space(d+0); VG_(printf)("}\n");
430}
431
432static void pp_admin_threads ( Int d )
433{
434 Int i, n;
435 Thread* t;
436 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
437 /* nothing */
438 }
439 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
440 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
441 if (0) {
442 space(n);
443 VG_(printf)("admin_threads record %d of %d:\n", i, n);
444 }
445 pp_Thread(d+3, t);
446 }
barta0b6b2c2008-07-07 06:49:24 +0000447 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000448}
449
450static void pp_map_threads ( Int d )
451{
njn4c245e52009-03-15 23:25:38 +0000452 Int i, n = 0;
sewardjb4112022007-11-09 22:49:28 +0000453 space(d); VG_(printf)("map_threads ");
sewardjb4112022007-11-09 22:49:28 +0000454 for (i = 0; i < VG_N_THREADS; i++) {
455 if (map_threads[i] != NULL)
456 n++;
457 }
458 VG_(printf)("(%d entries) {\n", n);
459 for (i = 0; i < VG_N_THREADS; i++) {
460 if (map_threads[i] == NULL)
461 continue;
462 space(d+3);
463 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
464 }
465 space(d); VG_(printf)("}\n");
466}
467
468static const HChar* show_LockKind ( LockKind lkk ) {
469 switch (lkk) {
470 case LK_mbRec: return "mbRec";
471 case LK_nonRec: return "nonRec";
472 case LK_rdwr: return "rdwr";
473 default: tl_assert(0);
474 }
475}
476
philippef5774342014-05-03 11:12:50 +0000477/* Pretty Print lock lk.
478 if show_lock_addrdescr, describes the (guest) lock address.
479 (this description will be more complete with --read-var-info=yes).
480 if show_internal_data, shows also helgrind internal information.
481 d is the level at which output is indented. */
482static void pp_Lock ( Int d, Lock* lk,
483 Bool show_lock_addrdescr,
484 Bool show_internal_data)
sewardjb4112022007-11-09 22:49:28 +0000485{
philippef5774342014-05-03 11:12:50 +0000486 space(d+0);
487 if (show_internal_data)
philippe07c08522014-05-14 20:39:27 +0000488 VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
philippef5774342014-05-03 11:12:50 +0000489 else
philippe07c08522014-05-14 20:39:27 +0000490 VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
philippef5774342014-05-03 11:12:50 +0000491 if (!show_lock_addrdescr
philippe07c08522014-05-14 20:39:27 +0000492 || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
philippef5774342014-05-03 11:12:50 +0000493 VG_(printf)("\n");
494
sewardjb4112022007-11-09 22:49:28 +0000495 if (sHOW_ADMIN) {
sewardj1d7c3322011-02-28 09:22:51 +0000496 space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
497 space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
498 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
sewardjb4112022007-11-09 22:49:28 +0000499 }
philippef5774342014-05-03 11:12:50 +0000500 if (show_internal_data) {
501 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
502 }
sewardjb4112022007-11-09 22:49:28 +0000503 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
philippef5774342014-05-03 11:12:50 +0000504 if (show_internal_data) {
505 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
506 }
507 if (show_internal_data) {
508 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
509 }
sewardjb4112022007-11-09 22:49:28 +0000510 if (lk->heldBy) {
511 Thread* thr;
florian6bf37262012-10-21 03:23:36 +0000512 UWord count;
sewardjb4112022007-11-09 22:49:28 +0000513 VG_(printf)(" { ");
sewardj896f6f92008-08-19 08:38:52 +0000514 VG_(initIterBag)( lk->heldBy );
philippef5774342014-05-03 11:12:50 +0000515 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
516 if (show_internal_data)
517 VG_(printf)("%lu:%p ", count, thr);
518 else {
519 VG_(printf)("%c%lu:thread #%d ",
520 lk->heldW ? 'W' : 'R',
521 count, thr->errmsg_index);
522 if (thr->coretid == VG_INVALID_THREADID)
523 VG_(printf)("tid (exited) ");
524 else
florian5e5cb002015-08-03 21:21:42 +0000525 VG_(printf)("tid %u ", thr->coretid);
philippef5774342014-05-03 11:12:50 +0000526
527 }
528 }
sewardj896f6f92008-08-19 08:38:52 +0000529 VG_(doneIterBag)( lk->heldBy );
philippef5774342014-05-03 11:12:50 +0000530 VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000531 }
sewardjb4112022007-11-09 22:49:28 +0000532 space(d+0); VG_(printf)("}\n");
533}
534
535static void pp_admin_locks ( Int d )
536{
537 Int i, n;
538 Lock* lk;
sewardj1d7c3322011-02-28 09:22:51 +0000539 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000540 /* nothing */
541 }
542 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
sewardj1d7c3322011-02-28 09:22:51 +0000543 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000544 if (0) {
545 space(n);
546 VG_(printf)("admin_locks record %d of %d:\n", i, n);
547 }
philippef5774342014-05-03 11:12:50 +0000548 pp_Lock(d+3, lk,
549 False /* show_lock_addrdescr */,
550 True /* show_internal_data */);
sewardjb4112022007-11-09 22:49:28 +0000551 }
barta0b6b2c2008-07-07 06:49:24 +0000552 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000553}
554
philippef5774342014-05-03 11:12:50 +0000555static void pp_map_locks ( Int d)
sewardjb4112022007-11-09 22:49:28 +0000556{
557 void* gla;
558 Lock* lk;
559 space(d); VG_(printf)("map_locks (%d entries) {\n",
sewardj896f6f92008-08-19 08:38:52 +0000560 (Int)VG_(sizeFM)( map_locks ));
561 VG_(initIterFM)( map_locks );
florian6bf37262012-10-21 03:23:36 +0000562 while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
563 (UWord*)&lk )) {
sewardjb4112022007-11-09 22:49:28 +0000564 space(d+3);
565 VG_(printf)("guest %p -> Lock %p\n", gla, lk);
566 }
sewardj896f6f92008-08-19 08:38:52 +0000567 VG_(doneIterFM)( map_locks );
sewardjb4112022007-11-09 22:49:28 +0000568 space(d); VG_(printf)("}\n");
569}
570
florian6bf37262012-10-21 03:23:36 +0000571static void pp_everything ( Int flags, const HChar* caller )
sewardjb4112022007-11-09 22:49:28 +0000572{
573 Int d = 0;
574 VG_(printf)("\n");
575 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
576 if (flags & PP_THREADS) {
577 VG_(printf)("\n");
578 pp_admin_threads(d+3);
579 VG_(printf)("\n");
580 pp_map_threads(d+3);
581 }
582 if (flags & PP_LOCKS) {
583 VG_(printf)("\n");
584 pp_admin_locks(d+3);
585 VG_(printf)("\n");
586 pp_map_locks(d+3);
587 }
sewardjb4112022007-11-09 22:49:28 +0000588
589 VG_(printf)("\n");
590 VG_(printf)("}\n");
591 VG_(printf)("\n");
592}
593
594#undef SHOW_ADMIN
595
596
597/*----------------------------------------------------------------*/
598/*--- Initialise the primary data structures ---*/
599/*----------------------------------------------------------------*/
600
sewardjf98e1c02008-10-25 16:22:41 +0000601static void initialise_data_structures ( Thr* hbthr_root )
sewardjb4112022007-11-09 22:49:28 +0000602{
sewardjb4112022007-11-09 22:49:28 +0000603 Thread* thr;
sewardjffce8152011-06-24 10:09:41 +0000604 WordSetID wsid;
sewardjb4112022007-11-09 22:49:28 +0000605
606 /* Get everything initialised and zeroed. */
607 tl_assert(admin_threads == NULL);
608 tl_assert(admin_locks == NULL);
sewardjb4112022007-11-09 22:49:28 +0000609
sewardjb4112022007-11-09 22:49:28 +0000610 tl_assert(map_threads == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000611 map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
sewardjb4112022007-11-09 22:49:28 +0000612
florian6bf37262012-10-21 03:23:36 +0000613 tl_assert(sizeof(Addr) == sizeof(UWord));
sewardjb4112022007-11-09 22:49:28 +0000614 tl_assert(map_locks == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000615 map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
616 NULL/*unboxed Word cmp*/);
sewardjb4112022007-11-09 22:49:28 +0000617
sewardjb4112022007-11-09 22:49:28 +0000618 tl_assert(univ_lsets == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000619 univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
620 8/*cacheSize*/ );
sewardjb4112022007-11-09 22:49:28 +0000621 tl_assert(univ_lsets != NULL);
sewardjffce8152011-06-24 10:09:41 +0000622 /* Ensure that univ_lsets is non-empty, with lockset zero being the
623 empty lockset. hg_errors.c relies on the assumption that
624 lockset number zero in univ_lsets is always valid. */
625 wsid = HG_(emptyWS)(univ_lsets);
626 tl_assert(wsid == 0);
sewardjb4112022007-11-09 22:49:28 +0000627
628 tl_assert(univ_laog == NULL);
sewardjc1fb9d22011-02-28 09:03:44 +0000629 if (HG_(clo_track_lockorders)) {
630 univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
631 HG_(free), 24/*cacheSize*/ );
632 tl_assert(univ_laog != NULL);
633 }
sewardjb4112022007-11-09 22:49:28 +0000634
635 /* Set up entries for the root thread */
636 // FIXME: this assumes that the first real ThreadId is 1
637
sewardjb4112022007-11-09 22:49:28 +0000638 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +0000639 thr = mk_Thread(hbthr_root);
640 thr->coretid = 1; /* FIXME: hardwires an assumption about the
641 identity of the root thread. */
sewardj60626642011-03-10 15:14:37 +0000642 tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
643 libhb_set_Thr_hgthread(hbthr_root, thr);
sewardjb4112022007-11-09 22:49:28 +0000644
sewardjf98e1c02008-10-25 16:22:41 +0000645 /* and bind it in the thread-map table. */
646 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
647 tl_assert(thr->coretid != VG_INVALID_THREADID);
sewardjb4112022007-11-09 22:49:28 +0000648
sewardjf98e1c02008-10-25 16:22:41 +0000649 map_threads[thr->coretid] = thr;
sewardjb4112022007-11-09 22:49:28 +0000650
651 tl_assert(VG_INVALID_THREADID == 0);
652
sewardjb4112022007-11-09 22:49:28 +0000653 all__sanity_check("initialise_data_structures");
654}
655
656
657/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000658/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000659/*----------------------------------------------------------------*/
660
661/* Doesn't assert if the relevant map_threads entry is NULL. */
662static Thread* map_threads_maybe_lookup ( ThreadId coretid )
663{
664 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000665 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000666 thr = map_threads[coretid];
667 return thr;
668}
669
670/* Asserts if the relevant map_threads entry is NULL. */
671static inline Thread* map_threads_lookup ( ThreadId coretid )
672{
673 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000674 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000675 thr = map_threads[coretid];
676 tl_assert(thr);
677 return thr;
678}
679
sewardjf98e1c02008-10-25 16:22:41 +0000680/* Do a reverse lookup. Does not assert if 'thr' is not found in
681 map_threads. */
sewardjb4112022007-11-09 22:49:28 +0000682static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
683{
sewardjf98e1c02008-10-25 16:22:41 +0000684 ThreadId tid;
685 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000686 /* Check nobody used the invalid-threadid slot */
687 tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
688 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000689 tid = thr->coretid;
690 tl_assert(HG_(is_sane_ThreadId)(tid));
691 return tid;
sewardjb4112022007-11-09 22:49:28 +0000692}
693
694/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
695 is not found in map_threads. */
696static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
697{
698 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
699 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000700 tl_assert(map_threads[tid]);
701 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000702 return tid;
703}
704
705static void map_threads_delete ( ThreadId coretid )
706{
707 Thread* thr;
708 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000709 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000710 thr = map_threads[coretid];
711 tl_assert(thr);
712 map_threads[coretid] = NULL;
713}
714
sewardj8eb8bab2015-07-21 14:44:28 +0000715static void HG_(thread_enter_synchr)(Thread *thr) {
716 tl_assert(thr->synchr_nesting >= 0);
717#if defined(VGO_solaris)
718 thr->synchr_nesting += 1;
719#endif /* VGO_solaris */
720}
721
722static void HG_(thread_leave_synchr)(Thread *thr) {
723#if defined(VGO_solaris)
724 thr->synchr_nesting -= 1;
725#endif /* VGO_solaris */
726 tl_assert(thr->synchr_nesting >= 0);
727}
728
729static void HG_(thread_enter_pthread_create)(Thread *thr) {
730 tl_assert(thr->pthread_create_nesting_level >= 0);
731 thr->pthread_create_nesting_level += 1;
732}
733
734static void HG_(thread_leave_pthread_create)(Thread *thr) {
735 tl_assert(thr->pthread_create_nesting_level > 0);
736 thr->pthread_create_nesting_level -= 1;
737}
738
739static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
740 Thread *thr = map_threads_maybe_lookup(tid);
741 return thr->pthread_create_nesting_level;
742}
sewardjb4112022007-11-09 22:49:28 +0000743
744/*----------------------------------------------------------------*/
745/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
746/*----------------------------------------------------------------*/
747
748/* Make sure there is a lock table entry for the given (lock) guest
749 address. If not, create one of the stated 'kind' in unheld state.
750 In any case, return the address of the existing or new Lock. */
751static
752Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
753{
754 Bool found;
755 Lock* oldlock = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000756 tl_assert(HG_(is_sane_ThreadId)(tid));
sewardj896f6f92008-08-19 08:38:52 +0000757 found = VG_(lookupFM)( map_locks,
florian6bf37262012-10-21 03:23:36 +0000758 NULL, (UWord*)&oldlock, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000759 if (!found) {
760 Lock* lock = mk_LockN(lkk, ga);
761 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
sewardjf98e1c02008-10-25 16:22:41 +0000762 tl_assert(HG_(is_sane_LockN)(lock));
florian6bf37262012-10-21 03:23:36 +0000763 VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +0000764 tl_assert(oldlock == NULL);
sewardjb4112022007-11-09 22:49:28 +0000765 return lock;
766 } else {
767 tl_assert(oldlock != NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000768 tl_assert(HG_(is_sane_LockN)(oldlock));
sewardjb4112022007-11-09 22:49:28 +0000769 tl_assert(oldlock->guestaddr == ga);
sewardjb4112022007-11-09 22:49:28 +0000770 return oldlock;
771 }
772}
773
774static Lock* map_locks_maybe_lookup ( Addr ga )
775{
776 Bool found;
777 Lock* lk = NULL;
florian6bf37262012-10-21 03:23:36 +0000778 found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000779 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000780 return lk;
781}
782
783static void map_locks_delete ( Addr ga )
784{
785 Addr ga2 = 0;
786 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000787 VG_(delFromFM)( map_locks,
florian6bf37262012-10-21 03:23:36 +0000788 (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000789 /* delFromFM produces the val which is being deleted, if it is
790 found. So assert it is non-null; that in effect asserts that we
791 are deleting a (ga, Lock) pair which actually exists. */
792 tl_assert(lk != NULL);
793 tl_assert(ga2 == ga);
794}
795
796
sewardjb4112022007-11-09 22:49:28 +0000797
798/*----------------------------------------------------------------*/
799/*--- Sanity checking the data structures ---*/
800/*----------------------------------------------------------------*/
801
802static UWord stats__sanity_checks = 0;
803
florian6bf37262012-10-21 03:23:36 +0000804static void laog__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000805
806/* REQUIRED INVARIANTS:
807
808 Thread vs Segment/Lock/SecMaps
809
810 for each t in Threads {
811
812 // Thread.lockset: each element is really a valid Lock
813
814 // Thread.lockset: each Lock in set is actually held by that thread
815 for lk in Thread.lockset
816 lk == LockedBy(t)
817
818 // Thread.csegid is a valid SegmentID
819 // and the associated Segment has .thr == t
820
821 }
822
823 all thread Locksets are pairwise empty under intersection
824 (that is, no lock is claimed to be held by more than one thread)
825 -- this is guaranteed if all locks in locksets point back to their
826 owner threads
827
828 Lock vs Thread/Segment/SecMaps
829
830 for each entry (gla, la) in map_locks
831 gla == la->guest_addr
832
833 for each lk in Locks {
834
835 lk->tag is valid
836 lk->guest_addr does not have shadow state NoAccess
837 if lk == LockedBy(t), then t->lockset contains lk
838 if lk == UnlockedBy(segid) then segid is valid SegmentID
839 and can be mapped to a valid Segment(seg)
840 and seg->thr->lockset does not contain lk
841 if lk == UnlockedNew then (no lockset contains lk)
842
843 secmaps for lk has .mbHasLocks == True
844
845 }
846
847 Segment vs Thread/Lock/SecMaps
848
849 the Segment graph is a dag (no cycles)
850 all of the Segment graph must be reachable from the segids
851 mentioned in the Threads
852
853 for seg in Segments {
854
855 seg->thr is a sane Thread
856
857 }
858
859 SecMaps vs Segment/Thread/Lock
860
861 for sm in SecMaps {
862
863 sm properly aligned
864 if any shadow word is ShR or ShM then .mbHasShared == True
865
866 for each Excl(segid) state
867 map_segments_lookup maps to a sane Segment(seg)
868 for each ShM/ShR(tsetid,lsetid) state
869 each lk in lset is a valid Lock
870 each thr in tset is a valid thread, which is non-dead
871
872 }
873*/
874
875
876/* Return True iff 'thr' holds 'lk' in some mode. */
877static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
878{
879 if (lk->heldBy)
florian6bf37262012-10-21 03:23:36 +0000880 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000881 else
882 return False;
883}
884
/* Sanity check Threads, as far as possible.  Walks the whole
   admin_threads list and verifies for each thread: the thread itself
   is sane; its write-held lockset is a subset of its all-held
   lockset; and every lock in its lockset is a valid Lock that really
   records this thread as a holder.  'who' identifies the caller in
   the failure message.  Aborts via tl_assert(0) on the first
   violation. */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
/* On failure, remember a short tag naming the failed check and jump
   to the reporting code at 'bad'. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   UWord ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      /* Expand the all-held lockset into a word array and check each
         member. */
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
918
919
/* Sanity check Locks, as far as possible.  Verifies that admin_locks
   and map_locks agree in size and content, that every Lock is sane,
   that map_locks maps each lock's guest address back to that lock,
   and that lock holder info is consistent with the holders' own
   locksets.  'who' identifies the caller in the failure message.
   Aborts via tl_assert(0) on the first violation. */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
/* On failure, remember a short tag naming the failed check and jump
   to the reporting code at 'bad'. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
984
985
florian6bf37262012-10-21 03:23:36 +0000986static void all_except_Locks__sanity_check ( const HChar* who ) {
sewardjb4112022007-11-09 22:49:28 +0000987 stats__sanity_checks++;
988 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
989 threads__sanity_check(who);
sewardjc1fb9d22011-02-28 09:03:44 +0000990 if (HG_(clo_track_lockorders))
991 laog__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000992}
florian6bf37262012-10-21 03:23:36 +0000993static void all__sanity_check ( const HChar* who ) {
sewardjb4112022007-11-09 22:49:28 +0000994 all_except_Locks__sanity_check(who);
995 locks__sanity_check(who);
996}
997
998
999/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00001000/*--- Shadow value and address range handlers ---*/
1001/*----------------------------------------------------------------*/
1002
1003static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001004//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001005static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001006__attribute__((noinline))
1007static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001008
sewardjb4112022007-11-09 22:49:28 +00001009
1010/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +00001011/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1012 Is that a problem? (hence 'scopy' rather than 'ccopy') */
1013static void shadow_mem_scopy_range ( Thread* thr,
1014 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +00001015{
1016 Thr* hbthr = thr->hbthr;
1017 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001018 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +00001019}
1020
sewardj23f12002009-07-24 08:45:08 +00001021static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1022{
sewardjf98e1c02008-10-25 16:22:41 +00001023 Thr* hbthr = thr->hbthr;
1024 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001025 LIBHB_CREAD_N(hbthr, a, len);
1026}
1027
1028static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1029 Thr* hbthr = thr->hbthr;
1030 tl_assert(hbthr);
1031 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001032}
1033
1034static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1035{
sewardj23f12002009-07-24 08:45:08 +00001036 libhb_srange_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001037}
1038
sewardjfd35d492011-03-17 19:39:55 +00001039static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
sewardjb4112022007-11-09 22:49:28 +00001040{
sewardjb4112022007-11-09 22:49:28 +00001041 if (0 && len > 500)
florian5e5cb002015-08-03 21:21:42 +00001042 VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
sewardjfd35d492011-03-17 19:39:55 +00001043 // has no effect (NoFX)
1044 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1045}
1046
1047static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
1048{
1049 if (0 && len > 500)
florian5e5cb002015-08-03 21:21:42 +00001050 VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
sewardjfd35d492011-03-17 19:39:55 +00001051 // Actually Has An Effect (AHAE)
1052 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001053}
1054
sewardj406bac82010-03-03 23:03:40 +00001055static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1056{
1057 if (0 && len > 500)
florian5e5cb002015-08-03 21:21:42 +00001058 VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
sewardj406bac82010-03-03 23:03:40 +00001059 libhb_srange_untrack( thr->hbthr, aIN, len );
1060}
1061
sewardjb4112022007-11-09 22:49:28 +00001062
1063/*----------------------------------------------------------------*/
1064/*--- Event handlers (evh__* functions) ---*/
1065/*--- plus helpers (evhH__* functions) ---*/
1066/*----------------------------------------------------------------*/
1067
1068/*--------- Event handler helpers (evhH__* functions) ---------*/
1069
1070/* Create a new segment for 'thr', making it depend (.prev) on its
1071 existing segment, bind together the SegmentID and Segment, and
1072 return both of them. Also update 'thr' so it references the new
1073 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001074//zz static
1075//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1076//zz /*OUT*/Segment** new_segP,
1077//zz Thread* thr )
1078//zz {
1079//zz Segment* cur_seg;
1080//zz tl_assert(new_segP);
1081//zz tl_assert(new_segidP);
1082//zz tl_assert(HG_(is_sane_Thread)(thr));
1083//zz cur_seg = map_segments_lookup( thr->csegid );
1084//zz tl_assert(cur_seg);
1085//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1086//zz at their owner thread. */
1087//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1088//zz *new_segidP = alloc_SegmentID();
1089//zz map_segments_add( *new_segidP, *new_segP );
1090//zz thr->csegid = *new_segidP;
1091//zz }
sewardjb4112022007-11-09 22:49:28 +00001092
1093
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks.
   'thr'     - the acquiring thread (must be sane);
   'lkk'     - the kind of lock the wrapper context says this is;
   'lock_ga' - the lock's guest address.
   Reports client/libpthread bugs via HG_(record_error_Misc); on the
   error paths the lock state is left unmodified. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1189
1190
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks.
   'thr'     - the acquiring thread (must be sane);
   'lkk'     - must be LK_rdwr (only rwlocks can be read-locked);
   'lock_ga' - the lock's guest address.
   Reports client/libpthread bugs via HG_(record_error_Misc); on the
   error path the lock state is left unmodified. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1264
1265
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks.
   'thr'     - the releasing thread (must be sane);
   'lock_ga' - the lock's guest address;
   'isRDWR'  - True iff the wrapper context says this is a rwlock.
   Reports client bugs (unlock of unknown / unheld / foreign lock,
   mutex-vs-rwlock API mismatch) and, for a genuine release, updates
   the lock, the thread's locksets, and the lock's libhb SO. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* Complain if the unlock primitive used does not match the lock's
      recorded kind (mutex API on a rwlock, or vice versa). */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1404
1405
sewardj9f569b72008-11-13 13:33:09 +00001406/* ---------------------------------------------------------- */
1407/* -------- Event handlers proper (evh__* functions) -------- */
1408/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001409
1410/* What is the Thread* for the currently running thread? This is
1411 absolutely performance critical. We receive notifications from the
1412 core for client code starts/stops, and cache the looked-up result
1413 in 'current_Thread'. Hence, for the vast majority of requests,
1414 finding the current thread reduces to a read of a global variable,
1415 provided get_current_Thread_in_C_C is inlined.
1416
1417 Outside of client code, current_Thread is NULL, and presumably
1418 any uses of it will cause a segfault. Hence:
1419
1420 - for uses definitely within client code, use
1421 get_current_Thread_in_C_C.
1422
1423 - for all other uses, use get_current_Thread.
1424*/
1425
sewardj23f12002009-07-24 08:45:08 +00001426static Thread *current_Thread = NULL,
1427 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001428
1429static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1430 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1431 tl_assert(current_Thread == NULL);
1432 current_Thread = map_threads_lookup( tid );
1433 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001434 if (current_Thread != current_Thread_prev) {
1435 libhb_Thr_resumes( current_Thread->hbthr );
1436 current_Thread_prev = current_Thread;
1437 }
sewardjb4112022007-11-09 22:49:28 +00001438}
/* Client code for 'tid' has stopped running: invalidate the
   current_Thread cache and give libhb an opportunity to
   garbage-collect its internal state. */
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
/* Fast path: return the cached Thread* for the currently running
   client code, or NULL when not inside client code.  Performance
   critical; see the big comment above. */
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
1448static inline Thread* get_current_Thread ( void ) {
1449 ThreadId coretid;
1450 Thread* thr;
1451 thr = get_current_Thread_in_C_C();
1452 if (LIKELY(thr))
1453 return thr;
1454 /* evidently not in client code. Do it the slow way. */
1455 coretid = VG_(get_running_tid)();
1456 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001457 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001458 of initial memory layout) and VG_(get_running_tid)() returns
1459 VG_INVALID_THREADID at that point. */
1460 if (coretid == VG_INVALID_THREADID)
1461 coretid = 1; /* KLUDGE */
1462 thr = map_threads_lookup( coretid );
1463 return thr;
1464}
1465
1466static
1467void evh__new_mem ( Addr a, SizeT len ) {
sewardj8eb8bab2015-07-21 14:44:28 +00001468 Thread *thr = get_current_Thread();
sewardjb4112022007-11-09 22:49:28 +00001469 if (SHOW_EVENTS >= 2)
1470 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
sewardj8eb8bab2015-07-21 14:44:28 +00001471 shadow_mem_make_New( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001472 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001473 all__sanity_check("evh__new_mem-post");
sewardj8eb8bab2015-07-21 14:44:28 +00001474 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1475 shadow_mem_make_Untracked( thr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001476}
1477
1478static
sewardj1f77fec2010-04-12 19:51:04 +00001479void evh__new_mem_stack ( Addr a, SizeT len ) {
sewardj8eb8bab2015-07-21 14:44:28 +00001480 Thread *thr = get_current_Thread();
sewardj1f77fec2010-04-12 19:51:04 +00001481 if (SHOW_EVENTS >= 2)
1482 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
sewardj8eb8bab2015-07-21 14:44:28 +00001483 shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
sewardj1f77fec2010-04-12 19:51:04 +00001484 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1485 all__sanity_check("evh__new_mem_stack-post");
sewardj8eb8bab2015-07-21 14:44:28 +00001486 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1487 shadow_mem_make_Untracked( thr, a, len );
sewardj1f77fec2010-04-12 19:51:04 +00001488}
1489
1490static
sewardj7cf4e6b2008-05-01 20:24:26 +00001491void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
sewardj8eb8bab2015-07-21 14:44:28 +00001492 Thread *thr = get_current_Thread();
sewardj7cf4e6b2008-05-01 20:24:26 +00001493 if (SHOW_EVENTS >= 2)
1494 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
sewardj8eb8bab2015-07-21 14:44:28 +00001495 shadow_mem_make_New( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001496 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001497 all__sanity_check("evh__new_mem_w_tid-post");
sewardj8eb8bab2015-07-21 14:44:28 +00001498 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1499 shadow_mem_make_Untracked( thr, a, len );
sewardj7cf4e6b2008-05-01 20:24:26 +00001500}
1501
1502static
sewardjb4112022007-11-09 22:49:28 +00001503void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001504 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardj8eb8bab2015-07-21 14:44:28 +00001505 Thread *thr = get_current_Thread();
sewardjb4112022007-11-09 22:49:28 +00001506 if (SHOW_EVENTS >= 1)
1507 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1508 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
sewardj8eb8bab2015-07-21 14:44:28 +00001509 if (rr || ww || xx) {
1510 shadow_mem_make_New( thr, a, len );
1511 if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1512 shadow_mem_make_Untracked( thr, a, len );
1513 }
sewardjf98e1c02008-10-25 16:22:41 +00001514 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001515 all__sanity_check("evh__new_mem_w_perms-post");
1516}
1517
1518static
1519void evh__set_perms ( Addr a, SizeT len,
1520 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001521 // This handles mprotect requests. If the memory is being put
1522 // into no-R no-W state, paint it as NoAccess, for the reasons
1523 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001524 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001525 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001526 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1527 /* Hmm. What should we do here, that actually makes any sense?
1528 Let's say: if neither readable nor writable, then declare it
1529 NoAccess, else leave it alone. */
1530 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001531 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001532 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001533 all__sanity_check("evh__set_perms-post");
1534}
1535
1536static
1537void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001538 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001539 if (SHOW_EVENTS >= 2)
1540 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001541 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001542 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001543 all__sanity_check("evh__die_mem-post");
1544}
1545
1546static
sewardjfd35d492011-03-17 19:39:55 +00001547void evh__die_mem_munmap ( Addr a, SizeT len ) {
1548 // It's important that libhb doesn't ignore this. If, as is likely,
1549 // the client is subject to address space layout randomization,
1550 // then unmapped areas may never get remapped over, even in long
1551 // runs. If we just ignore them we wind up with large resource
1552 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1553 // VTS references in the affected area are dropped. Marking memory
1554 // as NoAccess is expensive, but we assume that munmap is sufficiently
1555 // rare that the space gains of doing this are worth the costs.
1556 if (SHOW_EVENTS >= 2)
1557 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1558 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1559}
1560
1561static
sewardj406bac82010-03-03 23:03:40 +00001562void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001563 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001564 if (SHOW_EVENTS >= 2)
1565 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1566 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1567 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1568 all__sanity_check("evh__untrack_mem-post");
1569}
1570
1571static
sewardj23f12002009-07-24 08:45:08 +00001572void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1573 if (SHOW_EVENTS >= 2)
1574 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
sewardj8eb8bab2015-07-21 14:44:28 +00001575 Thread *thr = get_current_Thread();
1576 if (LIKELY(thr->synchr_nesting == 0))
1577 shadow_mem_scopy_range( thr , src, dst, len );
sewardj23f12002009-07-24 08:45:08 +00001578 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1579 all__sanity_check("evh__copy_mem-post");
1580}
1581
/* The core is about to create low-level thread 'child' on behalf of
   'parent'.  Create the libhb Thr and Helgrind Thread records for
   the child, bind them into map_threads, record the creation point
   for error messages, and (optionally) start ignoring accesses made
   during thread creation. */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      /* The parent must already be known; the child's slot must be
         empty. */
      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      /* The child's libhb Thr inherits the parent's vector clock. */
      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On x86/amd64-linux, this entails a nasty glibc specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
        first_ip_delta = -3;
#       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
        first_ip_delta = -1;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }

      /* If requested, suppress race reports for accesses performed
         while inside pthread_create. */
      if (HG_(clo_ignore_thread_creation)) {
         HG_(thread_enter_pthread_create)(thr_c);
         tl_assert(thr_c->synchr_nesting == 0);
         HG_(thread_enter_synchr)(thr_c);
         /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1653
/* Low-level thread 'quit_tid' is exiting without having been joined.
   Complain if it still holds locks, notify libhb of the async exit,
   and free its map_threads slot so the core can re-use it. */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* buf is comfortably large enough for this fixed-shape
         message. */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1706
sewardj61bc2c52011-02-09 10:34:00 +00001707/* This is called immediately after fork, for the child only. 'tid'
1708 is the only surviving thread (as per POSIX rules on fork() in
1709 threaded programs), so we have to clean up map_threads to remove
1710 entries for any other threads. */
1711static
1712void evh__atfork_child ( ThreadId tid )
1713{
1714 UInt i;
1715 Thread* thr;
1716 /* Slot 0 should never be used. */
1717 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1718 tl_assert(!thr);
1719 /* Clean up all other slots except 'tid'. */
1720 for (i = 1; i < VG_N_THREADS; i++) {
1721 if (i == tid)
1722 continue;
1723 thr = map_threads_maybe_lookup(i);
1724 if (!thr)
1725 continue;
1726 /* Cleanup actions (next 5 lines) copied from end of
1727 evh__pre_thread_ll_exit; keep in sync. */
1728 tl_assert(thr->hbthr);
1729 libhb_async_exit(thr->hbthr);
1730 tl_assert(thr->coretid == i);
1731 thr->coretid = VG_INVALID_THREADID;
1732 map_threads_delete(i);
1733 }
1734}
1735
philipped40aff52014-06-16 20:00:14 +00001736/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
sewardjb4112022007-11-09 22:49:28 +00001737static
philipped40aff52014-06-16 20:00:14 +00001738void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
sewardjb4112022007-11-09 22:49:28 +00001739{
sewardjf98e1c02008-10-25 16:22:41 +00001740 SO* so;
sewardjf98e1c02008-10-25 16:22:41 +00001741 /* Allocate a temporary synchronisation object and use it to send
1742 an imaginary message from the quitter to the stayer, the purpose
1743 being to generate a dependence from the quitter to the
1744 stayer. */
1745 so = libhb_so_alloc();
1746 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001747 /* Send last arg of _so_send as False, since the sending thread
1748 doesn't actually exist any more, so we don't want _so_send to
1749 try taking stack snapshots of it. */
sewardjffce8152011-06-24 10:09:41 +00001750 libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
sewardjf98e1c02008-10-25 16:22:41 +00001751 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1752 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001753
sewardjffce8152011-06-24 10:09:41 +00001754 /* Tell libhb that the quitter has been reaped. Note that we might
1755 have to be cleverer about this, to exclude 2nd and subsequent
1756 notifications for the same hbthr_q, in the case where the app is
1757 buggy (calls pthread_join twice or more on the same thread) AND
1758 where libpthread is also buggy and doesn't return ESRCH on
1759 subsequent calls. (If libpthread isn't thusly buggy, then the
1760 wrapper for pthread_join in hg_intercepts.c will stop us getting
1761 notified here multiple times for the same joinee.) See also
1762 comments in helgrind/tests/jointwice.c. */
1763 libhb_joinedwith_done(hbthr_q);
philipped40aff52014-06-16 20:00:14 +00001764}
1765
1766
/* pthread_join by 'stay_tid' on quitter 'quit_thr' has completed:
   establish a happens-before dependence from the quitter's final
   state to the stayer, and sanity-check that the quitter's low-level
   exit was already processed. */
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   generate_quitter_stayer_dependence (hbthr_q, hbthr_s);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1810
1811static
floriane543f302012-10-21 19:43:43 +00001812void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001813 Addr a, SizeT size) {
1814 if (SHOW_EVENTS >= 2
1815 || (SHOW_EVENTS >= 1 && size != 1))
1816 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1817 (Int)tid, s, (void*)a, size );
sewardj8eb8bab2015-07-21 14:44:28 +00001818 Thread *thr = map_threads_lookup(tid);
1819 if (LIKELY(thr->synchr_nesting == 0))
1820 shadow_mem_cread_range(thr, a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001821 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001822 all__sanity_check("evh__pre_mem_read-post");
1823}
1824
1825static
1826void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
floriane543f302012-10-21 19:43:43 +00001827 const HChar* s, Addr a ) {
sewardjb4112022007-11-09 22:49:28 +00001828 Int len;
1829 if (SHOW_EVENTS >= 1)
1830 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1831 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001832 // Don't segfault if the string starts in an obviously stupid
1833 // place. Actually we should check the whole string, not just
1834 // the start address, but that's too much trouble. At least
1835 // checking the first byte is better than nothing. See #255009.
1836 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1837 return;
sewardj8eb8bab2015-07-21 14:44:28 +00001838 Thread *thr = map_threads_lookup(tid);
florian19f91bb2012-11-10 22:29:54 +00001839 len = VG_(strlen)( (HChar*) a );
sewardj8eb8bab2015-07-21 14:44:28 +00001840 if (LIKELY(thr->synchr_nesting == 0))
1841 shadow_mem_cread_range( thr, a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001842 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001843 all__sanity_check("evh__pre_mem_read_asciiz-post");
1844}
1845
1846static
floriane543f302012-10-21 19:43:43 +00001847void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001848 Addr a, SizeT size ) {
1849 if (SHOW_EVENTS >= 1)
1850 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1851 (Int)tid, s, (void*)a, size );
sewardj8eb8bab2015-07-21 14:44:28 +00001852 Thread *thr = map_threads_lookup(tid);
1853 if (LIKELY(thr->synchr_nesting == 0))
1854 shadow_mem_cwrite_range(thr, a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001855 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001856 all__sanity_check("evh__pre_mem_write-post");
1857}
1858
1859static
1860void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1861 if (SHOW_EVENTS >= 1)
1862 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1863 (void*)a, len, (Int)is_inited );
sewardj438c4712014-09-05 20:29:10 +00001864 // We ignore the initialisation state (is_inited); that's ok.
1865 shadow_mem_make_New(get_current_Thread(), a, len);
sewardjf98e1c02008-10-25 16:22:41 +00001866 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001867 all__sanity_check("evh__pre_mem_read-post");
1868}
1869
1870static
1871void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001872 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001873 if (SHOW_EVENTS >= 1)
1874 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001875 thr = get_current_Thread();
1876 tl_assert(thr);
1877 if (HG_(clo_free_is_write)) {
1878 /* Treat frees as if the memory was written immediately prior to
1879 the free. This shakes out more races, specifically, cases
1880 where memory is referenced by one thread, and freed by
1881 another, and there's no observable synchronisation event to
1882 guarantee that the reference happens before the free. */
sewardj8eb8bab2015-07-21 14:44:28 +00001883 if (LIKELY(thr->synchr_nesting == 0))
1884 shadow_mem_cwrite_range(thr, a, len);
sewardj622fe492011-03-11 21:06:59 +00001885 }
philippef54cb662015-05-10 22:19:31 +00001886 shadow_mem_make_NoAccess_AHAE( thr, a, len );
1887 /* We used to call instead
1888 shadow_mem_make_NoAccess_NoFX( thr, a, len );
1889 A non-buggy application will not access anymore
1890 the freed memory, and so marking no access is in theory useless.
1891 Not marking freed memory would avoid the overhead for applications
1892 doing mostly malloc/free, as the freed memory should then be recycled
1893 very quickly after marking.
1894 We rather mark it noaccess for the following reasons:
1895 * accessibility bits then always correctly represents the memory
1896 status (e.g. for the client request VALGRIND_HG_GET_ABITS).
1897 * the overhead is reasonable (about 5 seconds per Gb in 1000 bytes
1898 blocks, on a ppc64le, for a unrealistic workload of an application
1899 doing only malloc/free).
1900 * marking no access allows to GC the SecMap, which might improve
1901 performance and/or memory usage.
1902 * we might detect more applications bugs when memory is marked
1903 noaccess.
1904 If needed, we could support here an option --free-is-noaccess=yes|no
1905 to avoid marking freed memory as no access if some applications
1906 would need to avoid the marking noaccess overhead. */
1907
sewardjf98e1c02008-10-25 16:22:41 +00001908 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001909 all__sanity_check("evh__pre_mem_read-post");
1910}
1911
sewardj23f12002009-07-24 08:45:08 +00001912/* --- Event handlers called from generated code --- */
1913
sewardjb4112022007-11-09 22:49:28 +00001914static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001915void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001916 Thread* thr = get_current_Thread_in_C_C();
1917 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001918 if (LIKELY(thr->synchr_nesting == 0))
1919 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001920}
sewardjf98e1c02008-10-25 16:22:41 +00001921
sewardjb4112022007-11-09 22:49:28 +00001922static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001923void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001924 Thread* thr = get_current_Thread_in_C_C();
1925 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001926 if (LIKELY(thr->synchr_nesting == 0))
1927 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001928}
sewardjf98e1c02008-10-25 16:22:41 +00001929
sewardjb4112022007-11-09 22:49:28 +00001930static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001931void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001932 Thread* thr = get_current_Thread_in_C_C();
1933 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001934 if (LIKELY(thr->synchr_nesting == 0))
1935 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001936}
sewardjf98e1c02008-10-25 16:22:41 +00001937
sewardjb4112022007-11-09 22:49:28 +00001938static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001939void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001940 Thread* thr = get_current_Thread_in_C_C();
1941 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001942 if (LIKELY(thr->synchr_nesting == 0))
1943 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001944}
sewardjf98e1c02008-10-25 16:22:41 +00001945
sewardjb4112022007-11-09 22:49:28 +00001946static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001947void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001948 Thread* thr = get_current_Thread_in_C_C();
1949 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001950 if (LIKELY(thr->synchr_nesting == 0))
1951 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001952}
1953
1954static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001955void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001956 Thread* thr = get_current_Thread_in_C_C();
1957 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001958 if (LIKELY(thr->synchr_nesting == 0))
1959 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001960}
sewardjf98e1c02008-10-25 16:22:41 +00001961
sewardjb4112022007-11-09 22:49:28 +00001962static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001963void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001964 Thread* thr = get_current_Thread_in_C_C();
1965 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001966 if (LIKELY(thr->synchr_nesting == 0))
1967 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001968}
sewardjf98e1c02008-10-25 16:22:41 +00001969
sewardjb4112022007-11-09 22:49:28 +00001970static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001971void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001972 Thread* thr = get_current_Thread_in_C_C();
1973 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001974 if (LIKELY(thr->synchr_nesting == 0))
1975 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001976}
sewardjf98e1c02008-10-25 16:22:41 +00001977
sewardjb4112022007-11-09 22:49:28 +00001978static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001979void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001980 Thread* thr = get_current_Thread_in_C_C();
1981 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001982 if (LIKELY(thr->synchr_nesting == 0))
1983 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001984}
sewardjf98e1c02008-10-25 16:22:41 +00001985
sewardjb4112022007-11-09 22:49:28 +00001986static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001987void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001988 Thread* thr = get_current_Thread_in_C_C();
1989 Thr* hbthr = thr->hbthr;
sewardj8eb8bab2015-07-21 14:44:28 +00001990 if (LIKELY(thr->synchr_nesting == 0))
1991 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001992}
1993
sewardjb4112022007-11-09 22:49:28 +00001994
sewardj9f569b72008-11-13 13:33:09 +00001995/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001996/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001997/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001998
1999/* EXPOSITION only: by intercepting lock init events we can show the
2000 user where the lock was initialised, rather than only being able to
2001 show where it was first locked. Intercepting lock initialisations
2002 is not necessary for the basic operation of the race checker. */
2003static
2004void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
2005 void* mutex, Word mbRec )
2006{
2007 if (SHOW_EVENTS >= 1)
2008 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
2009 (Int)tid, mbRec, (void*)mutex );
2010 tl_assert(mbRec == 0 || mbRec == 1);
2011 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
2012 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002013 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002014 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
2015}
2016
/* pthread_mutex_destroy is about to run on 'mutex'.  Report misuse
   (invalid argument, destroying a locked mutex), then retire our
   Lock record for it, if any.  mutex_is_init is True when the
   object still has the PTHREAD_MUTEX_INITIALIZER bit pattern. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   /* Unknown lock, or a lock of the wrong kind (e.g. a rwlock):
      complain, but still fall through to the cleanup below if we do
      have a record for it. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop the lock from the lock-order graph (if tracked), the
         address map, and finally free the record itself. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
2074
2075static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
2076 void* mutex, Word isTryLock )
2077{
2078 /* Just check the mutex is sane; nothing else to do. */
2079 // 'mutex' may be invalid - not checked by wrapper
2080 Thread* thr;
2081 Lock* lk;
2082 if (SHOW_EVENTS >= 1)
2083 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
2084 (Int)tid, (void*)mutex );
2085
2086 tl_assert(isTryLock == 0 || isTryLock == 1);
2087 thr = map_threads_maybe_lookup( tid );
2088 tl_assert(thr); /* cannot fail - Thread* must already exist */
2089
2090 lk = map_locks_maybe_lookup( (Addr)mutex );
2091
2092 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00002093 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2094 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002095 }
2096
2097 if ( lk
2098 && isTryLock == 0
2099 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2100 && lk->heldBy
2101 && lk->heldW
florian6bf37262012-10-21 03:23:36 +00002102 && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00002103 /* uh, it's a non-recursive lock and we already w-hold it, and
2104 this is a real lock operation (not a speculative "tryLock"
2105 kind of thing). Duh. Deadlock coming up; but at least
2106 produce an error message. */
florian6bd9dc12012-11-23 16:17:43 +00002107 const HChar* errstr = "Attempt to re-lock a "
2108 "non-recursive lock I already hold";
2109 const HChar* auxstr = "Lock was previously acquired";
sewardj8fef6252010-07-29 05:28:02 +00002110 if (lk->acquired_at) {
2111 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2112 } else {
2113 HG_(record_error_Misc)( thr, errstr );
2114 }
sewardjb4112022007-11-09 22:49:28 +00002115 }
2116}
2117
2118static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2119{
2120 // only called if the real library call succeeded - so mutex is sane
2121 Thread* thr;
2122 if (SHOW_EVENTS >= 1)
2123 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2124 (Int)tid, (void*)mutex );
2125
2126 thr = map_threads_maybe_lookup( tid );
2127 tl_assert(thr); /* cannot fail - Thread* must already exist */
2128
2129 evhH__post_thread_w_acquires_lock(
2130 thr,
2131 LK_mbRec, /* if not known, create new lock with this LockKind */
2132 (Addr)mutex
2133 );
2134}
2135
2136static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2137{
2138 // 'mutex' may be invalid - not checked by wrapper
2139 Thread* thr;
2140 if (SHOW_EVENTS >= 1)
2141 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2142 (Int)tid, (void*)mutex );
2143
2144 thr = map_threads_maybe_lookup( tid );
2145 tl_assert(thr); /* cannot fail - Thread* must already exist */
2146
2147 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2148}
2149
2150static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2151{
2152 // only called if the real library call succeeded - so mutex is sane
2153 Thread* thr;
2154 if (SHOW_EVENTS >= 1)
2155 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2156 (Int)tid, (void*)mutex );
2157 thr = map_threads_maybe_lookup( tid );
2158 tl_assert(thr); /* cannot fail - Thread* must already exist */
2159
2160 // anything we should do here?
2161}
2162
2163
sewardj5a644da2009-08-11 10:35:58 +00002164/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002165/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002166/* ------------------------------------------------------- */
2167
2168/* All a bit of a kludge. Pretend we're really dealing with ordinary
2169 pthread_mutex_t's instead, for the most part. */
2170
2171static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2172 void* slock )
2173{
2174 Thread* thr;
2175 Lock* lk;
2176 /* In glibc's kludgey world, we're either initialising or unlocking
2177 it. Since this is the pre-routine, if it is locked, unlock it
2178 and take a dependence edge. Otherwise, do nothing. */
2179
2180 if (SHOW_EVENTS >= 1)
2181 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2182 "(ctid=%d, slock=%p)\n",
2183 (Int)tid, (void*)slock );
2184
2185 thr = map_threads_maybe_lookup( tid );
2186 /* cannot fail - Thread* must already exist */;
2187 tl_assert( HG_(is_sane_Thread)(thr) );
2188
2189 lk = map_locks_maybe_lookup( (Addr)slock );
2190 if (lk && lk->heldBy) {
2191 /* it's held. So do the normal pre-unlock actions, as copied
2192 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2193 duplicates the map_locks_maybe_lookup. */
2194 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2195 False/*!isRDWR*/ );
2196 }
2197}
2198
2199static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2200 void* slock )
2201{
2202 Lock* lk;
2203 /* More kludgery. If the lock has never been seen before, do
2204 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2205 nothing. */
2206
2207 if (SHOW_EVENTS >= 1)
2208 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2209 "(ctid=%d, slock=%p)\n",
2210 (Int)tid, (void*)slock );
2211
2212 lk = map_locks_maybe_lookup( (Addr)slock );
2213 if (!lk) {
2214 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2215 }
2216}
2217
/* Spinlock lock PRE: spinlocks are modelled as ordinary mutexes, so
   simply delegate to the mutex handler. */
static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}
2223
/* Spinlock lock POST: delegate to the mutex handler (see the kludge
   note at the top of this section). */
static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}
2229
/* Spinlock destroy PRE: delegate to the mutex destroy handler.  A
   spinlock has no static initialiser, hence isInit is always 0. */
static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}
2235
2236
sewardj9f569b72008-11-13 13:33:09 +00002237/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002238/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002239/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002240
/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;


/* pthread_cond_t* -> CVInfo* */
/* Lazily created by map_cond_to_CVInfo_INIT; never freed as a whole. */
static WordFM* map_cond_to_CVInfo = NULL;
2268
2269static void map_cond_to_CVInfo_INIT ( void ) {
2270 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2271 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2272 "hg.mctCI.1", HG_(free), NULL );
sewardjf98e1c02008-10-25 16:22:41 +00002273 }
2274}
2275
sewardj02114542009-07-28 20:52:36 +00002276static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002277 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002278 map_cond_to_CVInfo_INIT();
2279 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002280 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002281 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002282 } else {
sewardj02114542009-07-28 20:52:36 +00002283 SO* so = libhb_so_alloc();
2284 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2285 cvi->so = so;
2286 cvi->mx_ga = 0;
2287 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2288 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002289 }
2290}
2291
philippe8bfc2152012-07-06 23:38:24 +00002292static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2293 UWord key, val;
2294 map_cond_to_CVInfo_INIT();
2295 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2296 tl_assert(key == (UWord)cond);
2297 return (CVInfo*)val;
2298 } else {
2299 return NULL;
2300 }
2301}
2302
/* Remove the CV -> CVInfo binding for 'cond' (pthread_cond_destroy).
   Mirrors POSIX: destroying a CV that still has waiters is an error
   (EBUSY) and the CV is left intact.  Destroying an unknown CV is
   reported unless it still holds PTHREAD_COND_INITIALIZER, in which
   case it is assumed init'd-but-unused. */
static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread* thr;
   UWord keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
            " destruction of condition variable being waited upon");
         /* Destroying a cond var being waited upon outcome is EBUSY and
            variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      /* Release the SO and the CVInfo itself; nothing references them
         any more. */
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV.  So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}
2341
/* Handle pthread_cond_{signal,broadcast}: push this thread's vector
   clock into the CV's SO so that subsequently-released waiters acquire
   a happens-before edge from this point; also sanity-check the CV's
   associated mutex, if one is known. */
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread*   thr;
   CVInfo*   cvi;
   //Lock*     lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL.  Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error.  We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and if that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr,
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2420
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
/* Also establishes (or checks) the dynamic (CV,MX) binding mandated
   by POSIX: the first waiter binds the CV to 'mutex', and the binding
   must stay consistent while any waiters remain.  Bumps nWaiters. */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
         if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   /* Count this waiter; the matching decrement is in
      evh__HG_PTHREAD_COND_WAIT_POST. */
   cvi->nWaiters++;

   return lk_valid;
}
2489
2490static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
sewardjff427c92013-10-14 12:13:52 +00002491 void* cond, void* mutex,
2492 Bool timeout)
sewardjb4112022007-11-09 22:49:28 +00002493{
sewardjf98e1c02008-10-25 16:22:41 +00002494 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2495 the SO for this cond, and 'recv' from it so as to acquire a
2496 dependency edge back to the signaller/broadcaster. */
2497 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002498 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002499
2500 if (SHOW_EVENTS >= 1)
2501 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
sewardjff427c92013-10-14 12:13:52 +00002502 "(ctid=%d, cond=%p, mutex=%p)\n, timeout=%d",
2503 (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
sewardjb4112022007-11-09 22:49:28 +00002504
sewardjb4112022007-11-09 22:49:28 +00002505 thr = map_threads_maybe_lookup( tid );
2506 tl_assert(thr); /* cannot fail - Thread* must already exist */
2507
2508 // error-if: cond is also associated with a different mutex
2509
philippe8bfc2152012-07-06 23:38:24 +00002510 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2511 if (!cvi) {
2512 /* This could be either a bug in helgrind or the guest application
2513 that did an error (e.g. cond var was destroyed by another thread.
2514 Let's assume helgrind is perfect ...
2515 Note that this is similar to drd behaviour. */
2516 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2517 " being waited upon");
2518 return;
2519 }
2520
sewardj02114542009-07-28 20:52:36 +00002521 tl_assert(cvi);
2522 tl_assert(cvi->so);
2523 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002524
sewardjff427c92013-10-14 12:13:52 +00002525 if (!timeout && !libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002526 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2527 it? If this happened it would surely be a bug in the threads
2528 library. Or one of those fabled "spurious wakeups". */
2529 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002530 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002531 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002532 }
sewardjf98e1c02008-10-25 16:22:41 +00002533
2534 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002535 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2536
2537 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002538}
2539
/* pthread_cond_init completed: eagerly create the CVInfo/SO binding
   for this CV so the init point is recorded.  'cond_attr' is only
   traced, not interpreted. */
static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
                                             void* cond, void* cond_attr )
{
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
                  "(ctid=%d, cond=%p, cond_attr=%p)\n",
                  (Int)tid, (void*)cond, (void*) cond_attr );

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert (cvi);
   tl_assert (cvi->so);
}
2554
2555
static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond, Bool cond_is_init )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks.  'cond_is_init' means the CV still has the value
      PTHREAD_COND_INITIALIZER; map_cond_to_CVInfo_delete uses that to
      suppress the unknown-CV complaint. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p, cond_is_init=%d)\n",
                  (Int)tid, (void*)cond, (Int)cond_is_init );

   map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
}
2569
2570
sewardj9f569b72008-11-13 13:33:09 +00002571/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002572/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002573/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002574
2575/* EXPOSITION only */
2576static
2577void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2578{
2579 if (SHOW_EVENTS >= 1)
2580 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2581 (Int)tid, (void*)rwl );
2582 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002583 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002584 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2585}
2586
/* Handle pthread_rwlock_destroy: drop our record of the rwlock.
   Complains if the lock is unknown or not LK_rdwr, or still held (a
   held lock is forcibly "unlocked" first so state stays consistent).
   Mirrors evh__HG_PTHREAD_MUTEX_DESTROY_PRE. */
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop lock-order-graph info before forgetting the lock. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2633
2634static
sewardj789c3c52008-02-25 12:10:07 +00002635void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2636 void* rwl,
2637 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002638{
2639 /* Just check the rwl is sane; nothing else to do. */
2640 // 'rwl' may be invalid - not checked by wrapper
2641 Thread* thr;
2642 Lock* lk;
2643 if (SHOW_EVENTS >= 1)
2644 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2645 (Int)tid, (Int)isW, (void*)rwl );
2646
2647 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002648 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002649 thr = map_threads_maybe_lookup( tid );
2650 tl_assert(thr); /* cannot fail - Thread* must already exist */
2651
2652 lk = map_locks_maybe_lookup( (Addr)rwl );
2653 if ( lk
2654 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2655 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002656 HG_(record_error_Misc)(
2657 thr, "pthread_rwlock_{rd,rw}lock with a "
2658 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002659 }
2660}
2661
2662static
2663void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2664{
2665 // only called if the real library call succeeded - so mutex is sane
2666 Thread* thr;
2667 if (SHOW_EVENTS >= 1)
2668 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2669 (Int)tid, (Int)isW, (void*)rwl );
2670
2671 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2672 thr = map_threads_maybe_lookup( tid );
2673 tl_assert(thr); /* cannot fail - Thread* must already exist */
2674
2675 (isW ? evhH__post_thread_w_acquires_lock
2676 : evhH__post_thread_r_acquires_lock)(
2677 thr,
2678 LK_rdwr, /* if not known, create new lock with this LockKind */
2679 (Addr)rwl
2680 );
2681}
2682
2683static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2684{
2685 // 'rwl' may be invalid - not checked by wrapper
2686 Thread* thr;
2687 if (SHOW_EVENTS >= 1)
2688 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2689 (Int)tid, (void*)rwl );
2690
2691 thr = map_threads_maybe_lookup( tid );
2692 tl_assert(thr); /* cannot fail - Thread* must already exist */
2693
2694 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2695}
2696
2697static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2698{
2699 // only called if the real library call succeeded - so mutex is sane
2700 Thread* thr;
2701 if (SHOW_EVENTS >= 1)
2702 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2703 (Int)tid, (void*)rwl );
2704 thr = map_threads_maybe_lookup( tid );
2705 tl_assert(thr); /* cannot fail - Thread* must already exist */
2706
2707 // anything we should do here?
2708}
2709
2710
sewardj9f569b72008-11-13 13:33:09 +00002711/* ---------------------------------------------------------- */
2712/* -------------- events to do with semaphores -------------- */
2713/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002714
sewardj11e352f2007-11-30 11:11:02 +00002715/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002716 variables. */
2717
sewardjf98e1c02008-10-25 16:22:41 +00002718/* For each semaphore, we maintain a stack of SOs. When a 'post'
2719 operation is done on a semaphore (unlocking, essentially), a new SO
2720 is created for the posting thread, the posting thread does a strong
2721 send to it (which merely installs the posting thread's VC in the
2722 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002723
2724 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002725 semaphore, we pop a SO off the semaphore's stack (which should be
2726 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002727 dependencies between posters and waiters of the semaphore.
2728
sewardjf98e1c02008-10-25 16:22:41 +00002729 It may not be necessary to use a stack - perhaps a bag of SOs would
2730 do. But we do need to keep track of how many unused-up posts have
2731 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002732
sewardjf98e1c02008-10-25 16:22:41 +00002733 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002734 twice on S. T3 cannot complete its waits without both T1 and T2
2735 posting. The above mechanism will ensure that T3 acquires
2736 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002737
sewardjf98e1c02008-10-25 16:22:41 +00002738 When a semaphore is initialised with value N, we do as if we'd
2739 posted N times on the semaphore: basically create N SOs and do a
2740 strong send to all of then. This allows up to N waits on the
2741 semaphore to acquire a dependency on the initialisation point,
2742 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002743
2744 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2745 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002746*/
2747
sewardjf98e1c02008-10-25 16:22:41 +00002748/* sem_t* -> XArray* SO* */
2749static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002750
sewardjf98e1c02008-10-25 16:22:41 +00002751static void map_sem_to_SO_stack_INIT ( void ) {
2752 if (map_sem_to_SO_stack == NULL) {
2753 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2754 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00002755 }
2756}
2757
sewardjf98e1c02008-10-25 16:22:41 +00002758static void push_SO_for_sem ( void* sem, SO* so ) {
2759 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002760 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002761 tl_assert(so);
2762 map_sem_to_SO_stack_INIT();
2763 if (VG_(lookupFM)( map_sem_to_SO_stack,
2764 &keyW, (UWord*)&xa, (UWord)sem )) {
2765 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002766 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002767 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002768 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002769 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2770 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002771 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002772 }
2773}
2774
sewardjf98e1c02008-10-25 16:22:41 +00002775static SO* mb_pop_SO_for_sem ( void* sem ) {
2776 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002777 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002778 SO* so;
2779 map_sem_to_SO_stack_INIT();
2780 if (VG_(lookupFM)( map_sem_to_SO_stack,
2781 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002782 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002783 Word sz;
2784 tl_assert(keyW == (UWord)sem);
2785 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002786 tl_assert(sz >= 0);
2787 if (sz == 0)
2788 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002789 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2790 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002791 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002792 return so;
sewardjb4112022007-11-09 22:49:28 +00002793 } else {
2794 /* hmm, that's odd. No stack for this semaphore. */
2795 return NULL;
2796 }
2797}
2798
/* Called before the guest destroys semaphore 'sem': deallocate every
   SO still on the semaphore's stack, then remove and delete the stack
   itself from the map. */
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* Now dispose of the (empty) stack and its map entry, if any. */
   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
2826
/* Called after sem_init(sem, ., value): behave as if the semaphore had
   been posted 'value' times, by pushing 'value' strongly-sent SOs onto
   its (freshly emptied) stack.  Later waits then acquire a dependency
   on this initialisation point. */
static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'valid' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
2867
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped off the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}
2898
static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}
2936
2937
sewardj9f569b72008-11-13 13:33:09 +00002938/* -------------------------------------------------------- */
2939/* -------------- events to do with barriers -------------- */
2940/* -------------------------------------------------------- */
2941
/* Shadow state kept for each client pthread_barrier_t. */
typedef
   struct {
      Bool initted; /* has it yet been initted by guest? */
      Bool resizable; /* is resizing allowed? */
      UWord size; /* declared size */
      XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;
2950
2951static Bar* new_Bar ( void ) {
2952 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
sewardj9f569b72008-11-13 13:33:09 +00002953 /* all fields are zero */
2954 tl_assert(bar->initted == False);
2955 return bar;
2956}
2957
2958static void delete_Bar ( Bar* bar ) {
2959 tl_assert(bar);
2960 if (bar->waiting)
2961 VG_(deleteXA)(bar->waiting);
2962 HG_(free)(bar);
2963}
2964
2965/* A mapping which stores auxiliary data for barriers. */
2966
2967/* pthread_barrier_t* -> Bar* */
2968static WordFM* map_barrier_to_Bar = NULL;
2969
2970static void map_barrier_to_Bar_INIT ( void ) {
2971 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2972 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2973 "hg.mbtBI.1", HG_(free), NULL );
sewardj9f569b72008-11-13 13:33:09 +00002974 }
2975}
2976
2977static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2978 UWord key, val;
2979 map_barrier_to_Bar_INIT();
2980 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2981 tl_assert(key == (UWord)barrier);
2982 return (Bar*)val;
2983 } else {
2984 Bar* bar = new_Bar();
2985 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2986 return bar;
2987 }
2988}
2989
2990static void map_barrier_to_Bar_delete ( void* barrier ) {
2991 UWord keyW, valW;
2992 map_barrier_to_Bar_INIT();
2993 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2994 Bar* bar = (Bar*)valW;
2995 tl_assert(keyW == (UWord)barrier);
2996 delete_Bar(bar);
2997 }
2998}
2999
3000
/* Called before the guest's pthread_barrier_init: validate 'count' and
   'resizable', complain about re-initialisation or waiting threads,
   then (re)initialise the shadow Bar for this barrier. */
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      /* Discard the stale waiters so the state below is consistent. */
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   /* Guaranteed by the checks above: an empty waiting list. */
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
3055
3056
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
3099
3100
/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
3136
3137
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   /* Record this thread's arrival at the barrier. */
   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return; /* not the last arrival; nothing more to do yet */

   /* Last arrival: cross-sync all waiters and empty the list. */
   do_barrier_cross_sync_and_empty(bar);
}
sewardj9f569b72008-11-13 13:33:09 +00003218
sewardj9f569b72008-11-13 13:33:09 +00003219
sewardj406bac82010-03-03 23:03:40 +00003220static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3221 void* barrier,
3222 UWord newcount )
3223{
3224 Thread* thr;
3225 Bar* bar;
3226 UWord present;
3227
3228 if (SHOW_EVENTS >= 1)
3229 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3230 "(tid=%d, barrier=%p, newcount=%lu)\n",
3231 (Int)tid, (void*)barrier, newcount );
3232
3233 thr = map_threads_maybe_lookup( tid );
3234 tl_assert(thr); /* cannot fail - Thread* must already exist */
3235
3236 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3237 tl_assert(bar);
3238
3239 if (!bar->initted) {
3240 HG_(record_error_Misc)(
3241 thr, "pthread_barrier_resize: barrier is uninitialised"
3242 );
3243 return; /* client is broken .. avoid assertions below */
3244 }
3245
3246 if (!bar->resizable) {
3247 HG_(record_error_Misc)(
3248 thr, "pthread_barrier_resize: barrier is may not be resized"
3249 );
3250 return; /* client is broken .. avoid assertions below */
3251 }
3252
3253 if (newcount == 0) {
3254 HG_(record_error_Misc)(
3255 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3256 );
3257 return; /* client is broken .. avoid assertions below */
3258 }
3259
3260 /* guaranteed by _INIT_PRE above */
3261 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003262 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003263 /* Guaranteed by this fn */
3264 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003265
sewardj406bac82010-03-03 23:03:40 +00003266 if (newcount >= bar->size) {
3267 /* Increasing the capacity. There's no possibility of threads
3268 moving on from the barrier in this situation, so just note
3269 the fact and do nothing more. */
3270 bar->size = newcount;
3271 } else {
3272 /* Decreasing the capacity. If we decrease it to be equal or
3273 below the number of waiting threads, they will now move past
3274 the barrier, so need to mess with dep edges in the same way
3275 as if the barrier had filled up normally. */
3276 present = VG_(sizeXA)(bar->waiting);
3277 tl_assert(present >= 0 && present <= bar->size);
3278 if (newcount <= present) {
3279 bar->size = present; /* keep the cross_sync call happy */
3280 do_barrier_cross_sync_and_empty(bar);
3281 }
3282 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003283 }
sewardj9f569b72008-11-13 13:33:09 +00003284}
3285
3286
sewardjed2e72e2009-08-14 11:08:24 +00003287/* ----------------------------------------------------- */
3288/* ----- events to do with user-specified HB edges ----- */
3289/* ----------------------------------------------------- */
3290
3291/* A mapping from arbitrary UWord tag to the SO associated with it.
3292 The UWord tags are meaningless to us, interpreted only by the
3293 user. */
3294
3295
3296
3297/* UWord -> SO* */
3298static WordFM* map_usertag_to_SO = NULL;
3299
3300static void map_usertag_to_SO_INIT ( void ) {
3301 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3302 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3303 "hg.mutS.1", HG_(free), NULL );
sewardjed2e72e2009-08-14 11:08:24 +00003304 }
3305}
3306
3307static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3308 UWord key, val;
3309 map_usertag_to_SO_INIT();
3310 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3311 tl_assert(key == (UWord)usertag);
3312 return (SO*)val;
3313 } else {
3314 SO* so = libhb_so_alloc();
3315 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3316 return so;
3317 }
3318}
3319
sewardj6015d0e2011-03-11 19:10:48 +00003320static void map_usertag_to_SO_delete ( UWord usertag ) {
3321 UWord keyW, valW;
3322 map_usertag_to_SO_INIT();
3323 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3324 SO* so = (SO*)valW;
3325 tl_assert(keyW == usertag);
3326 tl_assert(so);
3327 libhb_so_dealloc(so);
3328 }
3329}
sewardjed2e72e2009-08-14 11:08:24 +00003330
3331
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Weak send: joins this thread's VC into the SO rather than
      overwriting it. */
   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}
3359
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
3387
static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}
3402
sewardjed2e72e2009-08-14 11:08:24 +00003403
sewardj8eb8bab2015-07-21 14:44:28 +00003404#if defined(VGO_solaris)
3405/* ----------------------------------------------------- */
3406/* --- events to do with bind guard/clear intercepts --- */
3407/* ----------------------------------------------------- */
3408
/* (Solaris only) Entry to the runtime linker's bind guard for thread
   'tid'.  If the VKI_THR_FLG_RTLD flag bit is not yet recorded for
   this thread, record it and suppress race/mutex checking for the
   duration of the rtld operation. */
static
void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_GUARD"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((bindflag & thr->bind_guard_flag) == 0) {
      /* First (non-nested) entry for this flag: start ignoring. */
      thr->bind_guard_flag |= bindflag;
      HG_(thread_enter_synchr)(thr);
      /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
      HG_(thread_enter_pthread_create)(thr);
   }
}
3428
/* (Solaris only) Exit from the runtime linker's bind guard for thread
   'tid'.  Undoes the matching evh__HG_RTLD_BIND_GUARD: clears the
   recorded flag bit and re-enables checking. */
static
void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((thr->bind_guard_flag & bindflag) != 0) {
      /* Only clear if the matching guard was actually recorded. */
      thr->bind_guard_flag &= ~bindflag;
      HG_(thread_leave_synchr)(thr);
      HG_(thread_leave_pthread_create)(thr);
   }
}
3447#endif /* VGO_solaris */
3448
3449
sewardjb4112022007-11-09 22:49:28 +00003450/*--------------------------------------------------------------*/
3451/*--- Lock acquisition order monitoring ---*/
3452/*--------------------------------------------------------------*/
3453
3454/* FIXME: here are some optimisations still to do in
3455 laog__pre_thread_acquires_lock.
3456
3457 The graph is structured so that if L1 --*--> L2 then L1 must be
3458 acquired before L2.
3459
3460 The common case is that some thread T holds (eg) L1 L2 and L3 and
3461 is repeatedly acquiring and releasing Ln, and there is no ordering
Elliott Hughesed398002017-06-21 14:41:24 -07003462 error in what it is doing. Hence it repeatedly:
sewardjb4112022007-11-09 22:49:28 +00003463
3464 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3465 produces the answer No (because there is no error).
3466
3467 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3468 (because they already got added the first time T acquired Ln).
3469
3470 Hence cache these two events:
3471
3472 (1) Cache result of the query from last time. Invalidate the cache
3473 any time any edges are added to or deleted from laog.
3474
3475 (2) Cache these add-edge requests and ignore them if said edges
3476 have already been added to laog. Invalidate the cache any time
3477 any edges are deleted from laog.
3478*/
3479
3480typedef
3481 struct {
3482 WordSetID inns; /* in univ_laog */
3483 WordSetID outs; /* in univ_laog */
3484 }
3485 LAOGLinks;
3486
3487/* lock order acquisition graph */
3488static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3489
3490/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3491 where that edge was created, so that we can show the user later if
3492 we need to. */
3493typedef
3494 struct {
3495 Addr src_ga; /* Lock guest addresses for */
3496 Addr dst_ga; /* src/dst of the edge */
3497 ExeContext* src_ec; /* And corresponding places where that */
3498 ExeContext* dst_ec; /* ordering was established */
3499 }
3500 LAOGLinkExposition;
3501
sewardj250ec2e2008-02-15 22:02:30 +00003502static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003503 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3504 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3505 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3506 if (llx1->src_ga < llx2->src_ga) return -1;
3507 if (llx1->src_ga > llx2->src_ga) return 1;
3508 if (llx1->dst_ga < llx2->dst_ga) return -1;
3509 if (llx1->dst_ga > llx2->dst_ga) return 1;
3510 return 0;
3511}
3512
3513static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3514/* end EXPOSITION ONLY */
3515
3516
/* One-time setup of the lock-acquisition-order graph ('laog') and the
   exposition map recording where each ordering edge was established.
   Only meaningful when lock-order tracking is enabled. */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
}
3530
/* Debug dump: print every node in 'laog' together with its incoming
   ('inn') and outgoing ('out') edge sets.  'who' identifies the caller
   in the output header. */
static void laog__show ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
3557
/* Garbage-collect the universe of laog WordSets (univ_laog).
   Mark phase: every WordSet id still referenced from some laog node's
   inns/outs is flagged.  Sweep phase: all unflagged sets are killed
   with HG_(dieWS).  Finally recomputes next_gc_univ_laog, the
   cardinality threshold at which the next GC is triggered. */
static void univ_laog_do_GC ( void ) {
   Word i;
   LAOGLinks* links;
   Word seen = 0;
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);

   /* One flag per WordSet id in the universe. */
   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                        * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   /* Mark: walk all laog nodes and flag their inns/outs set ids. */
   VG_(initIterFM)( laog );
   links = NULL;
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links);
      tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
      links = NULL;
   }
   VG_(doneIterFM)( laog );

   /* Sweep: kill every set that was not marked above. */
   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next_gc.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep long enough laog WV that will become
   //   useful  again very soon.
   // Sol 2: garbage collect at a percentage increase of the current cardinality
   //         (with a min increase of 1)
   //   Trials on a small test program with 1%, 5% and 10% increase was done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each gc becomes a lot, when many gc
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to cost introduced by gc: on the t2t perf test (doing only
   // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection. With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // difference performance is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.

   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or fixed increased),
   // we should do it here, eg.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code the solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
          (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
}
3630
3631
/* Add the edge src -> dst to laog, maintaining both the forward
   (outs) and backward (inns) adjacency sets.  For genuinely new edges
   with known acquisition points, also record the (src,dst) pair in
   laog_exposition so a later ordering violation can cite the stacks
   that established the order.  May trigger a univ_laog GC. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      /* addToWS returns the same set id iff dst was already present. */
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src has no node yet: create one with outs = {dst}. */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* dst has no node yet: create one with inns = {src}. */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   /* Forward and backward views must agree on edge presence. */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   /* The WordSet operations above may have grown univ_laog past the
      GC threshold; collect dead sets if so. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}
3714
/* Remove the edge src -> dst from laog (both the forward and backward
   adjacency sets), and drop the corresponding laog_exposition entry
   if one exists.  Missing nodes/edges are silently tolerated.  May
   trigger a univ_laog GC, since delFromWS can create new WordSets. */
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      /* Stack-allocated key: lookup in laog_exposition compares only
         the src_ga/dst_ga fields (see cmp_LAOGLinkExposition). */
      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         /* The FM held a heap-allocated copy; free it. */
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase nr of of WS so check for gc. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}
3758
3759__attribute__((noinline))
3760static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003761 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003762 LAOGLinks* links;
3763 keyW = 0;
3764 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003765 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003766 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003767 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003768 return links->outs;
3769 } else {
3770 return HG_(emptyWS)( univ_laog );
3771 }
3772}
3773
3774__attribute__((noinline))
3775static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003776 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003777 LAOGLinks* links;
3778 keyW = 0;
3779 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003780 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003781 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003782 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003783 return links->inns;
3784 } else {
3785 return HG_(emptyWS)( univ_laog );
3786 }
3787}
3788
/* Check bidirectional consistency of laog: for every node, each
   predecessor must list the node among its successors, and each
   successor must list the node among its predecessors.  On failure,
   dumps the graph via laog__show and aborts with tl_assert(0). */
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* Every inn-neighbour must have 'me' among its successors. */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      /* Every out-neighbour must have 'me' among its predecessors. */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3828
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exist from 'src' to any element in 'dst', return
   NULL.
   Implementation: iterative depth-first search using an explicit
   stack (XArray of Lock*) and a 'visited' set (WordFM used as a set)
   to cope with sharing and cycles. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   UWord     succs_size, i;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* Worklist exhausted: no element of 'dsts' is reachable. */
      if (ssz == 0) { ret = NULL; break; }

      /* Pop the most recently pushed lock. */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* Found a destination: done. */
      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      /* Skip already-visited nodes. */
      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      /* Push all successors of 'here'. */
      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3884
3885
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
   NB: 'thr->locksetA' must be the lockset from BEFORE 'lk' is added.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord*   ls_words;
   UWord    ls_size, i;
   Lock*    other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         /* Full report: include the stacks that established the
            required ordering. */
         HG_(record_error_LockOrder)(
            thr, lk, other,
            found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks
            between them:

                           C

                       fCA   fBC

                      A   fAB   B

            Let's have the following actions:
                   A takes    fCA,fAB
                   A releases fCA,fAB
                   B takes    fAB,fBC
                   B releases fAB,fBC
                   C takes    fBC,fCA
                   C releases fBC,fCA

            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            sequence is done:
                A takes fCA
                B takes fAB
                C takes fBC

            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            are not given.

            This is because there is no pair (fCA, fBC) in laog exposition :
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */

         HG_(record_error_LockOrder)(
            thr, lk, other,
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk)  |  old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
4011
sewardj866c80c2011-10-22 19:29:51 +00004012/* Allocates a duplicate of words. Caller must HG_(free) the result. */
4013static UWord* UWordV_dup(UWord* words, Word words_size)
4014{
4015 UInt i;
4016
4017 if (words_size == 0)
4018 return NULL;
4019
4020 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
4021
4022 for (i = 0; i < words_size; i++)
4023 dup[i] = words[i];
4024
4025 return dup;
4026}
sewardjb4112022007-11-09 22:49:28 +00004027
4028/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
4029
/* Remove lock 'lk' from laog entirely: detach it from all its
   predecessors and successors, then reconnect each predecessor
   directly to each successor so the transitive ordering constraints
   that passed through lk are preserved, and finally delete lk's node
   from the laog map. */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   /* Detach lk from all its predecessors ... */
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   /* ... and from all its successors. */
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* Bridge each (pred, succ) pair so transitive ordering survives. */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   /* UWordV_dup returns NULL for empty sets; free only real buffers. */
   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
4083
sewardj1cbc12f2008-11-10 16:16:46 +00004084//__attribute__((noinline))
4085//static void laog__handle_lock_deletions (
4086// WordSetID /* in univ_laog */ locksToDelete
4087// )
4088//{
4089// Word i, ws_size;
4090// UWord* ws_words;
4091//
sewardj1cbc12f2008-11-10 16:16:46 +00004092//
4093// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00004094// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00004095// for (i = 0; i < ws_size; i++)
4096// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
4097//
4098// if (HG_(clo_sanity_flags) & SCE_LAOG)
4099// all__sanity_check("laog__handle_lock_deletions-post");
4100//}
sewardjb4112022007-11-09 22:49:28 +00004101
4102
4103/*--------------------------------------------------------------*/
4104/*--- Malloc/free replacements ---*/
4105/*--------------------------------------------------------------*/
4106
/* Per-block metadata for client heap blocks managed by the malloc/free
   replacements below.  Stored in hg_mallocmeta_table, keyed by the
   payload address. */
typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;
4116
4117/* A hash table of MallocMetas, used to track malloc'd blocks
4118 (obviously). */
florian09a4c792014-10-18 10:58:05 +00004119static VgHashTable *hg_mallocmeta_table = NULL;
sewardjb4112022007-11-09 22:49:28 +00004120
philippe5fbc9762013-12-01 19:28:48 +00004121/* MallocMeta are small elements. We use a pool to avoid
4122 the overhead of malloc for each MallocMeta. */
4123static PoolAlloc *MallocMeta_poolalloc = NULL;
sewardjb4112022007-11-09 22:49:28 +00004124
4125static MallocMeta* new_MallocMeta ( void ) {
philippe5fbc9762013-12-01 19:28:48 +00004126 MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
4127 VG_(memset)(md, 0, sizeof(MallocMeta));
sewardjb4112022007-11-09 22:49:28 +00004128 return md;
4129}
4130static void delete_MallocMeta ( MallocMeta* md ) {
philippe5fbc9762013-12-01 19:28:48 +00004131 VG_(freeEltPA)(MallocMeta_poolalloc, md);
sewardjb4112022007-11-09 22:49:28 +00004132}
4133
4134
4135/* Allocate a client block and set up the metadata for it. */
4136
/* Allocate a client block of 'szB' bytes aligned to 'alignB',
   optionally zero-filled, register its MallocMeta in the hash table,
   and notify the lower-level memory machinery.  Returns NULL if the
   client allocator fails. */
static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
   /* Record the allocation in the execution tree too, when full
      xtree memory profiling is enabled. */
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(md->szB, md->where);

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
4170
4171/* Re the checks for less-than-zero (also in hg_cli__realloc below):
4172 Cast to a signed type to catch any unexpectedly negative args.
4173 We're assuming here that the size asked for is not greater than
4174 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4175 platforms). */
4176static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4177 if (((SSizeT)n) < 0) return NULL;
4178 return handle_alloc ( tid, n, VG_(clo_alignment),
4179 /*is_zeroed*/False );
4180}
4181static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4182 if (((SSizeT)n) < 0) return NULL;
4183 return handle_alloc ( tid, n, VG_(clo_alignment),
4184 /*is_zeroed*/False );
4185}
4186static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4187 if (((SSizeT)n) < 0) return NULL;
4188 return handle_alloc ( tid, n, VG_(clo_alignment),
4189 /*is_zeroed*/False );
4190}
4191static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4192 if (((SSizeT)n) < 0) return NULL;
4193 return handle_alloc ( tid, n, align,
4194 /*is_zeroed*/False );
4195}
4196static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4197 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4198 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4199 /*is_zeroed*/True );
4200}
4201
4202
4203/* Free a client block, including getting rid of the relevant
4204 metadata. */
4205
/* Free a client block: look up its metadata (silently ignoring
   unknown addresses), record the event for xtree profiling when
   enabled, release both the client memory and the metadata, and
   notify the lower-level memory machinery. */
static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;
   /* Record the free in the execution tree when full xtree memory
      profiling is enabled. */
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
      ExeContext* ec_free = VG_(record_ExeContext)( tid, 0 );
      VG_(XTMemory_Full_free)(md->szB, md->where, ec_free);
   }

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}
4236
4237static void hg_cli__free ( ThreadId tid, void* p ) {
4238 handle_free(tid, p);
4239}
4240static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4241 handle_free(tid, p);
4242}
4243static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4244 handle_free(tid, p);
4245}
4246
4247
4248static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4249{
4250 MallocMeta *md, *md_new, *md_tmp;
4251 SizeT i;
4252
4253 Addr payload = (Addr)payloadV;
4254
4255 if (((SSizeT)new_size) < 0) return NULL;
4256
4257 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4258 if (!md)
4259 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4260
4261 tl_assert(md->payload == payload);
4262
4263 if (md->szB == new_size) {
4264 /* size unchanged */
4265 md->where = VG_(record_ExeContext)(tid, 0);
4266 return payloadV;
4267 }
4268
4269 if (md->szB > new_size) {
4270 /* new size is smaller */
4271 md->szB = new_size;
4272 md->where = VG_(record_ExeContext)(tid, 0);
4273 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4274 return payloadV;
4275 }
4276
4277 /* else */ {
4278 /* new size is bigger */
4279 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4280
4281 /* First half kept and copied, second half new */
4282 // FIXME: shouldn't we use a copier which implements the
4283 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004284 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004285 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004286 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004287 /* FIXME: can anything funny happen here? specifically, if the
4288 old range contained a lock, then die_mem_heap will complain.
4289 Is that the correct behaviour? Not sure. */
4290 evh__die_mem_heap( payload, md->szB );
4291
4292 /* Copy from old to new */
4293 for (i = 0; i < md->szB; i++)
4294 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4295
4296 /* Because the metadata hash table is index by payload address,
4297 we have to get rid of the old hash table entry and make a new
4298 one. We can't just modify the existing metadata in place,
4299 because then it would (almost certainly) be in the wrong hash
4300 chain. */
4301 md_new = new_MallocMeta();
4302 *md_new = *md;
4303
4304 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4305 tl_assert(md_tmp);
4306 tl_assert(md_tmp == md);
4307
4308 VG_(cli_free)((void*)md->payload);
4309 delete_MallocMeta(md);
4310
4311 /* Update fields */
4312 md_new->where = VG_(record_ExeContext)( tid, 0 );
4313 md_new->szB = new_size;
4314 md_new->payload = p_new;
4315 md_new->thr = map_threads_lookup( tid );
4316
4317 /* and add */
4318 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4319
4320 return (void*)p_new;
4321 }
4322}
4323
njn8b140de2009-02-17 04:31:18 +00004324static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4325{
4326 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4327
4328 // There may be slop, but pretend there isn't because only the asked-for
4329 // area will have been shadowed properly.
4330 return ( md ? md->szB : 0 );
4331}
4332
sewardjb4112022007-11-09 22:49:28 +00004333
sewardj095d61e2010-03-11 13:43:18 +00004334/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004335 Slow linear search. With a bit of hash table help if 'data_addr'
4336 is either the start of a block or up to 15 word-sized steps along
4337 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004338
/* Does address 'a' fall inside the client block described by 'mm'?
   Interval is half-open [payload, payload+szB), with a special case
   for zero-sized blocks.  The LIKELY hints reflect that most probes
   miss (callers scan many chunks per query). */
static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}
4350
sewardjc8028ad2010-05-05 09:34:42 +00004351Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
philippe0c9ac8d2014-07-18 00:03:58 +00004352 /*OUT*/UInt* tnr,
sewardj095d61e2010-03-11 13:43:18 +00004353 /*OUT*/Addr* payload,
4354 /*OUT*/SizeT* szB,
4355 Addr data_addr )
4356{
4357 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004358 Int i;
4359 const Int n_fast_check_words = 16;
4360
4361 /* First, do a few fast searches on the basis that data_addr might
4362 be exactly the start of a block or up to 15 words inside. This
4363 can happen commonly via the creq
4364 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4365 for (i = 0; i < n_fast_check_words; i++) {
4366 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4367 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4368 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4369 goto found;
4370 }
4371
sewardj095d61e2010-03-11 13:43:18 +00004372 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004373 some such, it's hard to see how to do better. We have to check
4374 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004375 VG_(HT_ResetIter)(hg_mallocmeta_table);
4376 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004377 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4378 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004379 }
sewardjc8028ad2010-05-05 09:34:42 +00004380
4381 /* Not found. Bah. */
4382 return False;
4383 /*NOTREACHED*/
4384
4385 found:
4386 tl_assert(mm);
4387 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4388 if (where) *where = mm->where;
philippe0c9ac8d2014-07-18 00:03:58 +00004389 if (tnr) *tnr = mm->thr->errmsg_index;
sewardjc8028ad2010-05-05 09:34:42 +00004390 if (payload) *payload = mm->payload;
4391 if (szB) *szB = mm->szB;
4392 return True;
sewardj095d61e2010-03-11 13:43:18 +00004393}
4394
4395
sewardjb4112022007-11-09 22:49:28 +00004396/*--------------------------------------------------------------*/
4397/*--- Instrumentation ---*/
4398/*--------------------------------------------------------------*/
4399
/* Local shorthands for constructing flat IR in the instrumentation
   section below. */
#define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4406
sewardjcafe5052013-01-17 14:24:35 +00004407/* This takes and returns atoms, of course. Not full IRExprs. */
4408static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4409{
4410 tl_assert(arg1 && arg2);
4411 tl_assert(isIRAtom(arg1));
4412 tl_assert(isIRAtom(arg2));
4413 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4414 code, I know. */
4415 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4416 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4417 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4418 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4419 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4420 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4421 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4422 mkexpr(wide2))));
4423 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4424 return mkexpr(res);
4425}
4426
/* Emit IR into 'sbOut' that calls the appropriate
   evh__mem_help_c{read,write}_{1,2,4,8,N} event handler for a guest
   memory access of 'szB' bytes at address atom 'addr'.  'goff_sp' is
   the guest state offset of the stack pointer, used (when
   --check-stack-refs=no) to build a guard that skips accesses close
   to SP.  'guard' is an optional I1 atom conditionalising the access
   itself (NULL => unconditional). */
static void instrument_mem_access ( IRSB*   sbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB,
                                    Int     goff_sp,
                                    IRExpr* guard ) /* NULL => True */
{
   IRType   tyAddr   = Ity_INVALID;
   const HChar* hName = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   /* Select the helper: sized fast-path helpers for 1/2/4/8 byte
      accesses, the generic _N helper (which also takes the size as a
      second argument) for anything larger. */
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      /* guardA == "guard on the address" */
      IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guardA,
                tyAddr == Ity_I32
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guardA);
   }

   /* If there's a guard on the access itself (as supplied by the
      caller of this routine), we need to AND that in to any guard we
      might already have. */
   if (guard) {
      di->guard = mk_And1(sbOut, di->guard, guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
4591
4592
sewardja0eee322009-07-31 08:46:35 +00004593/* Figure out if GA is a guest code address in the dynamic linker, and
4594 if so return True. Otherwise (and in case of any doubt) return
4595 False. (sidedly safe w/ False as the safe value) */
florianf466eef2015-01-02 17:32:40 +00004596static Bool is_in_dynamic_linker_shared_object( Addr ga )
sewardja0eee322009-07-31 08:46:35 +00004597{
4598 DebugInfo* dinfo;
florian19f91bb2012-11-10 22:29:54 +00004599 const HChar* soname;
sewardja0eee322009-07-31 08:46:35 +00004600
florianf466eef2015-01-02 17:32:40 +00004601 dinfo = VG_(find_DebugInfo)( ga );
sewardja0eee322009-07-31 08:46:35 +00004602 if (!dinfo) return False;
4603
sewardje3f1e592009-07-31 09:41:29 +00004604 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004605 tl_assert(soname);
4606 if (0) VG_(printf)("%s\n", soname);
4607
Elliott Hughesa0664b92017-04-18 17:46:52 -07004608 return VG_(is_soname_ld_so)(soname);
sewardja0eee322009-07-31 08:46:35 +00004609}
4610
/* Core instrumentation pass.  Copy superblock 'bbIn' to a new
   superblock, inserting (via instrument_mem_access) a call to an
   evh__mem_help_* handler before every guest memory access.  Code
   belonging to the dynamic linker is deliberately not instrumented;
   see the Ist_IMark case below. */
static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      const VexGuestLayout* layout,
                      const VexGuestExtents* vge,
                      const VexArchInfo* archinfo_host,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   Addr    cia; /* address of current insn */
   IRStmt* st;
   Bool    inLDSO = False;
   Addr    inLDSOmask4K = 1; /* mismatches on first check */

   const Int goff_sp = layout->offset_SP;

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   /* The 4K-page LDSO check below assumes pages are at least 4K. */
   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;
   bbOut->offsIP   = bbIn->offsIP;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   /* Walk the remaining statements; instrument memory-touching ones,
      then copy each original statement through to bbOut. */
   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", cia);
               inLDSOmask4K = cia & ~(Addr)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
               case Imbe_CancelReservation:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it.  A double CAS touches twice
               the width of its data type. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy), goff_sp,
                  NULL/*no-guard*/
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp,
                     NULL/*no-guard*/
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }

         case Ist_Store:
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy), goff_sp,
                  NULL/*no-guard*/
               );
            }
            break;

         case Ist_StoreG: {
            /* Guarded store: pass the store's own guard through to
               the helper. */
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRExpr*   addr = sg->addr;
            IRType    type = typeOfIRExpr(bbIn->tyenv, data);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   True/*isStore*/,
                                   sizeofIRType(hWordTy),
                                   goff_sp, sg->guard );
            break;
         }

         case Ist_LoadG: {
            /* Guarded load: instrument the loaded (pre-widening)
               type, passing the load's guard through. */
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            IRExpr*  addr     = lg->addr;
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   False/*!isStore*/,
                                   sizeofIRType(hWordTy),
                                   goff_sp, lg->guard );
            break;
         }

         case Ist_WrTmp: {
            /* Ordinary loads appear as WrTmp(..., Load(...)). */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy), goff_sp,
                     NULL/*no-guard*/
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details.  A Modify effect is both a read and a
                  write, so may emit two helper calls. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
4854
/* Retire the local IR-construction shorthands.  Note: 'unop' is
   #define'd alongside the others above; #undef it here too so the
   macro does not leak into the remainder of the file. */
#undef unop
#undef binop
#undef mkexpr
#undef mkU32
#undef mkU64
#undef assign
4860
sewardjb4112022007-11-09 22:49:28 +00004861
4862/*----------------------------------------------------------------*/
4863/*--- Client requests ---*/
4864/*----------------------------------------------------------------*/
4865
4866/* Sheesh. Yet another goddam finite map. */
4867static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4868
4869static void map_pthread_t_to_Thread_INIT ( void ) {
4870 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004871 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4872 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004873 }
4874}
4875
/* A list of Ada dependent tasks and their masters.  Used for implementing
   the Ada task termination semantic as implemented by the
   gcc gnat Ada runtime. */
typedef
   struct {
      void* dependent; // Ada Task Control Block of the Dependent
      void* master;    // ATCB of the master
      Word  master_level; // level of dependency between master and dependent
      Thread* hg_dependent; // helgrind Thread* for dependent task.
   }
   GNAT_dmml; // (d)ependent (m)aster (m)aster_(l)evel.
/* Growable array of GNAT_dmml records; created lazily by
   gnat_dmmls_INIT below. */
static XArray* gnat_dmmls;   /* of GNAT_dmml */
4888static void gnat_dmmls_INIT (void)
4889{
4890 if (UNLIKELY(gnat_dmmls == NULL)) {
4891 gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
4892 HG_(free),
4893 sizeof(GNAT_dmml) );
4894 }
4895}
Elliott Hughesed398002017-06-21 14:41:24 -07004896
4897static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
4898{
4899 const MallocMeta* md = VG_(HT_Next)(hg_mallocmeta_table);
4900 if (md) {
4901 xta->nbytes = md->szB;
4902 xta->nblocks = 1;
4903 *ec_alloc = md->where;
4904 } else
4905 xta->nblocks = 0;
4906}
/* Dump an xtree memory profile of all live helgrind-tracked heap
   blocks into 'filename'.  'fini' is forwarded unchanged to
   VG_(XTMemory_report). */
static void HG_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}
4914
/* Print the list of helgrind monitor commands understood by
   handle_gdb_monitor_command, for the gdbserver 'help' command. */
static void print_monitor_help ( void )
{
   VG_(gdb_printf)
      (
"\n"
"helgrind monitor commands:\n"
" info locks [lock_addr] : show status of lock at addr lock_addr\n"
" with no lock_addr, show status of all locks\n"
" accesshistory <addr> [<len>] : show access history recorded\n"
" for <len> (or 1) bytes at <addr>\n"
" xtmemory [<filename>]\n"
" dump xtree memory profile in <filename> (default xtmemory.kcg)\n"
"\n");
}
4929
4930/* return True if request recognised, False otherwise */
4931static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4932{
philippef5774342014-05-03 11:12:50 +00004933 HChar* wcmd;
Elliott Hughesa0664b92017-04-18 17:46:52 -07004934 HChar s[VG_(strlen)(req)]; /* copy for strtok_r */
philippef5774342014-05-03 11:12:50 +00004935 HChar *ssaveptr;
4936 Int kwdid;
4937
4938 VG_(strcpy) (s, req);
4939
4940 wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4941 /* NB: if possible, avoid introducing a new command below which
4942 starts with the same first letter(s) as an already existing
4943 command. This ensures a shorter abbreviation for the user. */
4944 switch (VG_(keyword_id)
Elliott Hughesed398002017-06-21 14:41:24 -07004945 ("help info accesshistory xtmemory",
philippef5774342014-05-03 11:12:50 +00004946 wcmd, kwd_report_duplicated_matches)) {
4947 case -2: /* multiple matches */
4948 return True;
4949 case -1: /* not found */
4950 return False;
4951 case 0: /* help */
4952 print_monitor_help();
4953 return True;
4954 case 1: /* info */
philippef5774342014-05-03 11:12:50 +00004955 wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4956 switch (kwdid = VG_(keyword_id)
4957 ("locks",
4958 wcmd, kwd_report_all)) {
4959 case -2:
4960 case -1:
4961 break;
4962 case 0: // locks
4963 {
philippe328d6622015-05-25 17:24:27 +00004964 const HChar* wa;
4965 Addr lk_addr = 0;
4966 Bool lk_shown = False;
4967 Bool all_locks = True;
philippef5774342014-05-03 11:12:50 +00004968 Int i;
4969 Lock* lk;
philippe328d6622015-05-25 17:24:27 +00004970
4971 wa = VG_(strtok_r) (NULL, " ", &ssaveptr);
4972 if (wa != NULL) {
4973 if (VG_(parse_Addr) (&wa, &lk_addr) )
4974 all_locks = False;
4975 else {
4976 VG_(gdb_printf) ("missing or malformed address\n");
4977 }
4978 }
philippef5774342014-05-03 11:12:50 +00004979 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
philippe328d6622015-05-25 17:24:27 +00004980 if (all_locks || lk_addr == lk->guestaddr) {
4981 pp_Lock(0, lk,
4982 True /* show_lock_addrdescr */,
4983 False /* show_internal_data */);
4984 lk_shown = True;
4985 }
philippef5774342014-05-03 11:12:50 +00004986 }
4987 if (i == 0)
4988 VG_(gdb_printf) ("no locks\n");
philippe328d6622015-05-25 17:24:27 +00004989 if (!all_locks && !lk_shown)
4990 VG_(gdb_printf) ("lock with address %p not found\n",
4991 (void*)lk_addr);
philippef5774342014-05-03 11:12:50 +00004992 }
4993 break;
4994 default:
4995 tl_assert(0);
4996 }
4997 return True;
philippe328d6622015-05-25 17:24:27 +00004998
4999 case 2: /* accesshistory */
5000 {
5001 Addr address;
5002 SizeT szB = 1;
Elliott Hughesa0664b92017-04-18 17:46:52 -07005003 if (HG_(clo_history_level) < 2) {
5004 VG_(gdb_printf)
5005 ("helgrind must be started with --history-level=full"
5006 " to use accesshistory\n");
5007 return True;
5008 }
philippe328d6622015-05-25 17:24:27 +00005009 if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
5010 if (szB >= 1)
5011 libhb_event_map_access_history (address, szB, HG_(print_access));
5012 else
5013 VG_(gdb_printf) ("len must be >=1\n");
5014 }
5015 return True;
5016 }
5017
Elliott Hughesed398002017-06-21 14:41:24 -07005018 case 3: { /* xtmemory */
5019 HChar* filename;
5020 filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
5021 HG_(xtmemory_report)(filename, False);
5022 return True;
5023 }
5024
philippef5774342014-05-03 11:12:50 +00005025 default:
5026 tl_assert(0);
5027 return False;
5028 }
5029}
sewardjb4112022007-11-09 22:49:28 +00005030
5031static
5032Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
5033{
philippef5774342014-05-03 11:12:50 +00005034 if (!VG_IS_TOOL_USERREQ('H','G',args[0])
5035 && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
sewardjb4112022007-11-09 22:49:28 +00005036 return False;
5037
5038 /* Anything that gets past the above check is one of ours, so we
5039 should be able to handle it. */
5040
5041 /* default, meaningless return value, unless otherwise set */
5042 *ret = 0;
5043
5044 switch (args[0]) {
5045
5046 /* --- --- User-visible client requests --- --- */
5047
5048 case VG_USERREQ__HG_CLEAN_MEMORY:
florian5e5cb002015-08-03 21:21:42 +00005049 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
sewardjb4112022007-11-09 22:49:28 +00005050 args[1], args[2]);
5051 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00005052 are any held locks etc in the area. Calling evh__die_mem
5053 and then evh__new_mem is a bit inefficient; probably just
5054 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00005055 if (args[2] > 0) { /* length */
5056 evh__die_mem(args[1], args[2]);
5057 /* and then set it to New */
5058 evh__new_mem(args[1], args[2]);
5059 }
5060 break;
5061
sewardjc8028ad2010-05-05 09:34:42 +00005062 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
5063 Addr payload = 0;
5064 SizeT pszB = 0;
5065 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5066 args[1]);
philippe0c9ac8d2014-07-18 00:03:58 +00005067 if (HG_(mm_find_containing_block)(NULL, NULL,
5068 &payload, &pszB, args[1])) {
sewardjc8028ad2010-05-05 09:34:42 +00005069 if (pszB > 0) {
5070 evh__die_mem(payload, pszB);
5071 evh__new_mem(payload, pszB);
5072 }
5073 *ret = pszB;
5074 } else {
5075 *ret = (UWord)-1;
5076 }
5077 break;
5078 }
5079
sewardj406bac82010-03-03 23:03:40 +00005080 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
florian5e5cb002015-08-03 21:21:42 +00005081 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
sewardj406bac82010-03-03 23:03:40 +00005082 args[1], args[2]);
5083 if (args[2] > 0) { /* length */
5084 evh__untrack_mem(args[1], args[2]);
5085 }
5086 break;
5087
5088 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
florian5e5cb002015-08-03 21:21:42 +00005089 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
sewardj406bac82010-03-03 23:03:40 +00005090 args[1], args[2]);
5091 if (args[2] > 0) { /* length */
5092 evh__new_mem(args[1], args[2]);
5093 }
5094 break;
5095
philippef54cb662015-05-10 22:19:31 +00005096 case _VG_USERREQ__HG_GET_ABITS:
florian5e5cb002015-08-03 21:21:42 +00005097 if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
philippef54cb662015-05-10 22:19:31 +00005098 args[1], args[2], args[3]);
5099 UChar *zzabit = (UChar *) args[2];
5100 if (zzabit == NULL
5101 || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
5102 VKI_PROT_READ|VKI_PROT_WRITE))
5103 *ret = (UWord) libhb_srange_get_abits ((Addr) args[1],
5104 (UChar*) args[2],
5105 (SizeT) args[3]);
5106 else
5107 *ret = -1;
5108 break;
5109
Elliott Hughesed398002017-06-21 14:41:24 -07005110 /* This thread (tid) (a master) is informing us that it has
5111 seen the termination of a dependent task, and that this should
5112 be considered as a join between master and dependent. */
5113 case _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN: {
5114 Word n;
5115 const Thread *stayer = map_threads_maybe_lookup( tid );
5116 const void *dependent = (void*)args[1];
5117 const void *master = (void*)args[2];
5118
5119 if (0)
5120 VG_(printf)("HG_GNAT_DEPENDENT_MASTER_JOIN (tid %d): "
5121 "self_id = %p Thread* = %p dependent %p\n",
5122 (Int)tid, master, stayer, dependent);
5123
5124 gnat_dmmls_INIT();
5125 /* Similar loop as for master completed hook below, but stops at
5126 the first matching occurence, only comparing master and
5127 dependent. */
5128 for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5129 GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5130 if (dmml->master == master
5131 && dmml->dependent == dependent) {
5132 if (0)
5133 VG_(printf)("quitter %p dependency to stayer %p (join)\n",
5134 dmml->hg_dependent->hbthr, stayer->hbthr);
5135 tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5136 generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5137 stayer->hbthr);
5138 VG_(removeIndexXA) (gnat_dmmls, n);
5139 break;
5140 }
5141 }
5142 break;
5143 }
5144
sewardjb4112022007-11-09 22:49:28 +00005145 /* --- --- Client requests for Helgrind's use only --- --- */
5146
5147 /* Some thread is telling us its pthread_t value. Record the
5148 binding between that and the associated Thread*, so we can
5149 later find the Thread* again when notified of a join by the
5150 thread. */
5151 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
5152 Thread* my_thr = NULL;
5153 if (0)
5154 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
5155 (void*)args[1]);
5156 map_pthread_t_to_Thread_INIT();
5157 my_thr = map_threads_maybe_lookup( tid );
5158 /* This assertion should hold because the map_threads (tid to
5159 Thread*) binding should have been made at the point of
5160 low-level creation of this thread, which should have
5161 happened prior to us getting this client request for it.
5162 That's because this client request is sent from
5163 client-world from the 'thread_wrapper' function, which
5164 only runs once the thread has been low-level created. */
5165 tl_assert(my_thr != NULL);
5166 /* So now we know that (pthread_t)args[1] is associated with
5167 (Thread*)my_thr. Note that down. */
5168 if (0)
5169 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
5170 (void*)args[1], (void*)my_thr );
florian6bf37262012-10-21 03:23:36 +00005171 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
sewardj8eb8bab2015-07-21 14:44:28 +00005172
5173 if (my_thr->coretid != 1) {
5174 /* FIXME: hardwires assumption about identity of the root thread. */
5175 if (HG_(clo_ignore_thread_creation)) {
5176 HG_(thread_leave_pthread_create)(my_thr);
5177 HG_(thread_leave_synchr)(my_thr);
5178 tl_assert(my_thr->synchr_nesting == 0);
5179 }
5180 }
sewardjb4112022007-11-09 22:49:28 +00005181 break;
5182 }
5183
5184 case _VG_USERREQ__HG_PTH_API_ERROR: {
5185 Thread* my_thr = NULL;
5186 map_pthread_t_to_Thread_INIT();
5187 my_thr = map_threads_maybe_lookup( tid );
5188 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00005189 HG_(record_error_PthAPIerror)(
florian6bf37262012-10-21 03:23:36 +00005190 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00005191 break;
5192 }
5193
5194 /* This thread (tid) has completed a join with the quitting
5195 thread whose pthread_t is in args[1]. */
5196 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
5197 Thread* thr_q = NULL; /* quitter Thread* */
5198 Bool found = False;
5199 if (0)
5200 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
5201 (void*)args[1]);
5202 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00005203 found = VG_(lookupFM)( map_pthread_t_to_Thread,
florian6bf37262012-10-21 03:23:36 +00005204 NULL, (UWord*)&thr_q, (UWord)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005205 /* Can this fail? It would mean that our pthread_join
5206 wrapper observed a successful join on args[1] yet that
5207 thread never existed (or at least, it never lodged an
5208 entry in the mapping (via SET_MY_PTHREAD_T)). Which
5209 sounds like a bug in the threads library. */
5210 // FIXME: get rid of this assertion; handle properly
5211 tl_assert(found);
5212 if (found) {
5213 if (0)
5214 VG_(printf)(".................... quitter Thread* = %p\n",
5215 thr_q);
5216 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
5217 }
5218 break;
5219 }
5220
philipped40aff52014-06-16 20:00:14 +00005221 /* This thread (tid) is informing us of its master. */
5222 case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
5223 GNAT_dmml dmml;
5224 dmml.dependent = (void*)args[1];
5225 dmml.master = (void*)args[2];
5226 dmml.master_level = (Word)args[3];
5227 dmml.hg_dependent = map_threads_maybe_lookup( tid );
5228 tl_assert(dmml.hg_dependent);
5229
5230 if (0)
5231 VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
5232 "dependent = %p master = %p master_level = %ld"
5233 " dependent Thread* = %p\n",
5234 (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
5235 dmml.hg_dependent);
5236 gnat_dmmls_INIT();
5237 VG_(addToXA) (gnat_dmmls, &dmml);
5238 break;
5239 }
5240
5241 /* This thread (tid) is informing us that it has completed a
5242 master. */
5243 case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
5244 Word n;
5245 const Thread *stayer = map_threads_maybe_lookup( tid );
5246 const void *master = (void*)args[1];
5247 const Word master_level = (Word) args[2];
5248 tl_assert(stayer);
5249
5250 if (0)
5251 VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5252 "self_id = %p master_level = %ld Thread* = %p\n",
5253 (Int)tid, master, master_level, stayer);
5254
5255 gnat_dmmls_INIT();
5256 /* Reverse loop on the array, simulating a pthread_join for
5257 the Dependent tasks of the completed master, and removing
5258 them from the array. */
5259 for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5260 GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5261 if (dmml->master == master
5262 && dmml->master_level == master_level) {
5263 if (0)
5264 VG_(printf)("quitter %p dependency to stayer %p\n",
5265 dmml->hg_dependent->hbthr, stayer->hbthr);
5266 tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5267 generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5268 stayer->hbthr);
5269 VG_(removeIndexXA) (gnat_dmmls, n);
5270 }
5271 }
5272 break;
5273 }
5274
sewardjb4112022007-11-09 22:49:28 +00005275 /* EXPOSITION only: by intercepting lock init events we can show
5276 the user where the lock was initialised, rather than only
5277 being able to show where it was first locked. Intercepting
5278 lock initialisations is not necessary for the basic operation
5279 of the race checker. */
5280 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5281 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5282 break;
5283
sewardjc02f6c42013-10-14 13:51:25 +00005284 /* mutex=arg[1], mutex_is_init=arg[2] */
sewardjb4112022007-11-09 22:49:28 +00005285 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00005286 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjb4112022007-11-09 22:49:28 +00005287 break;
5288
5289 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
sewardj8eb8bab2015-07-21 14:44:28 +00005290 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5291 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5292 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005293 break;
5294
5295 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
sewardj8eb8bab2015-07-21 14:44:28 +00005296 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5297 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
5298 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005299 break;
5300
sewardj8eb8bab2015-07-21 14:44:28 +00005301 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*
5302 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5303 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5304 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00005305 break;
5306
sewardj8eb8bab2015-07-21 14:44:28 +00005307 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*, long
5308 if ((args[2] == True) // lock actually taken
5309 && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5310 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5311 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005312 break;
5313
5314 /* This thread is about to do pthread_cond_signal on the
5315 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
5316 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
5317 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
sewardj8eb8bab2015-07-21 14:44:28 +00005318 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005319 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
5320 break;
5321
sewardj8eb8bab2015-07-21 14:44:28 +00005322 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST:
5323 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST:
5324 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5325 break;
5326
sewardjb4112022007-11-09 22:49:28 +00005327 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5328 Returns a flag indicating whether or not the mutex is believed to be
5329 valid for this operation. */
5330 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
sewardj8eb8bab2015-07-21 14:44:28 +00005331 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005332 Bool mutex_is_valid
5333 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
5334 (void*)args[2] );
5335 *ret = mutex_is_valid ? 1 : 0;
5336 break;
5337 }
5338
philippe19dfe032013-03-24 20:10:23 +00005339 /* Thread successfully completed pthread_cond_init:
5340 cond=arg[1], cond_attr=arg[2] */
5341 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
5342 evh__HG_PTHREAD_COND_INIT_POST( tid,
5343 (void*)args[1], (void*)args[2] );
5344 break;
5345
sewardjc02f6c42013-10-14 13:51:25 +00005346 /* cond=arg[1], cond_is_init=arg[2] */
sewardjf98e1c02008-10-25 16:22:41 +00005347 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00005348 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjf98e1c02008-10-25 16:22:41 +00005349 break;
5350
sewardj8eb8bab2015-07-21 14:44:28 +00005351 /* Thread completed pthread_cond_wait, cond=arg[1],
5352 mutex=arg[2], timeout=arg[3], successful=arg[4] */
sewardjb4112022007-11-09 22:49:28 +00005353 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
sewardj8eb8bab2015-07-21 14:44:28 +00005354 if (args[4] == True)
5355 evh__HG_PTHREAD_COND_WAIT_POST( tid,
5356 (void*)args[1], (void*)args[2],
5357 (Bool)args[3] );
5358 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005359 break;
5360
5361 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5362 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5363 break;
5364
5365 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5366 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5367 break;
5368
sewardj789c3c52008-02-25 12:10:07 +00005369 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00005370 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj8eb8bab2015-07-21 14:44:28 +00005371 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5372 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5373 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5374 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00005375 break;
5376
sewardj8eb8bab2015-07-21 14:44:28 +00005377 /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00005378 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
sewardj8eb8bab2015-07-21 14:44:28 +00005379 if ((args[3] == True)
5380 && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5381 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5382 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005383 break;
5384
5385 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
sewardj8eb8bab2015-07-21 14:44:28 +00005386 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5387 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5388 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005389 break;
5390
5391 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
sewardj8eb8bab2015-07-21 14:44:28 +00005392 if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5393 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5394 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005395 break;
5396
sewardj11e352f2007-11-30 11:11:02 +00005397 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5398 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00005399 break;
5400
sewardj11e352f2007-11-30 11:11:02 +00005401 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5402 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005403 break;
5404
sewardj11e352f2007-11-30 11:11:02 +00005405 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
sewardj8eb8bab2015-07-21 14:44:28 +00005406 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
sewardj11e352f2007-11-30 11:11:02 +00005407 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5408 break;
5409
sewardj8eb8bab2015-07-21 14:44:28 +00005410 case _VG_USERREQ__HG_POSIX_SEM_POST_POST: /* sem_t* */
5411 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5412 break;
5413
5414 case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE: /* sem_t* */
5415 HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5416 break;
5417
5418 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t*, long tookLock */
5419 if (args[2] == True)
5420 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
5421 HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
sewardjb4112022007-11-09 22:49:28 +00005422 break;
5423
sewardj9f569b72008-11-13 13:33:09 +00005424 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00005425 /* pth_bar_t*, ulong count, ulong resizable */
5426 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5427 args[2], args[3] );
5428 break;
5429
5430 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5431 /* pth_bar_t*, ulong newcount */
5432 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5433 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00005434 break;
5435
5436 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5437 /* pth_bar_t* */
5438 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5439 break;
5440
5441 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5442 /* pth_bar_t* */
5443 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5444 break;
sewardjb4112022007-11-09 22:49:28 +00005445
sewardj5a644da2009-08-11 10:35:58 +00005446 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5447 /* pth_spinlock_t* */
5448 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5449 break;
5450
5451 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5452 /* pth_spinlock_t* */
5453 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5454 break;
5455
5456 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5457 /* pth_spinlock_t*, Word */
5458 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5459 break;
5460
5461 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5462 /* pth_spinlock_t* */
5463 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5464 break;
5465
5466 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5467 /* pth_spinlock_t* */
5468 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5469 break;
5470
sewardjed2e72e2009-08-14 11:08:24 +00005471 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
florian19f91bb2012-11-10 22:29:54 +00005472 /* HChar* who */
sewardjed2e72e2009-08-14 11:08:24 +00005473 HChar* who = (HChar*)args[1];
5474 HChar buf[50 + 50];
5475 Thread* thr = map_threads_maybe_lookup( tid );
5476 tl_assert( thr ); /* I must be mapped */
5477 tl_assert( who );
5478 tl_assert( VG_(strlen)(who) <= 50 );
5479 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5480 /* record_error_Misc strdup's buf, so this is safe: */
5481 HG_(record_error_Misc)( thr, buf );
5482 break;
5483 }
5484
5485 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5486 /* UWord arbitrary-SO-tag */
5487 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5488 break;
5489
5490 case _VG_USERREQ__HG_USERSO_RECV_POST:
5491 /* UWord arbitrary-SO-tag */
5492 evh__HG_USERSO_RECV_POST( tid, args[1] );
5493 break;
5494
sewardj6015d0e2011-03-11 19:10:48 +00005495 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5496 /* UWord arbitrary-SO-tag */
5497 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5498 break;
5499
philippef5774342014-05-03 11:12:50 +00005500 case VG_USERREQ__GDB_MONITOR_COMMAND: {
5501 Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
5502 if (handled)
5503 *ret = 1;
5504 else
5505 *ret = 0;
5506 return handled;
5507 }
5508
sewardj8eb8bab2015-07-21 14:44:28 +00005509 case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
5510 Thread *thr = map_threads_maybe_lookup(tid);
5511 if (HG_(clo_ignore_thread_creation)) {
5512 HG_(thread_enter_pthread_create)(thr);
5513 HG_(thread_enter_synchr)(thr);
5514 }
5515 break;
5516 }
5517
5518 case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
5519 Thread *thr = map_threads_maybe_lookup(tid);
5520 if (HG_(clo_ignore_thread_creation)) {
5521 HG_(thread_leave_pthread_create)(thr);
5522 HG_(thread_leave_synchr)(thr);
5523 }
5524 break;
5525 }
5526
5527 case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE: // pth_mx_t*, long tryLock
5528 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5529 break;
5530
5531 case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST: // pth_mx_t*
5532 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5533 break;
5534
5535 case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED: // void*, long isW
5536 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5537 break;
5538
5539 case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED: // void*
5540 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5541 break;
5542
5543 case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
5544 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5545 break;
5546
5547 case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
5548 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
5549 break;
5550
5551#if defined(VGO_solaris)
5552 case _VG_USERREQ__HG_RTLD_BIND_GUARD:
5553 evh__HG_RTLD_BIND_GUARD(tid, args[1]);
5554 break;
5555
5556 case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
5557 evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
5558 break;
5559#endif /* VGO_solaris */
5560
sewardjb4112022007-11-09 22:49:28 +00005561 default:
5562 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00005563 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
5564 args[0]);
sewardjb4112022007-11-09 22:49:28 +00005565 }
5566
5567 return True;
5568}
5569
5570
5571/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00005572/*--- Setup ---*/
5573/*----------------------------------------------------------------*/
5574
florian19f91bb2012-11-10 22:29:54 +00005575static Bool hg_process_cmd_line_option ( const HChar* arg )
sewardjb4112022007-11-09 22:49:28 +00005576{
florian19f91bb2012-11-10 22:29:54 +00005577 const HChar* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00005578
njn83df0b62009-02-25 01:01:05 +00005579 if VG_BOOL_CLO(arg, "--track-lockorders",
5580 HG_(clo_track_lockorders)) {}
5581 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5582 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00005583
5584 else if VG_XACT_CLO(arg, "--history-level=none",
5585 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00005586 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00005587 HG_(clo_history_level), 1);
5588 else if VG_XACT_CLO(arg, "--history-level=full",
5589 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00005590
njn83df0b62009-02-25 01:01:05 +00005591 else if VG_BINT_CLO(arg, "--conflict-cache-size",
philippe328d6622015-05-25 17:24:27 +00005592 HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00005593
sewardj11e352f2007-11-30 11:11:02 +00005594 /* "stuvwx" --> stuvwx (binary) */
njn83df0b62009-02-25 01:01:05 +00005595 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00005596 Int j;
sewardjb4112022007-11-09 22:49:28 +00005597
njn83df0b62009-02-25 01:01:05 +00005598 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00005599 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00005600 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00005601 return False;
5602 }
sewardj11e352f2007-11-30 11:11:02 +00005603 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00005604 if ('0' == tmp_str[j]) { /* do nothing */ }
5605 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00005606 else {
sewardj11e352f2007-11-30 11:11:02 +00005607 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00005608 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00005609 return False;
5610 }
5611 }
sewardjf98e1c02008-10-25 16:22:41 +00005612 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00005613 }
5614
sewardj622fe492011-03-11 21:06:59 +00005615 else if VG_BOOL_CLO(arg, "--free-is-write",
5616 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00005617
5618 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5619 HG_(clo_vts_pruning), 0);
5620 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5621 HG_(clo_vts_pruning), 1);
5622 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5623 HG_(clo_vts_pruning), 2);
5624
5625 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5626 HG_(clo_check_stack_refs)) {}
sewardj8eb8bab2015-07-21 14:44:28 +00005627 else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
5628 HG_(clo_ignore_thread_creation)) {}
sewardjffce8152011-06-24 10:09:41 +00005629
sewardjb4112022007-11-09 22:49:28 +00005630 else
5631 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5632
5633 return True;
5634}
5635
5636static void hg_print_usage ( void )
5637{
5638 VG_(printf)(
sewardj622fe492011-03-11 21:06:59 +00005639" --free-is-write=no|yes treat heap frees as writes [no]\n"
sewardj849b0ed2008-12-21 10:43:10 +00005640" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00005641" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00005642" full: show both stack traces for a data race (can be very slow)\n"
5643" approx: full trace for one thread, approx for the other (faster)\n"
5644" none: only show trace for one thread in a race (fastest)\n"
philippe328d6622015-05-25 17:24:27 +00005645" --conflict-cache-size=N size of 'full' history cache [2000000]\n"
sewardjffce8152011-06-24 10:09:41 +00005646" --check-stack-refs=no|yes race-check reads and writes on the\n"
5647" main stack and thread stacks? [yes]\n"
sewardj8eb8bab2015-07-21 14:44:28 +00005648" --ignore-thread-creation=yes|no Ignore activities during thread\n"
5649" creation [%s]\n",
5650HG_(clo_ignore_thread_creation) ? "yes" : "no"
sewardjb4112022007-11-09 22:49:28 +00005651 );
sewardjb4112022007-11-09 22:49:28 +00005652}
5653
5654static void hg_print_debug_usage ( void )
5655{
sewardjb4112022007-11-09 22:49:28 +00005656 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
5657 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00005658 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00005659 " at events (X = 0|1) [000000]\n");
5660 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00005661 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00005662 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00005663 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
5664 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00005665 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00005666 VG_(printf)(" 000010 at lock/unlock events\n");
5667 VG_(printf)(" 000001 at thread create/join events\n");
sewardjffce8152011-06-24 10:09:41 +00005668 VG_(printf)(
5669" --vts-pruning=never|auto|always [auto]\n"
5670" never: is never done (may cause big space leaks in Helgrind)\n"
5671" auto: done just often enough to keep space usage under control\n"
5672" always: done after every VTS GC (mostly just a big time waster)\n"
5673 );
sewardjb4112022007-11-09 22:49:28 +00005674}
5675
philippe8587b542013-12-15 20:24:43 +00005676static void hg_print_stats (void)
5677{
5678
5679 if (1) {
5680 VG_(printf)("\n");
5681 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
5682 if (HG_(clo_track_lockorders)) {
5683 VG_(printf)("\n");
5684 HG_(ppWSUstats)( univ_laog, "univ_laog" );
5685 }
5686 }
5687
5688 //zz VG_(printf)("\n");
5689 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
5690 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
5691 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
5692 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
5693 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
5694 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
5695 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
5696 //zz stats__hbefore_stk_hwm);
5697 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
5698 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
5699
5700 VG_(printf)("\n");
5701 VG_(printf)(" locksets: %'8d unique lock sets\n",
5702 (Int)HG_(cardinalityWSU)( univ_lsets ));
5703 if (HG_(clo_track_lockorders)) {
5704 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
5705 (Int)HG_(cardinalityWSU)( univ_laog ));
5706 }
5707
5708 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
5709 // stats__ga_LL_adds,
5710 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
5711
5712 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
5713 HG_(stats__LockN_to_P_queries),
5714 HG_(stats__LockN_to_P_get_map_size)() );
5715
floriana6a6d922015-08-05 11:26:10 +00005716 VG_(printf)("client malloc-ed blocks: %'8u\n",
philipped005b2c2015-04-21 21:58:14 +00005717 VG_(HT_count_nodes)(hg_mallocmeta_table));
5718
philippe8587b542013-12-15 20:24:43 +00005719 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
5720 HG_(stats__string_table_queries),
5721 HG_(stats__string_table_get_map_size)() );
5722 if (HG_(clo_track_lockorders)) {
5723 VG_(printf)(" LAOG: %'8d map size\n",
5724 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
5725 VG_(printf)(" LAOG exposition: %'8d map size\n",
5726 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
5727 }
5728
5729 VG_(printf)(" locks: %'8lu acquires, "
5730 "%'lu releases\n",
5731 stats__lockN_acquires,
5732 stats__lockN_releases
5733 );
5734 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
5735
5736 VG_(printf)("\n");
5737 libhb_shutdown(True); // This in fact only print stats.
5738}
5739
sewardjb4112022007-11-09 22:49:28 +00005740static void hg_fini ( Int exitcode )
5741{
Elliott Hughesed398002017-06-21 14:41:24 -07005742 HG_(xtmemory_report) (VG_(clo_xtree_memory_file), True);
sewardj2d9e8742009-08-07 15:46:56 +00005743 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
5744 VG_(message)(Vg_UserMsg,
5745 "For counts of detected and suppressed errors, "
5746 "rerun with: -v\n");
5747 }
5748
5749 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
5750 && HG_(clo_history_level) >= 2) {
5751 VG_(umsg)(
5752 "Use --history-level=approx or =none to gain increased speed, at\n" );
5753 VG_(umsg)(
5754 "the cost of reduced accuracy of conflicting-access information\n");
5755 }
5756
sewardjb4112022007-11-09 22:49:28 +00005757 if (SHOW_DATA_STRUCTURES)
5758 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00005759 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00005760 all__sanity_check("SK_(fini)");
5761
philippe8587b542013-12-15 20:24:43 +00005762 if (VG_(clo_stats))
5763 hg_print_stats();
sewardjb4112022007-11-09 22:49:28 +00005764}
5765
sewardjf98e1c02008-10-25 16:22:41 +00005766/* FIXME: move these somewhere sane */
5767
5768static
5769void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5770{
5771 Thread* thr;
5772 ThreadId tid;
5773 UWord nActual;
5774 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005775 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005776 tl_assert(thr);
5777 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5778 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
5779 NULL, NULL, 0 );
5780 tl_assert(nActual <= nRequest);
5781 for (; nActual < nRequest; nActual++)
5782 frames[nActual] = 0;
5783}
5784
5785static
sewardj23f12002009-07-24 08:45:08 +00005786ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00005787{
5788 Thread* thr;
5789 ThreadId tid;
5790 ExeContext* ec;
5791 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005792 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005793 tl_assert(thr);
5794 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00005795 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00005796 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00005797 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00005798}
5799
5800
sewardjc1fb9d22011-02-28 09:03:44 +00005801static void hg_post_clo_init ( void )
sewardjb4112022007-11-09 22:49:28 +00005802{
sewardjf98e1c02008-10-25 16:22:41 +00005803 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00005804
sewardjc1fb9d22011-02-28 09:03:44 +00005805 /////////////////////////////////////////////
5806 hbthr_root = libhb_init( for_libhb__get_stacktrace,
5807 for_libhb__get_EC );
5808 /////////////////////////////////////////////
5809
5810
5811 if (HG_(clo_track_lockorders))
5812 laog__init();
5813
5814 initialise_data_structures(hbthr_root);
Elliott Hughesed398002017-06-21 14:41:24 -07005815 if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
5816 // Activate full xtree memory profiling.
5817 VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
sewardjc1fb9d22011-02-28 09:03:44 +00005818}
5819
philippe07c08522014-05-14 20:39:27 +00005820static void hg_info_location (Addr a)
5821{
5822 (void) HG_(get_and_pp_addrdescr) (a);
5823}
5824
sewardjc1fb9d22011-02-28 09:03:44 +00005825static void hg_pre_clo_init ( void )
5826{
sewardjb4112022007-11-09 22:49:28 +00005827 VG_(details_name) ("Helgrind");
5828 VG_(details_version) (NULL);
5829 VG_(details_description) ("a thread error detector");
5830 VG_(details_copyright_author)(
Elliott Hughesed398002017-06-21 14:41:24 -07005831 "Copyright (C) 2007-2017, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00005832 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9c08c0f2011-03-10 15:01:14 +00005833 VG_(details_avg_translation_sizeB) ( 320 );
sewardjb4112022007-11-09 22:49:28 +00005834
5835 VG_(basic_tool_funcs) (hg_post_clo_init,
5836 hg_instrument,
5837 hg_fini);
5838
5839 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00005840 VG_(needs_tool_errors) (HG_(eq_Error),
sewardj24118492009-07-15 14:50:02 +00005841 HG_(before_pp_Error),
sewardjf98e1c02008-10-25 16:22:41 +00005842 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00005843 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00005844 HG_(update_extra),
5845 HG_(recognised_suppression),
5846 HG_(read_extra_suppression_info),
5847 HG_(error_matches_suppression),
5848 HG_(get_error_name),
philippe4e32d672013-10-17 22:10:41 +00005849 HG_(get_extra_suppression_info),
5850 HG_(print_extra_suppression_use),
5851 HG_(update_extra_suppression_use));
sewardjb4112022007-11-09 22:49:28 +00005852
sewardj24118492009-07-15 14:50:02 +00005853 VG_(needs_xml_output) ();
5854
sewardjb4112022007-11-09 22:49:28 +00005855 VG_(needs_command_line_options)(hg_process_cmd_line_option,
5856 hg_print_usage,
5857 hg_print_debug_usage);
5858 VG_(needs_client_requests) (hg_handle_client_request);
5859
5860 // FIXME?
5861 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
5862 // hg_expensive_sanity_check);
5863
philippe8587b542013-12-15 20:24:43 +00005864 VG_(needs_print_stats) (hg_print_stats);
philippe07c08522014-05-14 20:39:27 +00005865 VG_(needs_info_location) (hg_info_location);
philippe8587b542013-12-15 20:24:43 +00005866
sewardjb4112022007-11-09 22:49:28 +00005867 VG_(needs_malloc_replacement) (hg_cli__malloc,
5868 hg_cli____builtin_new,
5869 hg_cli____builtin_vec_new,
5870 hg_cli__memalign,
5871 hg_cli__calloc,
5872 hg_cli__free,
5873 hg_cli____builtin_delete,
5874 hg_cli____builtin_vec_delete,
5875 hg_cli__realloc,
njn8b140de2009-02-17 04:31:18 +00005876 hg_cli_malloc_usable_size,
philipped99c26a2012-07-31 22:17:28 +00005877 HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
sewardjb4112022007-11-09 22:49:28 +00005878
sewardj849b0ed2008-12-21 10:43:10 +00005879 /* 21 Dec 08: disabled this; it mostly causes H to start more
5880 slowly and use significantly more memory, without very often
5881 providing useful results. The user can request to load this
5882 information manually with --read-var-info=yes. */
5883 if (0) VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00005884
5885 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00005886 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
5887 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00005888 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
sewardj1f77fec2010-04-12 19:51:04 +00005889 VG_(track_new_mem_stack) ( evh__new_mem_stack );
sewardjb4112022007-11-09 22:49:28 +00005890
5891 // FIXME: surely this isn't thread-aware
sewardj23f12002009-07-24 08:45:08 +00005892 VG_(track_copy_mem_remap) ( evh__copy_mem );
sewardjb4112022007-11-09 22:49:28 +00005893
5894 VG_(track_change_mem_mprotect) ( evh__set_perms );
5895
5896 VG_(track_die_mem_stack_signal)( evh__die_mem );
sewardjfd35d492011-03-17 19:39:55 +00005897 VG_(track_die_mem_brk) ( evh__die_mem_munmap );
5898 VG_(track_die_mem_munmap) ( evh__die_mem_munmap );
philippefc00a2a2015-05-15 11:41:54 +00005899
5900 /* evh__die_mem calls at the end libhb_srange_noaccess_NoFX
5901 which has no effect. We do not use VG_(track_die_mem_stack),
5902 as this would be an expensive way to do nothing. */
5903 // VG_(track_die_mem_stack) ( evh__die_mem );
sewardjb4112022007-11-09 22:49:28 +00005904
5905 // FIXME: what is this for?
5906 VG_(track_ban_mem_stack) (NULL);
5907
5908 VG_(track_pre_mem_read) ( evh__pre_mem_read );
5909 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
5910 VG_(track_pre_mem_write) ( evh__pre_mem_write );
5911 VG_(track_post_mem_write) (NULL);
5912
5913 /////////////////
5914
5915 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
5916 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
5917
5918 VG_(track_start_client_code)( evh__start_client_code );
5919 VG_(track_stop_client_code)( evh__stop_client_code );
5920
sewardjb4112022007-11-09 22:49:28 +00005921 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
5922 as described in comments at the top of pub_tool_hashtable.h, are
5923 met. Blargh. */
5924 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
5925 tl_assert( sizeof(UWord) == sizeof(Addr) );
5926 hg_mallocmeta_table
5927 = VG_(HT_construct)( "hg_malloc_metadata_table" );
5928
philippe5fbc9762013-12-01 19:28:48 +00005929 MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
5930 1000,
5931 HG_(zalloc),
5932 "hg_malloc_metadata_pool",
5933 HG_(free));
5934
sewardj61bc2c52011-02-09 10:34:00 +00005935 // add a callback to clean up on (threaded) fork.
5936 VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
sewardjb4112022007-11-09 22:49:28 +00005937}
5938
/* Registers hg_pre_clo_init as this tool's initialisation entry point and
   emits the interface-version stamp the Valgrind core uses to check that
   the tool was built against a compatible core/tool interface.  Must
   appear exactly once per tool (see pub_tool_basics.h). */
VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5940
5941/*--------------------------------------------------------------------*/
5942/*--- end hg_main.c ---*/
5943/*--------------------------------------------------------------------*/