blob: 40fbbe3fd8ea5be16e33df40b9808f1d2dc2f7a3 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj03f8d3f2012-08-05 15:46:46 +000011 Copyright (C) 2007-2012 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
sewardj03f8d3f2012-08-05 15:46:46 +000014 Copyright (C) 2007-2012 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj61bc2c52011-02-09 10:34:00 +000055#include "pub_tool_libcproc.h" // VG_(atfork)
sewardj234e5582011-02-09 12:47:23 +000056#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
sewardjb4112022007-11-09 22:49:28 +000057
sewardjf98e1c02008-10-25 16:22:41 +000058#include "hg_basics.h"
59#include "hg_wordset.h"
60#include "hg_lock_n_thread.h"
61#include "hg_errors.h"
62
63#include "libhb.h"
64
sewardjb4112022007-11-09 22:49:28 +000065#include "helgrind.h"
66
sewardjf98e1c02008-10-25 16:22:41 +000067
68// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
69
70// FIXME: when client destroys a lock or a CV, remove these
71// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000072
73/*----------------------------------------------------------------*/
74/*--- ---*/
75/*----------------------------------------------------------------*/
76
sewardj11e352f2007-11-30 11:11:02 +000077/* Note this needs to be compiled with -fno-strict-aliasing, since it
78 contains a whole bunch of calls to lookupFM etc which cast between
79 Word and pointer types. gcc rightly complains this breaks ANSI C
80 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
81 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000082*/
sewardjb4112022007-11-09 22:49:28 +000083
84// FIXME what is supposed to happen to locks in memory which
85// is relocated as a result of client realloc?
86
sewardjb4112022007-11-09 22:49:28 +000087// FIXME put referencing ThreadId into Thread and get
88// rid of the slow reverse mapping function.
89
90// FIXME accesses to NoAccess areas: change state to Excl?
91
92// FIXME report errors for accesses of NoAccess memory?
93
94// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
95// the thread still holds the lock.
96
97/* ------------ Debug/trace options ------------ */
98
sewardjb4112022007-11-09 22:49:28 +000099// 0 for silent, 1 for some stuff, 2 for lots of stuff
100#define SHOW_EVENTS 0
101
sewardjb4112022007-11-09 22:49:28 +0000102
florian6bf37262012-10-21 03:23:36 +0000103static void all__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000104
philipped99c26a2012-07-31 22:17:28 +0000105#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
sewardjb4112022007-11-09 22:49:28 +0000106
107// 0 for none, 1 for dump at end of run
108#define SHOW_DATA_STRUCTURES 0
109
110
sewardjb4112022007-11-09 22:49:28 +0000111/* ------------ Misc comments ------------ */
112
113// FIXME: don't hardwire initial entries for root thread.
114// Instead, let the pre_thread_ll_create handler do this.
115
sewardjb4112022007-11-09 22:49:28 +0000116
117/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000118/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000119/*----------------------------------------------------------------*/
120
sewardjb4112022007-11-09 22:49:28 +0000121/* Admin linked list of Threads */
122static Thread* admin_threads = NULL;
sewardjffce8152011-06-24 10:09:41 +0000123Thread* get_admin_threads ( void ) { return admin_threads; }
sewardjb4112022007-11-09 22:49:28 +0000124
sewardj1d7c3322011-02-28 09:22:51 +0000125/* Admin double linked list of Locks */
126/* We need a double linked list to properly and efficiently
127 handle del_LockN. */
sewardjb4112022007-11-09 22:49:28 +0000128static Lock* admin_locks = NULL;
129
sewardjb4112022007-11-09 22:49:28 +0000130/* Mapping table for core ThreadIds to Thread* */
131static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
132
sewardjb4112022007-11-09 22:49:28 +0000133/* Mapping table for lock guest addresses to Lock* */
134static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
135
sewardj0f64c9e2011-03-10 17:40:22 +0000136/* The word-set universes for lock sets. */
sewardjb4112022007-11-09 22:49:28 +0000137static WordSetU* univ_lsets = NULL; /* sets of Lock* */
138static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
sewardj866c80c2011-10-22 19:29:51 +0000139static Int next_gc_univ_laog = 1;
140/* univ_laog will be garbaged collected when the nr of element in univ_laog is
141 >= next_gc_univ_laog. */
sewardjb4112022007-11-09 22:49:28 +0000142
sewardjffce8152011-06-24 10:09:41 +0000143/* Allow libhb to get at the universe of locksets stored
144 here. Sigh. */
145WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
146
147/* Allow libhb to get at the list of locks stored here. Ditto
148 sigh. */
149Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
150
sewardjb4112022007-11-09 22:49:28 +0000151
152/*----------------------------------------------------------------*/
153/*--- Simple helpers for the data structures ---*/
154/*----------------------------------------------------------------*/
155
156static UWord stats__lockN_acquires = 0;
157static UWord stats__lockN_releases = 0;
158
sewardjf98e1c02008-10-25 16:22:41 +0000159static
160ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000161
162/* --------- Constructors --------- */
163
sewardjf98e1c02008-10-25 16:22:41 +0000164static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000165 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000166 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000167 thread->locksetA = HG_(emptyWS)( univ_lsets );
168 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000169 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000170 thread->hbthr = hbthr;
171 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000172 thread->created_at = NULL;
173 thread->announced = False;
174 thread->errmsg_index = indx++;
175 thread->admin = admin_threads;
176 admin_threads = thread;
177 return thread;
178}
sewardjf98e1c02008-10-25 16:22:41 +0000179
sewardjb4112022007-11-09 22:49:28 +0000180// Make a new lock which is unlocked (hence ownerless)
sewardj1d7c3322011-02-28 09:22:51 +0000181// and insert the new lock in admin_locks double linked list.
sewardjb4112022007-11-09 22:49:28 +0000182static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
183 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000184 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardj0f64c9e2011-03-10 17:40:22 +0000185 /* begin: add to double linked list */
sewardj1d7c3322011-02-28 09:22:51 +0000186 if (admin_locks)
187 admin_locks->admin_prev = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000188 lock->admin_next = admin_locks;
189 lock->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000190 admin_locks = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000191 /* end: add */
sewardjb4112022007-11-09 22:49:28 +0000192 lock->unique = unique++;
193 lock->magic = LockN_MAGIC;
194 lock->appeared_at = NULL;
195 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000196 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000197 lock->guestaddr = guestaddr;
198 lock->kind = kind;
199 lock->heldW = False;
200 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000201 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000202 return lock;
203}
sewardjb4112022007-11-09 22:49:28 +0000204
205/* Release storage for a Lock. Also release storage in .heldBy, if
sewardj1d7c3322011-02-28 09:22:51 +0000206 any. Removes from admin_locks double linked list. */
sewardjb4112022007-11-09 22:49:28 +0000207static void del_LockN ( Lock* lk )
208{
sewardjf98e1c02008-10-25 16:22:41 +0000209 tl_assert(HG_(is_sane_LockN)(lk));
210 tl_assert(lk->hbso);
211 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000212 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000213 VG_(deleteBag)( lk->heldBy );
sewardj0f64c9e2011-03-10 17:40:22 +0000214 /* begin: del lock from double linked list */
215 if (lk == admin_locks) {
216 tl_assert(lk->admin_prev == NULL);
217 if (lk->admin_next)
218 lk->admin_next->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000219 admin_locks = lk->admin_next;
sewardj1d7c3322011-02-28 09:22:51 +0000220 }
221 else {
sewardj0f64c9e2011-03-10 17:40:22 +0000222 tl_assert(lk->admin_prev != NULL);
sewardj1d7c3322011-02-28 09:22:51 +0000223 lk->admin_prev->admin_next = lk->admin_next;
sewardj0f64c9e2011-03-10 17:40:22 +0000224 if (lk->admin_next)
225 lk->admin_next->admin_prev = lk->admin_prev;
sewardj1d7c3322011-02-28 09:22:51 +0000226 }
sewardj0f64c9e2011-03-10 17:40:22 +0000227 /* end: del */
sewardjb4112022007-11-09 22:49:28 +0000228 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000229 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000230}
231
232/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
233 it. This is done strictly: only combinations resulting from
234 correct program and libpthread behaviour are allowed. */
235static void lockN_acquire_writer ( Lock* lk, Thread* thr )
236{
sewardjf98e1c02008-10-25 16:22:41 +0000237 tl_assert(HG_(is_sane_LockN)(lk));
238 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000239
240 stats__lockN_acquires++;
241
242 /* EXPOSITION only */
243 /* We need to keep recording snapshots of where the lock was
244 acquired, so as to produce better lock-order error messages. */
245 if (lk->acquired_at == NULL) {
246 ThreadId tid;
247 tl_assert(lk->heldBy == NULL);
248 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
249 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000250 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000251 } else {
252 tl_assert(lk->heldBy != NULL);
253 }
254 /* end EXPOSITION only */
255
256 switch (lk->kind) {
257 case LK_nonRec:
258 case_LK_nonRec:
259 tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
260 tl_assert(!lk->heldW);
261 lk->heldW = True;
sewardjf98e1c02008-10-25 16:22:41 +0000262 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
florian6bf37262012-10-21 03:23:36 +0000263 VG_(addToBag)( lk->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +0000264 break;
265 case LK_mbRec:
266 if (lk->heldBy == NULL)
267 goto case_LK_nonRec;
268 /* 2nd and subsequent locking of a lock by its owner */
269 tl_assert(lk->heldW);
270 /* assert: lk is only held by one thread .. */
sewardj896f6f92008-08-19 08:38:52 +0000271 tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
sewardjb4112022007-11-09 22:49:28 +0000272 /* assert: .. and that thread is 'thr'. */
florian6bf37262012-10-21 03:23:36 +0000273 tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
sewardj896f6f92008-08-19 08:38:52 +0000274 == VG_(sizeTotalBag)(lk->heldBy));
florian6bf37262012-10-21 03:23:36 +0000275 VG_(addToBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000276 break;
277 case LK_rdwr:
278 tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
279 goto case_LK_nonRec;
280 default:
281 tl_assert(0);
282 }
sewardjf98e1c02008-10-25 16:22:41 +0000283 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000284}
285
286static void lockN_acquire_reader ( Lock* lk, Thread* thr )
287{
sewardjf98e1c02008-10-25 16:22:41 +0000288 tl_assert(HG_(is_sane_LockN)(lk));
289 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000290 /* can only add reader to a reader-writer lock. */
291 tl_assert(lk->kind == LK_rdwr);
292 /* lk must be free or already r-held. */
293 tl_assert(lk->heldBy == NULL
294 || (lk->heldBy != NULL && !lk->heldW));
295
296 stats__lockN_acquires++;
297
298 /* EXPOSITION only */
299 /* We need to keep recording snapshots of where the lock was
300 acquired, so as to produce better lock-order error messages. */
301 if (lk->acquired_at == NULL) {
302 ThreadId tid;
303 tl_assert(lk->heldBy == NULL);
304 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
305 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000306 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000307 } else {
308 tl_assert(lk->heldBy != NULL);
309 }
310 /* end EXPOSITION only */
311
312 if (lk->heldBy) {
florian6bf37262012-10-21 03:23:36 +0000313 VG_(addToBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000314 } else {
315 lk->heldW = False;
sewardjf98e1c02008-10-25 16:22:41 +0000316 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
florian6bf37262012-10-21 03:23:36 +0000317 VG_(addToBag)( lk->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +0000318 }
319 tl_assert(!lk->heldW);
sewardjf98e1c02008-10-25 16:22:41 +0000320 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000321}
322
323/* Update 'lk' to reflect a release of it by 'thr'. This is done
324 strictly: only combinations resulting from correct program and
325 libpthread behaviour are allowed. */
326
327static void lockN_release ( Lock* lk, Thread* thr )
328{
329 Bool b;
sewardjf98e1c02008-10-25 16:22:41 +0000330 tl_assert(HG_(is_sane_LockN)(lk));
331 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000332 /* lock must be held by someone */
333 tl_assert(lk->heldBy);
334 stats__lockN_releases++;
335 /* Remove it from the holder set */
florian6bf37262012-10-21 03:23:36 +0000336 b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000337 /* thr must actually have been a holder of lk */
338 tl_assert(b);
339 /* normalise */
340 tl_assert(lk->acquired_at);
sewardj896f6f92008-08-19 08:38:52 +0000341 if (VG_(isEmptyBag)(lk->heldBy)) {
342 VG_(deleteBag)(lk->heldBy);
sewardjb4112022007-11-09 22:49:28 +0000343 lk->heldBy = NULL;
344 lk->heldW = False;
345 lk->acquired_at = NULL;
346 }
sewardjf98e1c02008-10-25 16:22:41 +0000347 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000348}
349
350static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
351{
352 Thread* thr;
353 if (!lk->heldBy) {
354 tl_assert(!lk->heldW);
355 return;
356 }
357 /* for each thread that holds this lock do ... */
sewardj896f6f92008-08-19 08:38:52 +0000358 VG_(initIterBag)( lk->heldBy );
florian6bf37262012-10-21 03:23:36 +0000359 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
sewardjf98e1c02008-10-25 16:22:41 +0000360 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000361 tl_assert(HG_(elemWS)( univ_lsets,
florian6bf37262012-10-21 03:23:36 +0000362 thr->locksetA, (UWord)lk ));
sewardjb4112022007-11-09 22:49:28 +0000363 thr->locksetA
florian6bf37262012-10-21 03:23:36 +0000364 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +0000365
366 if (lk->heldW) {
367 tl_assert(HG_(elemWS)( univ_lsets,
florian6bf37262012-10-21 03:23:36 +0000368 thr->locksetW, (UWord)lk ));
sewardjb4112022007-11-09 22:49:28 +0000369 thr->locksetW
florian6bf37262012-10-21 03:23:36 +0000370 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +0000371 }
372 }
sewardj896f6f92008-08-19 08:38:52 +0000373 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000374}
375
sewardjb4112022007-11-09 22:49:28 +0000376
377/*----------------------------------------------------------------*/
378/*--- Print out the primary data structures ---*/
379/*----------------------------------------------------------------*/
380
sewardjb4112022007-11-09 22:49:28 +0000381#define PP_THREADS (1<<1)
382#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000383#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000384
385
386static const Int sHOW_ADMIN = 0;
387
388static void space ( Int n )
389{
390 Int i;
florian6bf37262012-10-21 03:23:36 +0000391 HChar spaces[128+1];
sewardjb4112022007-11-09 22:49:28 +0000392 tl_assert(n >= 0 && n < 128);
393 if (n == 0)
394 return;
395 for (i = 0; i < n; i++)
396 spaces[i] = ' ';
397 spaces[i] = 0;
398 tl_assert(i < 128+1);
399 VG_(printf)("%s", spaces);
400}
401
402static void pp_Thread ( Int d, Thread* t )
403{
404 space(d+0); VG_(printf)("Thread %p {\n", t);
405 if (sHOW_ADMIN) {
406 space(d+3); VG_(printf)("admin %p\n", t->admin);
407 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
408 }
409 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
410 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
sewardjb4112022007-11-09 22:49:28 +0000411 space(d+0); VG_(printf)("}\n");
412}
413
414static void pp_admin_threads ( Int d )
415{
416 Int i, n;
417 Thread* t;
418 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
419 /* nothing */
420 }
421 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
422 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
423 if (0) {
424 space(n);
425 VG_(printf)("admin_threads record %d of %d:\n", i, n);
426 }
427 pp_Thread(d+3, t);
428 }
barta0b6b2c2008-07-07 06:49:24 +0000429 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000430}
431
432static void pp_map_threads ( Int d )
433{
njn4c245e52009-03-15 23:25:38 +0000434 Int i, n = 0;
sewardjb4112022007-11-09 22:49:28 +0000435 space(d); VG_(printf)("map_threads ");
sewardjb4112022007-11-09 22:49:28 +0000436 for (i = 0; i < VG_N_THREADS; i++) {
437 if (map_threads[i] != NULL)
438 n++;
439 }
440 VG_(printf)("(%d entries) {\n", n);
441 for (i = 0; i < VG_N_THREADS; i++) {
442 if (map_threads[i] == NULL)
443 continue;
444 space(d+3);
445 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
446 }
447 space(d); VG_(printf)("}\n");
448}
449
450static const HChar* show_LockKind ( LockKind lkk ) {
451 switch (lkk) {
452 case LK_mbRec: return "mbRec";
453 case LK_nonRec: return "nonRec";
454 case LK_rdwr: return "rdwr";
455 default: tl_assert(0);
456 }
457}
458
459static void pp_Lock ( Int d, Lock* lk )
460{
barta0b6b2c2008-07-07 06:49:24 +0000461 space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
sewardjb4112022007-11-09 22:49:28 +0000462 if (sHOW_ADMIN) {
sewardj1d7c3322011-02-28 09:22:51 +0000463 space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
464 space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
465 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
sewardjb4112022007-11-09 22:49:28 +0000466 }
467 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
468 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
469 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
470 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
471 if (lk->heldBy) {
472 Thread* thr;
florian6bf37262012-10-21 03:23:36 +0000473 UWord count;
sewardjb4112022007-11-09 22:49:28 +0000474 VG_(printf)(" { ");
sewardj896f6f92008-08-19 08:38:52 +0000475 VG_(initIterBag)( lk->heldBy );
florian6bf37262012-10-21 03:23:36 +0000476 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count ))
sewardjb4112022007-11-09 22:49:28 +0000477 VG_(printf)("%lu:%p ", count, thr);
sewardj896f6f92008-08-19 08:38:52 +0000478 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000479 VG_(printf)("}");
480 }
481 VG_(printf)("\n");
482 space(d+0); VG_(printf)("}\n");
483}
484
485static void pp_admin_locks ( Int d )
486{
487 Int i, n;
488 Lock* lk;
sewardj1d7c3322011-02-28 09:22:51 +0000489 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000490 /* nothing */
491 }
492 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
sewardj1d7c3322011-02-28 09:22:51 +0000493 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000494 if (0) {
495 space(n);
496 VG_(printf)("admin_locks record %d of %d:\n", i, n);
497 }
498 pp_Lock(d+3, lk);
499 }
barta0b6b2c2008-07-07 06:49:24 +0000500 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000501}
502
503static void pp_map_locks ( Int d )
504{
505 void* gla;
506 Lock* lk;
507 space(d); VG_(printf)("map_locks (%d entries) {\n",
sewardj896f6f92008-08-19 08:38:52 +0000508 (Int)VG_(sizeFM)( map_locks ));
509 VG_(initIterFM)( map_locks );
florian6bf37262012-10-21 03:23:36 +0000510 while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
511 (UWord*)&lk )) {
sewardjb4112022007-11-09 22:49:28 +0000512 space(d+3);
513 VG_(printf)("guest %p -> Lock %p\n", gla, lk);
514 }
sewardj896f6f92008-08-19 08:38:52 +0000515 VG_(doneIterFM)( map_locks );
sewardjb4112022007-11-09 22:49:28 +0000516 space(d); VG_(printf)("}\n");
517}
518
florian6bf37262012-10-21 03:23:36 +0000519static void pp_everything ( Int flags, const HChar* caller )
sewardjb4112022007-11-09 22:49:28 +0000520{
521 Int d = 0;
522 VG_(printf)("\n");
523 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
524 if (flags & PP_THREADS) {
525 VG_(printf)("\n");
526 pp_admin_threads(d+3);
527 VG_(printf)("\n");
528 pp_map_threads(d+3);
529 }
530 if (flags & PP_LOCKS) {
531 VG_(printf)("\n");
532 pp_admin_locks(d+3);
533 VG_(printf)("\n");
534 pp_map_locks(d+3);
535 }
sewardjb4112022007-11-09 22:49:28 +0000536
537 VG_(printf)("\n");
538 VG_(printf)("}\n");
539 VG_(printf)("\n");
540}
541
542#undef SHOW_ADMIN
543
544
545/*----------------------------------------------------------------*/
546/*--- Initialise the primary data structures ---*/
547/*----------------------------------------------------------------*/
548
sewardjf98e1c02008-10-25 16:22:41 +0000549static void initialise_data_structures ( Thr* hbthr_root )
sewardjb4112022007-11-09 22:49:28 +0000550{
sewardjb4112022007-11-09 22:49:28 +0000551 Thread* thr;
sewardjffce8152011-06-24 10:09:41 +0000552 WordSetID wsid;
sewardjb4112022007-11-09 22:49:28 +0000553
554 /* Get everything initialised and zeroed. */
555 tl_assert(admin_threads == NULL);
556 tl_assert(admin_locks == NULL);
sewardjb4112022007-11-09 22:49:28 +0000557
sewardjb4112022007-11-09 22:49:28 +0000558 tl_assert(map_threads == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000559 map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
sewardjb4112022007-11-09 22:49:28 +0000560 tl_assert(map_threads != NULL);
561
florian6bf37262012-10-21 03:23:36 +0000562 tl_assert(sizeof(Addr) == sizeof(UWord));
sewardjb4112022007-11-09 22:49:28 +0000563 tl_assert(map_locks == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000564 map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
565 NULL/*unboxed Word cmp*/);
sewardjb4112022007-11-09 22:49:28 +0000566 tl_assert(map_locks != NULL);
567
sewardjb4112022007-11-09 22:49:28 +0000568 tl_assert(univ_lsets == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000569 univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
570 8/*cacheSize*/ );
sewardjb4112022007-11-09 22:49:28 +0000571 tl_assert(univ_lsets != NULL);
sewardjffce8152011-06-24 10:09:41 +0000572 /* Ensure that univ_lsets is non-empty, with lockset zero being the
573 empty lockset. hg_errors.c relies on the assumption that
574 lockset number zero in univ_lsets is always valid. */
575 wsid = HG_(emptyWS)(univ_lsets);
576 tl_assert(wsid == 0);
sewardjb4112022007-11-09 22:49:28 +0000577
578 tl_assert(univ_laog == NULL);
sewardjc1fb9d22011-02-28 09:03:44 +0000579 if (HG_(clo_track_lockorders)) {
580 univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
581 HG_(free), 24/*cacheSize*/ );
582 tl_assert(univ_laog != NULL);
583 }
sewardjb4112022007-11-09 22:49:28 +0000584
585 /* Set up entries for the root thread */
586 // FIXME: this assumes that the first real ThreadId is 1
587
sewardjb4112022007-11-09 22:49:28 +0000588 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +0000589 thr = mk_Thread(hbthr_root);
590 thr->coretid = 1; /* FIXME: hardwires an assumption about the
591 identity of the root thread. */
sewardj60626642011-03-10 15:14:37 +0000592 tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
593 libhb_set_Thr_hgthread(hbthr_root, thr);
sewardjb4112022007-11-09 22:49:28 +0000594
sewardjf98e1c02008-10-25 16:22:41 +0000595 /* and bind it in the thread-map table. */
596 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
597 tl_assert(thr->coretid != VG_INVALID_THREADID);
sewardjb4112022007-11-09 22:49:28 +0000598
sewardjf98e1c02008-10-25 16:22:41 +0000599 map_threads[thr->coretid] = thr;
sewardjb4112022007-11-09 22:49:28 +0000600
601 tl_assert(VG_INVALID_THREADID == 0);
602
sewardjb4112022007-11-09 22:49:28 +0000603 all__sanity_check("initialise_data_structures");
604}
605
606
607/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000608/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000609/*----------------------------------------------------------------*/
610
611/* Doesn't assert if the relevant map_threads entry is NULL. */
612static Thread* map_threads_maybe_lookup ( ThreadId coretid )
613{
614 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000615 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000616 thr = map_threads[coretid];
617 return thr;
618}
619
620/* Asserts if the relevant map_threads entry is NULL. */
621static inline Thread* map_threads_lookup ( ThreadId coretid )
622{
623 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000624 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000625 thr = map_threads[coretid];
626 tl_assert(thr);
627 return thr;
628}
629
sewardjf98e1c02008-10-25 16:22:41 +0000630/* Do a reverse lookup. Does not assert if 'thr' is not found in
631 map_threads. */
sewardjb4112022007-11-09 22:49:28 +0000632static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
633{
sewardjf98e1c02008-10-25 16:22:41 +0000634 ThreadId tid;
635 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000636 /* Check nobody used the invalid-threadid slot */
637 tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
638 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000639 tid = thr->coretid;
640 tl_assert(HG_(is_sane_ThreadId)(tid));
641 return tid;
sewardjb4112022007-11-09 22:49:28 +0000642}
643
644/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
645 is not found in map_threads. */
646static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
647{
648 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
649 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000650 tl_assert(map_threads[tid]);
651 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000652 return tid;
653}
654
655static void map_threads_delete ( ThreadId coretid )
656{
657 Thread* thr;
658 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000659 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000660 thr = map_threads[coretid];
661 tl_assert(thr);
662 map_threads[coretid] = NULL;
663}
664
665
666/*----------------------------------------------------------------*/
667/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
668/*----------------------------------------------------------------*/
669
670/* Make sure there is a lock table entry for the given (lock) guest
671 address. If not, create one of the stated 'kind' in unheld state.
672 In any case, return the address of the existing or new Lock. */
673static
674Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
675{
676 Bool found;
677 Lock* oldlock = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000678 tl_assert(HG_(is_sane_ThreadId)(tid));
sewardj896f6f92008-08-19 08:38:52 +0000679 found = VG_(lookupFM)( map_locks,
florian6bf37262012-10-21 03:23:36 +0000680 NULL, (UWord*)&oldlock, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000681 if (!found) {
682 Lock* lock = mk_LockN(lkk, ga);
683 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
sewardjf98e1c02008-10-25 16:22:41 +0000684 tl_assert(HG_(is_sane_LockN)(lock));
florian6bf37262012-10-21 03:23:36 +0000685 VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +0000686 tl_assert(oldlock == NULL);
sewardjb4112022007-11-09 22:49:28 +0000687 return lock;
688 } else {
689 tl_assert(oldlock != NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000690 tl_assert(HG_(is_sane_LockN)(oldlock));
sewardjb4112022007-11-09 22:49:28 +0000691 tl_assert(oldlock->guestaddr == ga);
sewardjb4112022007-11-09 22:49:28 +0000692 return oldlock;
693 }
694}
695
696static Lock* map_locks_maybe_lookup ( Addr ga )
697{
698 Bool found;
699 Lock* lk = NULL;
florian6bf37262012-10-21 03:23:36 +0000700 found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000701 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000702 return lk;
703}
704
705static void map_locks_delete ( Addr ga )
706{
707 Addr ga2 = 0;
708 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000709 VG_(delFromFM)( map_locks,
florian6bf37262012-10-21 03:23:36 +0000710 (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000711 /* delFromFM produces the val which is being deleted, if it is
712 found. So assert it is non-null; that in effect asserts that we
713 are deleting a (ga, Lock) pair which actually exists. */
714 tl_assert(lk != NULL);
715 tl_assert(ga2 == ga);
716}
717
718
sewardjb4112022007-11-09 22:49:28 +0000719
720/*----------------------------------------------------------------*/
721/*--- Sanity checking the data structures ---*/
722/*----------------------------------------------------------------*/
723
/* Number of times the sanity checks have been run (incremented in
   all_except_Locks__sanity_check). */
static UWord stats__sanity_checks = 0;

/* Defined later in this file. */
static void laog__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000727
728/* REQUIRED INVARIANTS:
729
730 Thread vs Segment/Lock/SecMaps
731
732 for each t in Threads {
733
734 // Thread.lockset: each element is really a valid Lock
735
736 // Thread.lockset: each Lock in set is actually held by that thread
737 for lk in Thread.lockset
738 lk == LockedBy(t)
739
740 // Thread.csegid is a valid SegmentID
741 // and the associated Segment has .thr == t
742
743 }
744
745 all thread Locksets are pairwise empty under intersection
746 (that is, no lock is claimed to be held by more than one thread)
747 -- this is guaranteed if all locks in locksets point back to their
748 owner threads
749
750 Lock vs Thread/Segment/SecMaps
751
752 for each entry (gla, la) in map_locks
753 gla == la->guest_addr
754
755 for each lk in Locks {
756
757 lk->tag is valid
758 lk->guest_addr does not have shadow state NoAccess
759 if lk == LockedBy(t), then t->lockset contains lk
760 if lk == UnlockedBy(segid) then segid is valid SegmentID
761 and can be mapped to a valid Segment(seg)
762 and seg->thr->lockset does not contain lk
763 if lk == UnlockedNew then (no lockset contains lk)
764
765 secmaps for lk has .mbHasLocks == True
766
767 }
768
769 Segment vs Thread/Lock/SecMaps
770
771 the Segment graph is a dag (no cycles)
772 all of the Segment graph must be reachable from the segids
773 mentioned in the Threads
774
775 for seg in Segments {
776
777 seg->thr is a sane Thread
778
779 }
780
781 SecMaps vs Segment/Thread/Lock
782
783 for sm in SecMaps {
784
785 sm properly aligned
786 if any shadow word is ShR or ShM then .mbHasShared == True
787
788 for each Excl(segid) state
789 map_segments_lookup maps to a sane Segment(seg)
790 for each ShM/ShR(tsetid,lsetid) state
791 each lk in lset is a valid Lock
792 each thr in tset is a valid thread, which is non-dead
793
794 }
795*/
796
797
798/* Return True iff 'thr' holds 'lk' in some mode. */
799static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
800{
801 if (lk->heldBy)
florian6bf37262012-10-21 03:23:36 +0000802 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000803 else
804 return False;
805}
806
/* Sanity check Threads, as far as possible.  Walks the admin_threads
   list and checks each thread and its locksets.  On the first
   violation, prints 'who' (caller's tag) plus a short failure code
   and asserts. */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
/* Record the failure code and jump to the common failure path. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   UWord ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
840
841
/* Sanity check Locks, as far as possible.  Cross-checks admin_locks
   against map_locks, and for every held lock, checks the holders'
   locksets agree with the lock's own holder bag.  On the first
   violation, prints 'who' plus a short failure code and asserts. */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
/* Record the failure code and jump to the common failure path. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   // gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy, 
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk)) 
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW 
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk)) 
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk)) 
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
906
907
florian6bf37262012-10-21 03:23:36 +0000908static void all_except_Locks__sanity_check ( const HChar* who ) {
sewardjb4112022007-11-09 22:49:28 +0000909 stats__sanity_checks++;
910 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
911 threads__sanity_check(who);
sewardjc1fb9d22011-02-28 09:03:44 +0000912 if (HG_(clo_track_lockorders))
913 laog__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000914}
florian6bf37262012-10-21 03:23:36 +0000915static void all__sanity_check ( const HChar* who ) {
sewardjb4112022007-11-09 22:49:28 +0000916 all_except_Locks__sanity_check(who);
917 locks__sanity_check(who);
918}
919
920
921/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +0000922/*--- Shadow value and address range handlers ---*/
923/*----------------------------------------------------------------*/
924
/* Forward declarations for functions defined later in this file. */
static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000930
sewardjb4112022007-11-09 22:49:28 +0000931
932/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +0000933/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
934 Is that a problem? (hence 'scopy' rather than 'ccopy') */
935static void shadow_mem_scopy_range ( Thread* thr,
936 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +0000937{
938 Thr* hbthr = thr->hbthr;
939 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000940 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +0000941}
942
sewardj23f12002009-07-24 08:45:08 +0000943static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
944{
sewardjf98e1c02008-10-25 16:22:41 +0000945 Thr* hbthr = thr->hbthr;
946 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000947 LIBHB_CREAD_N(hbthr, a, len);
948}
949
950static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
951 Thr* hbthr = thr->hbthr;
952 tl_assert(hbthr);
953 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +0000954}
955
/* Paint [a, a+len) as freshly created ("New") in libhb's shadow state. */
static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}
960
sewardjfd35d492011-03-17 19:39:55 +0000961static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
sewardjb4112022007-11-09 22:49:28 +0000962{
sewardjb4112022007-11-09 22:49:28 +0000963 if (0 && len > 500)
sewardjfd35d492011-03-17 19:39:55 +0000964 VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
965 // has no effect (NoFX)
966 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
967}
968
969static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
970{
971 if (0 && len > 500)
972 VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
973 // Actually Has An Effect (AHAE)
974 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +0000975}
976
sewardj406bac82010-03-03 23:03:40 +0000977static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
978{
979 if (0 && len > 500)
980 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
981 libhb_srange_untrack( thr->hbthr, aIN, len );
982}
983
sewardjb4112022007-11-09 22:49:28 +0000984
985/*----------------------------------------------------------------*/
986/*--- Event handlers (evh__* functions) ---*/
987/*--- plus helpers (evhH__* functions) ---*/
988/*----------------------------------------------------------------*/
989
990/*--------- Event handler helpers (evhH__* functions) ---------*/
991
992/* Create a new segment for 'thr', making it depend (.prev) on its
993 existing segment, bind together the SegmentID and Segment, and
994 return both of them. Also update 'thr' so it references the new
995 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +0000996//zz static
997//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
998//zz /*OUT*/Segment** new_segP,
999//zz Thread* thr )
1000//zz {
1001//zz Segment* cur_seg;
1002//zz tl_assert(new_segP);
1003//zz tl_assert(new_segidP);
1004//zz tl_assert(HG_(is_sane_Thread)(thr));
1005//zz cur_seg = map_segments_lookup( thr->csegid );
1006//zz tl_assert(cur_seg);
1007//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1008//zz at their owner thread. */
1009//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1010//zz *new_segidP = alloc_SegmentID();
1011//zz map_segments_add( *new_segidP, *new_segP );
1012//zz thr->csegid = *new_segidP;
1013//zz }
sewardjb4112022007-11-09 22:49:28 +00001014
1015
1016/* The lock at 'lock_ga' has acquired a writer. Make all necessary
1017 updates, and also do all possible error checks. */
/* Precondition: the guest has just successfully w-acquired the lock
   at 'lock_ga'; update helgrind's state to match, and report any
   inconsistencies as libpthread bugs. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr, 
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk; 

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine. 

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1111
1112
1113/* The lock at 'lock_ga' has acquired a reader. Make all necessary
1114 updates, and also do all possible error checks. */
/* Precondition: the guest has just successfully r-acquired the rwlock
   at 'lock_ga'; update helgrind's state to match, and report any
   inconsistencies as libpthread bugs. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr, 
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk; 

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine. 

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1186
1187
1188/* The lock at 'lock_ga' is just about to be unlocked. Make all
1189 necessary updates, and also do all possible error checks. */
/* Called just BEFORE the guest releases the lock at 'lock_ga' (unlike
   the acquire handlers, which run after), so the request has not yet
   been validated by libpthread; invalid unlocks are reported as
   client bugs rather than libpthread bugs. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word n;        /* how many times 'thr' holds the lock */
   Bool was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   /* Remember the pre-release mode: it decides strong vs weak send
      below, after lockN_release may have changed lock->heldW. */
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr )); 
      /* We still hold the lock.  So either it's a recursive lock 
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can.  */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1326
1327
sewardj9f569b72008-11-13 13:33:09 +00001328/* ---------------------------------------------------------- */
1329/* -------- Event handlers proper (evh__* functions) -------- */
1330/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001331
1332/* What is the Thread* for the currently running thread? This is
1333 absolutely performance critical. We receive notifications from the
1334 core for client code starts/stops, and cache the looked-up result
1335 in 'current_Thread'. Hence, for the vast majority of requests,
1336 finding the current thread reduces to a read of a global variable,
1337 provided get_current_Thread_in_C_C is inlined.
1338
1339 Outside of client code, current_Thread is NULL, and presumably
1340 any uses of it will cause a segfault. Hence:
1341
1342 - for uses definitely within client code, use
1343 get_current_Thread_in_C_C.
1344
1345 - for all other uses, use get_current_Thread.
1346*/
1347
sewardj23f12002009-07-24 08:45:08 +00001348static Thread *current_Thread = NULL,
1349 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001350
1351static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1352 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1353 tl_assert(current_Thread == NULL);
1354 current_Thread = map_threads_lookup( tid );
1355 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001356 if (current_Thread != current_Thread_prev) {
1357 libhb_Thr_resumes( current_Thread->hbthr );
1358 current_Thread_prev = current_Thread;
1359 }
sewardjb4112022007-11-09 22:49:28 +00001360}
1361static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1362 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1363 tl_assert(current_Thread != NULL);
1364 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001365 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001366}
/* Fast path: return the cached current Thread.  Only meaningful while
   executing client code; outside client code this is NULL. */
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
/* Return the current Thread: the cached value if in client code,
   otherwise fall back to a lookup via the core's running tid. */
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread* thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
1387
1388static
1389void evh__new_mem ( Addr a, SizeT len ) {
1390 if (SHOW_EVENTS >= 2)
1391 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1392 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001393 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001394 all__sanity_check("evh__new_mem-post");
1395}
1396
1397static
sewardj1f77fec2010-04-12 19:51:04 +00001398void evh__new_mem_stack ( Addr a, SizeT len ) {
1399 if (SHOW_EVENTS >= 2)
1400 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1401 shadow_mem_make_New( get_current_Thread(),
1402 -VG_STACK_REDZONE_SZB + a, len );
1403 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1404 all__sanity_check("evh__new_mem_stack-post");
1405}
1406
1407static
sewardj7cf4e6b2008-05-01 20:24:26 +00001408void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1409 if (SHOW_EVENTS >= 2)
1410 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1411 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001412 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001413 all__sanity_check("evh__new_mem_w_tid-post");
1414}
1415
1416static
sewardjb4112022007-11-09 22:49:28 +00001417void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001418 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001419 if (SHOW_EVENTS >= 1)
1420 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1421 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1422 if (rr || ww || xx)
1423 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001424 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001425 all__sanity_check("evh__new_mem_w_perms-post");
1426}
1427
1428static
1429void evh__set_perms ( Addr a, SizeT len,
1430 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001431 // This handles mprotect requests. If the memory is being put
1432 // into no-R no-W state, paint it as NoAccess, for the reasons
1433 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001434 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001435 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001436 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1437 /* Hmm. What should we do here, that actually makes any sense?
1438 Let's say: if neither readable nor writable, then declare it
1439 NoAccess, else leave it alone. */
1440 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001441 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001442 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001443 all__sanity_check("evh__set_perms-post");
1444}
1445
1446static
1447void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001448 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001449 if (SHOW_EVENTS >= 2)
1450 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001451 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001452 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001453 all__sanity_check("evh__die_mem-post");
1454}
1455
1456static
sewardjfd35d492011-03-17 19:39:55 +00001457void evh__die_mem_munmap ( Addr a, SizeT len ) {
1458 // It's important that libhb doesn't ignore this. If, as is likely,
1459 // the client is subject to address space layout randomization,
1460 // then unmapped areas may never get remapped over, even in long
1461 // runs. If we just ignore them we wind up with large resource
1462 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1463 // VTS references in the affected area are dropped. Marking memory
1464 // as NoAccess is expensive, but we assume that munmap is sufficiently
1465 // rare that the space gains of doing this are worth the costs.
1466 if (SHOW_EVENTS >= 2)
1467 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1468 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1469}
1470
1471static
sewardj406bac82010-03-03 23:03:40 +00001472void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001473 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001474 if (SHOW_EVENTS >= 2)
1475 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1476 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1477 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1478 all__sanity_check("evh__untrack_mem-post");
1479}
1480
1481static
sewardj23f12002009-07-24 08:45:08 +00001482void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1483 if (SHOW_EVENTS >= 2)
1484 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1485 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1486 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1487 all__sanity_check("evh__copy_mem-post");
1488}
1489
/* Notification: low-level thread 'child' is being created by 'parent'.
   Allocate a libhb Thr (as a child of the parent's Thr, so the
   happens-before graph records the creation edge), wrap it in a new
   Thread record, bind the two both ways, register it in map_threads,
   and record the parent's stack as the child's creation context for
   use in error messages.  Nothing is done for the initial thread
   (parent == VG_INVALID_THREADID). */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      /* The parent must already be registered; the child's slot must
         still be free. */
      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1552
/* Notification: low-level thread 'quit_tid' is exiting.  Complain if
   it still holds locks, tell libhb it is (asynchronously) gone, and
   free up its map_threads slot so the core can re-use the ThreadId.
   The Thread record itself is NOT freed: see the first comment below
   for why the thread must be treated as still conceptually alive. */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* Message is bounded well below 80 bytes, so sprintf is safe. */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1605
sewardj61bc2c52011-02-09 10:34:00 +00001606/* This is called immediately after fork, for the child only. 'tid'
1607 is the only surviving thread (as per POSIX rules on fork() in
1608 threaded programs), so we have to clean up map_threads to remove
1609 entries for any other threads. */
1610static
1611void evh__atfork_child ( ThreadId tid )
1612{
1613 UInt i;
1614 Thread* thr;
1615 /* Slot 0 should never be used. */
1616 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1617 tl_assert(!thr);
1618 /* Clean up all other slots except 'tid'. */
1619 for (i = 1; i < VG_N_THREADS; i++) {
1620 if (i == tid)
1621 continue;
1622 thr = map_threads_maybe_lookup(i);
1623 if (!thr)
1624 continue;
1625 /* Cleanup actions (next 5 lines) copied from end of
1626 evh__pre_thread_ll_exit; keep in sync. */
1627 tl_assert(thr->hbthr);
1628 libhb_async_exit(thr->hbthr);
1629 tl_assert(thr->coretid == i);
1630 thr->coretid = VG_INVALID_THREADID;
1631 map_threads_delete(i);
1632 }
1633}
1634
sewardjf98e1c02008-10-25 16:22:41 +00001635
sewardjb4112022007-11-09 22:49:28 +00001636static
1637void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1638{
sewardjb4112022007-11-09 22:49:28 +00001639 Thread* thr_s;
1640 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001641 Thr* hbthr_s;
1642 Thr* hbthr_q;
1643 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001644
1645 if (SHOW_EVENTS >= 1)
1646 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1647 (Int)stay_tid, quit_thr );
1648
sewardjf98e1c02008-10-25 16:22:41 +00001649 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001650
1651 thr_s = map_threads_maybe_lookup( stay_tid );
1652 thr_q = quit_thr;
1653 tl_assert(thr_s != NULL);
1654 tl_assert(thr_q != NULL);
1655 tl_assert(thr_s != thr_q);
1656
sewardjf98e1c02008-10-25 16:22:41 +00001657 hbthr_s = thr_s->hbthr;
1658 hbthr_q = thr_q->hbthr;
1659 tl_assert(hbthr_s != hbthr_q);
sewardj60626642011-03-10 15:14:37 +00001660 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1661 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001662
sewardjf98e1c02008-10-25 16:22:41 +00001663 /* Allocate a temporary synchronisation object and use it to send
1664 an imaginary message from the quitter to the stayer, the purpose
1665 being to generate a dependence from the quitter to the
1666 stayer. */
1667 so = libhb_so_alloc();
1668 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001669 /* Send last arg of _so_send as False, since the sending thread
1670 doesn't actually exist any more, so we don't want _so_send to
1671 try taking stack snapshots of it. */
sewardjffce8152011-06-24 10:09:41 +00001672 libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
sewardjf98e1c02008-10-25 16:22:41 +00001673 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1674 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001675
sewardjffce8152011-06-24 10:09:41 +00001676 /* Tell libhb that the quitter has been reaped. Note that we might
1677 have to be cleverer about this, to exclude 2nd and subsequent
1678 notifications for the same hbthr_q, in the case where the app is
1679 buggy (calls pthread_join twice or more on the same thread) AND
1680 where libpthread is also buggy and doesn't return ESRCH on
1681 subsequent calls. (If libpthread isn't thusly buggy, then the
1682 wrapper for pthread_join in hg_intercepts.c will stop us getting
1683 notified here multiple times for the same joinee.) See also
1684 comments in helgrind/tests/jointwice.c. */
1685 libhb_joinedwith_done(hbthr_q);
1686
sewardjf98e1c02008-10-25 16:22:41 +00001687 /* evh__pre_thread_ll_exit issues an error message if the exiting
1688 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001689
1690 /* This holds because, at least when using NPTL as the thread
1691 library, we should be notified the low level thread exit before
1692 we hear of any join event on it. The low level exit
1693 notification feeds through into evh__pre_thread_ll_exit,
1694 which should clear the map_threads entry for it. Hence we
1695 expect there to be no map_threads entry at this point. */
1696 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1697 == VG_INVALID_THREADID);
1698
sewardjf98e1c02008-10-25 16:22:41 +00001699 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001700 all__sanity_check("evh__post_thread_join-post");
1701}
1702
1703static
floriane543f302012-10-21 19:43:43 +00001704void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001705 Addr a, SizeT size) {
1706 if (SHOW_EVENTS >= 2
1707 || (SHOW_EVENTS >= 1 && size != 1))
1708 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1709 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001710 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001711 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001712 all__sanity_check("evh__pre_mem_read-post");
1713}
1714
1715static
1716void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
floriane543f302012-10-21 19:43:43 +00001717 const HChar* s, Addr a ) {
sewardjb4112022007-11-09 22:49:28 +00001718 Int len;
1719 if (SHOW_EVENTS >= 1)
1720 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1721 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001722 // Don't segfault if the string starts in an obviously stupid
1723 // place. Actually we should check the whole string, not just
1724 // the start address, but that's too much trouble. At least
1725 // checking the first byte is better than nothing. See #255009.
1726 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1727 return;
florian19f91bb2012-11-10 22:29:54 +00001728 len = VG_(strlen)( (HChar*) a );
sewardj23f12002009-07-24 08:45:08 +00001729 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001730 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001731 all__sanity_check("evh__pre_mem_read_asciiz-post");
1732}
1733
1734static
floriane543f302012-10-21 19:43:43 +00001735void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001736 Addr a, SizeT size ) {
1737 if (SHOW_EVENTS >= 1)
1738 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1739 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001740 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001741 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001742 all__sanity_check("evh__pre_mem_write-post");
1743}
1744
1745static
1746void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1747 if (SHOW_EVENTS >= 1)
1748 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1749 (void*)a, len, (Int)is_inited );
1750 // FIXME: this is kinda stupid
1751 if (is_inited) {
1752 shadow_mem_make_New(get_current_Thread(), a, len);
1753 } else {
1754 shadow_mem_make_New(get_current_Thread(), a, len);
1755 }
sewardjf98e1c02008-10-25 16:22:41 +00001756 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001757 all__sanity_check("evh__pre_mem_read-post");
1758}
1759
1760static
1761void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001762 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001763 if (SHOW_EVENTS >= 1)
1764 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001765 thr = get_current_Thread();
1766 tl_assert(thr);
1767 if (HG_(clo_free_is_write)) {
1768 /* Treat frees as if the memory was written immediately prior to
1769 the free. This shakes out more races, specifically, cases
1770 where memory is referenced by one thread, and freed by
1771 another, and there's no observable synchronisation event to
1772 guarantee that the reference happens before the free. */
1773 shadow_mem_cwrite_range(thr, a, len);
1774 }
sewardjfd35d492011-03-17 19:39:55 +00001775 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001776 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001777 all__sanity_check("evh__pre_mem_read-post");
1778}
1779
sewardj23f12002009-07-24 08:45:08 +00001780/* --- Event handlers called from generated code --- */
1781
sewardjb4112022007-11-09 22:49:28 +00001782static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001783void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001784 Thread* thr = get_current_Thread_in_C_C();
1785 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001786 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001787}
sewardjf98e1c02008-10-25 16:22:41 +00001788
sewardjb4112022007-11-09 22:49:28 +00001789static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001790void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001791 Thread* thr = get_current_Thread_in_C_C();
1792 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001793 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001794}
sewardjf98e1c02008-10-25 16:22:41 +00001795
sewardjb4112022007-11-09 22:49:28 +00001796static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001797void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001798 Thread* thr = get_current_Thread_in_C_C();
1799 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001800 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001801}
sewardjf98e1c02008-10-25 16:22:41 +00001802
sewardjb4112022007-11-09 22:49:28 +00001803static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001804void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001805 Thread* thr = get_current_Thread_in_C_C();
1806 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001807 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001808}
sewardjf98e1c02008-10-25 16:22:41 +00001809
sewardjb4112022007-11-09 22:49:28 +00001810static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001811void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001812 Thread* thr = get_current_Thread_in_C_C();
1813 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001814 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001815}
1816
1817static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001818void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001819 Thread* thr = get_current_Thread_in_C_C();
1820 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001821 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001822}
sewardjf98e1c02008-10-25 16:22:41 +00001823
sewardjb4112022007-11-09 22:49:28 +00001824static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001825void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001826 Thread* thr = get_current_Thread_in_C_C();
1827 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001828 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001829}
sewardjf98e1c02008-10-25 16:22:41 +00001830
sewardjb4112022007-11-09 22:49:28 +00001831static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001832void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001833 Thread* thr = get_current_Thread_in_C_C();
1834 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001835 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001836}
sewardjf98e1c02008-10-25 16:22:41 +00001837
sewardjb4112022007-11-09 22:49:28 +00001838static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001839void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001840 Thread* thr = get_current_Thread_in_C_C();
1841 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001842 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001843}
sewardjf98e1c02008-10-25 16:22:41 +00001844
sewardjb4112022007-11-09 22:49:28 +00001845static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001846void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001847 Thread* thr = get_current_Thread_in_C_C();
1848 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001849 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001850}
1851
sewardjb4112022007-11-09 22:49:28 +00001852
sewardj9f569b72008-11-13 13:33:09 +00001853/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001854/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001855/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001856
1857/* EXPOSITION only: by intercepting lock init events we can show the
1858 user where the lock was initialised, rather than only being able to
1859 show where it was first locked. Intercepting lock initialisations
1860 is not necessary for the basic operation of the race checker. */
1861static
1862void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1863 void* mutex, Word mbRec )
1864{
1865 if (SHOW_EVENTS >= 1)
1866 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1867 (Int)tid, mbRec, (void*)mutex );
1868 tl_assert(mbRec == 0 || mbRec == 1);
1869 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1870 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001871 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001872 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1873}
1874
/* Notification: 'mutex' is about to be destroyed.  Complain about
   destruction of unknown or still-held mutexes, forcibly release any
   holders, and remove the lock from all bookkeeping (locksets, lock
   order graph, map_locks).  'mutex_is_init' is true when the mutex
   appears to hold the static initializer value, in which case an
   unknown mutex is assumed never-used and silently ignored. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   /* Unknown, or known but of rwlock kind: either way the argument is
      bogus for mutex_destroy. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Also drop it from the lock-order graph and the guest-address
         -> Lock map, then free the record itself. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1932
/* Notification: thread 'tid' is about to (try-)lock 'mutex'.  This is
   purely a sanity/error-reporting pass: complain if a rwlock is being
   passed to mutex_lock, and warn of the upcoming self-deadlock if the
   thread already write-holds a non-recursive lock it is now blocking
   on.  The actual lock-acquisition bookkeeping happens in the _POST
   handler. */
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      /* Show the original acquisition site as auxiliary context when
         we have one. */
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
1975
1976static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1977{
1978 // only called if the real library call succeeded - so mutex is sane
1979 Thread* thr;
1980 if (SHOW_EVENTS >= 1)
1981 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1982 (Int)tid, (void*)mutex );
1983
1984 thr = map_threads_maybe_lookup( tid );
1985 tl_assert(thr); /* cannot fail - Thread* must already exist */
1986
1987 evhH__post_thread_w_acquires_lock(
1988 thr,
1989 LK_mbRec, /* if not known, create new lock with this LockKind */
1990 (Addr)mutex
1991 );
1992}
1993
1994static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1995{
1996 // 'mutex' may be invalid - not checked by wrapper
1997 Thread* thr;
1998 if (SHOW_EVENTS >= 1)
1999 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2000 (Int)tid, (void*)mutex );
2001
2002 thr = map_threads_maybe_lookup( tid );
2003 tl_assert(thr); /* cannot fail - Thread* must already exist */
2004
2005 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2006}
2007
2008static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2009{
2010 // only called if the real library call succeeded - so mutex is sane
2011 Thread* thr;
2012 if (SHOW_EVENTS >= 1)
2013 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2014 (Int)tid, (void*)mutex );
2015 thr = map_threads_maybe_lookup( tid );
2016 tl_assert(thr); /* cannot fail - Thread* must already exist */
2017
2018 // anything we should do here?
2019}
2020
2021
sewardj5a644da2009-08-11 10:35:58 +00002022/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002023/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002024/* ------------------------------------------------------- */
2025
2026/* All a bit of a kludge. Pretend we're really dealing with ordinary
2027 pthread_mutex_t's instead, for the most part. */
2028
2029static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2030 void* slock )
2031{
2032 Thread* thr;
2033 Lock* lk;
2034 /* In glibc's kludgey world, we're either initialising or unlocking
2035 it. Since this is the pre-routine, if it is locked, unlock it
2036 and take a dependence edge. Otherwise, do nothing. */
2037
2038 if (SHOW_EVENTS >= 1)
2039 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2040 "(ctid=%d, slock=%p)\n",
2041 (Int)tid, (void*)slock );
2042
2043 thr = map_threads_maybe_lookup( tid );
2044 /* cannot fail - Thread* must already exist */;
2045 tl_assert( HG_(is_sane_Thread)(thr) );
2046
2047 lk = map_locks_maybe_lookup( (Addr)slock );
2048 if (lk && lk->heldBy) {
2049 /* it's held. So do the normal pre-unlock actions, as copied
2050 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2051 duplicates the map_locks_maybe_lookup. */
2052 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2053 False/*!isRDWR*/ );
2054 }
2055}
2056
2057static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2058 void* slock )
2059{
2060 Lock* lk;
2061 /* More kludgery. If the lock has never been seen before, do
2062 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2063 nothing. */
2064
2065 if (SHOW_EVENTS >= 1)
2066 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2067 "(ctid=%d, slock=%p)\n",
2068 (Int)tid, (void*)slock );
2069
2070 lk = map_locks_maybe_lookup( (Addr)slock );
2071 if (!lk) {
2072 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2073 }
2074}
2075
/* Spinlock lock/destroy events simply reuse the mutex machinery. */
static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

/* Spinlocks have no static-initializer kludge, hence isInit=0. */
static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}
2093
2094
sewardj9f569b72008-11-13 13:33:09 +00002095/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002096/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002097/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002098
sewardj02114542009-07-28 20:52:36 +00002099/* A mapping from CV to (the SO associated with it, plus some
2100 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002101 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2102 wait on it completes, we do a 'recv' from the SO. This is believed
2103 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002104 signallings/broadcasts.
2105*/
2106
sewardj02114542009-07-28 20:52:36 +00002107/* .so is the SO for this CV.
2108 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002109
sewardj02114542009-07-28 20:52:36 +00002110 POSIX says effectively that the first pthread_cond_{timed}wait call
2111 causes a dynamic binding between the CV and the mutex, and that
2112 lasts until such time as the waiter count falls to zero. Hence
2113 need to keep track of the number of waiters in order to do
2114 consistency tracking. */
/* Auxiliary per-CV state.  Created lazily on the first signal/wait
   seen for a CV, and freed again by map_cond_to_CVInfo_delete when
   the guest destroys the CV. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO: sync object for this CV */
      UWord mx_ga;    /* addr of associated mutex, if any; per the
                         comment above, only meaningful whilst
                         nWaiters > 0 */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;


/* pthread_cond_t* -> CVInfo*.  Lazily created; see
   map_cond_to_CVInfo_INIT. */
static WordFM* map_cond_to_CVInfo = NULL;
2126
2127static void map_cond_to_CVInfo_INIT ( void ) {
2128 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2129 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2130 "hg.mctCI.1", HG_(free), NULL );
2131 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002132 }
2133}
2134
sewardj02114542009-07-28 20:52:36 +00002135static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002136 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002137 map_cond_to_CVInfo_INIT();
2138 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002139 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002140 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002141 } else {
sewardj02114542009-07-28 20:52:36 +00002142 SO* so = libhb_so_alloc();
2143 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2144 cvi->so = so;
2145 cvi->mx_ga = 0;
2146 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2147 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002148 }
2149}
2150
philippe8bfc2152012-07-06 23:38:24 +00002151static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2152 UWord key, val;
2153 map_cond_to_CVInfo_INIT();
2154 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2155 tl_assert(key == (UWord)cond);
2156 return (CVInfo*)val;
2157 } else {
2158 return NULL;
2159 }
2160}
2161
sewardjc02f6c42013-10-14 13:51:25 +00002162static void map_cond_to_CVInfo_delete ( ThreadId tid,
2163 void* cond, Bool cond_is_init ) {
philippe8bfc2152012-07-06 23:38:24 +00002164 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +00002165 UWord keyW, valW;
philippe8bfc2152012-07-06 23:38:24 +00002166
2167 thr = map_threads_maybe_lookup( tid );
2168 tl_assert(thr); /* cannot fail - Thread* must already exist */
2169
sewardj02114542009-07-28 20:52:36 +00002170 map_cond_to_CVInfo_INIT();
philippe24111972013-03-18 22:48:22 +00002171 if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
sewardj02114542009-07-28 20:52:36 +00002172 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002173 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002174 tl_assert(cvi);
2175 tl_assert(cvi->so);
philippe8bfc2152012-07-06 23:38:24 +00002176 if (cvi->nWaiters > 0) {
sewardjc02f6c42013-10-14 13:51:25 +00002177 HG_(record_error_Misc)(
2178 thr, "pthread_cond_destroy:"
2179 " destruction of condition variable being waited upon");
philippe24111972013-03-18 22:48:22 +00002180 /* Destroying a cond var being waited upon outcome is EBUSY and
2181 variable is not destroyed. */
2182 return;
philippe8bfc2152012-07-06 23:38:24 +00002183 }
philippe24111972013-03-18 22:48:22 +00002184 if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
2185 tl_assert(0); // cond var found above, and not here ???
sewardj02114542009-07-28 20:52:36 +00002186 libhb_so_dealloc(cvi->so);
2187 cvi->mx_ga = 0;
2188 HG_(free)(cvi);
philippe8bfc2152012-07-06 23:38:24 +00002189 } else {
sewardjc02f6c42013-10-14 13:51:25 +00002190 /* We have no record of this CV. So complain about it
2191 .. except, don't bother to complain if it has exactly the
2192 value PTHREAD_COND_INITIALIZER, since it might be that the CV
2193 was initialised like that but never used. */
2194 if (!cond_is_init) {
2195 HG_(record_error_Misc)(
2196 thr, "pthread_cond_destroy: destruction of unknown cond var");
2197 }
sewardjb4112022007-11-09 22:49:28 +00002198 }
2199}
2200
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread*   thr;
   CVInfo*   cvi;
   //Lock*     lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n", 
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Binds cond to a fresh SO if this is the first event for it. */
   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call 
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      /* The (CV,MX) binding is only known once a waiter has told us
         which mutex is involved; mx_ga may still be 0 here. */
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL.  Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error.  We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and if that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr, 
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   /* The actual happens-before edge: publish this thread's VC via the
      CV's SO, for later recv by the released waiter(s). */
   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2279
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False.  Also establishes/checks the (CV,MX) binding
   and bumps the CV's waiter count. */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n", 
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)( 
         thr, 
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
         if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)( 
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   /* Matched by the decrement in evh__HG_PTHREAD_COND_WAIT_POST. */
   cvi->nWaiters++;

   return lk_valid;
}
2348
2349static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
sewardjff427c92013-10-14 12:13:52 +00002350 void* cond, void* mutex,
2351 Bool timeout)
sewardjb4112022007-11-09 22:49:28 +00002352{
sewardjf98e1c02008-10-25 16:22:41 +00002353 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2354 the SO for this cond, and 'recv' from it so as to acquire a
2355 dependency edge back to the signaller/broadcaster. */
2356 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002357 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002358
2359 if (SHOW_EVENTS >= 1)
2360 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
sewardjff427c92013-10-14 12:13:52 +00002361 "(ctid=%d, cond=%p, mutex=%p)\n, timeout=%d",
2362 (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
sewardjb4112022007-11-09 22:49:28 +00002363
sewardjb4112022007-11-09 22:49:28 +00002364 thr = map_threads_maybe_lookup( tid );
2365 tl_assert(thr); /* cannot fail - Thread* must already exist */
2366
2367 // error-if: cond is also associated with a different mutex
2368
philippe8bfc2152012-07-06 23:38:24 +00002369 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2370 if (!cvi) {
2371 /* This could be either a bug in helgrind or the guest application
2372 that did an error (e.g. cond var was destroyed by another thread.
2373 Let's assume helgrind is perfect ...
2374 Note that this is similar to drd behaviour. */
2375 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2376 " being waited upon");
2377 return;
2378 }
2379
sewardj02114542009-07-28 20:52:36 +00002380 tl_assert(cvi);
2381 tl_assert(cvi->so);
2382 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002383
sewardjff427c92013-10-14 12:13:52 +00002384 if (!timeout && !libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002385 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2386 it? If this happened it would surely be a bug in the threads
2387 library. Or one of those fabled "spurious wakeups". */
2388 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002389 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002390 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002391 }
sewardjf98e1c02008-10-25 16:22:41 +00002392
2393 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002394 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2395
2396 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002397}
2398
philippe19dfe032013-03-24 20:10:23 +00002399static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2400 void* cond, void* cond_attr )
2401{
2402 CVInfo* cvi;
2403
2404 if (SHOW_EVENTS >= 1)
2405 VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2406 "(ctid=%d, cond=%p, cond_attr=%p)\n",
2407 (Int)tid, (void*)cond, (void*) cond_attr );
2408
2409 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2410 tl_assert (cvi);
2411 tl_assert (cvi->so);
2412}
2413
2414
sewardjf98e1c02008-10-25 16:22:41 +00002415static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
sewardjc02f6c42013-10-14 13:51:25 +00002416 void* cond, Bool cond_is_init )
sewardjf98e1c02008-10-25 16:22:41 +00002417{
2418 /* Deal with destroy events. The only purpose is to free storage
2419 associated with the CV, so as to avoid any possible resource
2420 leaks. */
2421 if (SHOW_EVENTS >= 1)
2422 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
sewardjc02f6c42013-10-14 13:51:25 +00002423 "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2424 (Int)tid, (void*)cond, (Int)cond_is_init );
sewardjf98e1c02008-10-25 16:22:41 +00002425
sewardjc02f6c42013-10-14 13:51:25 +00002426 map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
sewardjb4112022007-11-09 22:49:28 +00002427}
2428
2429
sewardj9f569b72008-11-13 13:33:09 +00002430/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002431/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002432/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002433
2434/* EXPOSITION only */
2435static
2436void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2437{
2438 if (SHOW_EVENTS >= 1)
2439 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2440 (Int)tid, (void*)rwl );
2441 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002442 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002443 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2444}
2445
/* The guest is about to destroy rwlock 'rwl'.  Complain if it is not
   actually a known rwlock, or if it is still held; in the held case,
   act as if it had been fully unlocked first, then discard all state
   for the lock. */
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n", 
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop laog state first, then the address mapping, then the
         Lock itself. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2492
2493static
sewardj789c3c52008-02-25 12:10:07 +00002494void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2495 void* rwl,
2496 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002497{
2498 /* Just check the rwl is sane; nothing else to do. */
2499 // 'rwl' may be invalid - not checked by wrapper
2500 Thread* thr;
2501 Lock* lk;
2502 if (SHOW_EVENTS >= 1)
2503 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2504 (Int)tid, (Int)isW, (void*)rwl );
2505
2506 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002507 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002508 thr = map_threads_maybe_lookup( tid );
2509 tl_assert(thr); /* cannot fail - Thread* must already exist */
2510
2511 lk = map_locks_maybe_lookup( (Addr)rwl );
2512 if ( lk
2513 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2514 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002515 HG_(record_error_Misc)(
2516 thr, "pthread_rwlock_{rd,rw}lock with a "
2517 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002518 }
2519}
2520
2521static
2522void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2523{
2524 // only called if the real library call succeeded - so mutex is sane
2525 Thread* thr;
2526 if (SHOW_EVENTS >= 1)
2527 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2528 (Int)tid, (Int)isW, (void*)rwl );
2529
2530 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2531 thr = map_threads_maybe_lookup( tid );
2532 tl_assert(thr); /* cannot fail - Thread* must already exist */
2533
2534 (isW ? evhH__post_thread_w_acquires_lock
2535 : evhH__post_thread_r_acquires_lock)(
2536 thr,
2537 LK_rdwr, /* if not known, create new lock with this LockKind */
2538 (Addr)rwl
2539 );
2540}
2541
2542static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2543{
2544 // 'rwl' may be invalid - not checked by wrapper
2545 Thread* thr;
2546 if (SHOW_EVENTS >= 1)
2547 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2548 (Int)tid, (void*)rwl );
2549
2550 thr = map_threads_maybe_lookup( tid );
2551 tl_assert(thr); /* cannot fail - Thread* must already exist */
2552
2553 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2554}
2555
2556static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2557{
2558 // only called if the real library call succeeded - so mutex is sane
2559 Thread* thr;
2560 if (SHOW_EVENTS >= 1)
2561 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2562 (Int)tid, (void*)rwl );
2563 thr = map_threads_maybe_lookup( tid );
2564 tl_assert(thr); /* cannot fail - Thread* must already exist */
2565
2566 // anything we should do here?
2567}
2568
2569
sewardj9f569b72008-11-13 13:33:09 +00002570/* ---------------------------------------------------------- */
2571/* -------------- events to do with semaphores -------------- */
2572/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002573
sewardj11e352f2007-11-30 11:11:02 +00002574/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002575 variables. */
2576
sewardjf98e1c02008-10-25 16:22:41 +00002577/* For each semaphore, we maintain a stack of SOs. When a 'post'
2578 operation is done on a semaphore (unlocking, essentially), a new SO
2579 is created for the posting thread, the posting thread does a strong
2580 send to it (which merely installs the posting thread's VC in the
2581 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002582
2583 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002584 semaphore, we pop a SO off the semaphore's stack (which should be
2585 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002586 dependencies between posters and waiters of the semaphore.
2587
sewardjf98e1c02008-10-25 16:22:41 +00002588 It may not be necessary to use a stack - perhaps a bag of SOs would
2589 do. But we do need to keep track of how many unused-up posts have
2590 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002591
sewardjf98e1c02008-10-25 16:22:41 +00002592 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002593 twice on S. T3 cannot complete its waits without both T1 and T2
2594 posting. The above mechanism will ensure that T3 acquires
2595 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002596
sewardjf98e1c02008-10-25 16:22:41 +00002597 When a semaphore is initialised with value N, we do as if we'd
2598 posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
2600 semaphore to acquire a dependency on the initialisation point,
2601 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002602
2603 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2604 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002605*/
2606
sewardjf98e1c02008-10-25 16:22:41 +00002607/* sem_t* -> XArray* SO* */
2608static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002609
sewardjf98e1c02008-10-25 16:22:41 +00002610static void map_sem_to_SO_stack_INIT ( void ) {
2611 if (map_sem_to_SO_stack == NULL) {
2612 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2613 HG_(free), NULL );
2614 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002615 }
2616}
2617
sewardjf98e1c02008-10-25 16:22:41 +00002618static void push_SO_for_sem ( void* sem, SO* so ) {
2619 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002620 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002621 tl_assert(so);
2622 map_sem_to_SO_stack_INIT();
2623 if (VG_(lookupFM)( map_sem_to_SO_stack,
2624 &keyW, (UWord*)&xa, (UWord)sem )) {
2625 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002626 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002627 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002628 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002629 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2630 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002631 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002632 }
2633}
2634
sewardjf98e1c02008-10-25 16:22:41 +00002635static SO* mb_pop_SO_for_sem ( void* sem ) {
2636 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002637 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002638 SO* so;
2639 map_sem_to_SO_stack_INIT();
2640 if (VG_(lookupFM)( map_sem_to_SO_stack,
2641 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002642 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002643 Word sz;
2644 tl_assert(keyW == (UWord)sem);
2645 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002646 tl_assert(sz >= 0);
2647 if (sz == 0)
2648 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002649 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2650 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002651 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002652 return so;
sewardjb4112022007-11-09 22:49:28 +00002653 } else {
2654 /* hmm, that's odd. No stack for this semaphore. */
2655 return NULL;
2656 }
2657}
2658
sewardj11e352f2007-11-30 11:11:02 +00002659static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002660{
sewardjf98e1c02008-10-25 16:22:41 +00002661 UWord keyW, valW;
2662 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002663
sewardjb4112022007-11-09 22:49:28 +00002664 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002665 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002666 (Int)tid, (void*)sem );
2667
sewardjf98e1c02008-10-25 16:22:41 +00002668 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002669
sewardjf98e1c02008-10-25 16:22:41 +00002670 /* Empty out the semaphore's SO stack. This way of doing it is
2671 stupid, but at least it's easy. */
2672 while (1) {
2673 so = mb_pop_SO_for_sem( sem );
2674 if (!so) break;
2675 libhb_so_dealloc(so);
2676 }
2677
2678 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2679 XArray* xa = (XArray*)valW;
2680 tl_assert(keyW == (UWord)sem);
2681 tl_assert(xa);
2682 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2683 VG_(deleteXA)(xa);
2684 }
sewardjb4112022007-11-09 22:49:28 +00002685}
2686
sewardj11e352f2007-11-30 11:11:02 +00002687static
2688void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2689{
sewardjf98e1c02008-10-25 16:22:41 +00002690 SO* so;
2691 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002692
2693 if (SHOW_EVENTS >= 1)
2694 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2695 (Int)tid, (void*)sem, value );
2696
sewardjf98e1c02008-10-25 16:22:41 +00002697 thr = map_threads_maybe_lookup( tid );
2698 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002699
sewardjf98e1c02008-10-25 16:22:41 +00002700 /* Empty out the semaphore's SO stack. This way of doing it is
2701 stupid, but at least it's easy. */
2702 while (1) {
2703 so = mb_pop_SO_for_sem( sem );
2704 if (!so) break;
2705 libhb_so_dealloc(so);
2706 }
sewardj11e352f2007-11-30 11:11:02 +00002707
sewardjf98e1c02008-10-25 16:22:41 +00002708 /* If we don't do this check, the following while loop runs us out
2709 of memory for stupid initial values of 'value'. */
2710 if (value > 10000) {
2711 HG_(record_error_Misc)(
2712 thr, "sem_init: initial value exceeds 10000; using 10000" );
2713 value = 10000;
2714 }
sewardj11e352f2007-11-30 11:11:02 +00002715
sewardjf98e1c02008-10-25 16:22:41 +00002716 /* Now create 'valid' new SOs for the thread, do a strong send to
2717 each of them, and push them all on the stack. */
2718 for (; value > 0; value--) {
2719 Thr* hbthr = thr->hbthr;
2720 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002721
sewardjf98e1c02008-10-25 16:22:41 +00002722 so = libhb_so_alloc();
2723 libhb_so_send( hbthr, so, True/*strong send*/ );
2724 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002725 }
2726}
2727
2728static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002729{
sewardjf98e1c02008-10-25 16:22:41 +00002730 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2731 it (iow, write our VC into it, then tick ours), and push the SO
2732 on on a stack of SOs associated with 'sem'. This is later used
2733 by other thread(s) which successfully exit from a sem_wait on
2734 the same sem; by doing a strong recv from SOs popped of the
2735 stack, they acquire dependencies on the posting thread
2736 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002737
sewardjf98e1c02008-10-25 16:22:41 +00002738 Thread* thr;
2739 SO* so;
2740 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002741
2742 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002743 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002744 (Int)tid, (void*)sem );
2745
2746 thr = map_threads_maybe_lookup( tid );
2747 tl_assert(thr); /* cannot fail - Thread* must already exist */
2748
2749 // error-if: sem is bogus
2750
sewardjf98e1c02008-10-25 16:22:41 +00002751 hbthr = thr->hbthr;
2752 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002753
sewardjf98e1c02008-10-25 16:22:41 +00002754 so = libhb_so_alloc();
2755 libhb_so_send( hbthr, so, True/*strong send*/ );
2756 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002757}
2758
sewardj11e352f2007-11-30 11:11:02 +00002759static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002760{
sewardjf98e1c02008-10-25 16:22:41 +00002761 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2762 the 'sem' from this semaphore's SO-stack, and do a strong recv
2763 from it. This creates a dependency back to one of the post-ers
2764 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002765
sewardjf98e1c02008-10-25 16:22:41 +00002766 Thread* thr;
2767 SO* so;
2768 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002769
2770 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002771 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002772 (Int)tid, (void*)sem );
2773
2774 thr = map_threads_maybe_lookup( tid );
2775 tl_assert(thr); /* cannot fail - Thread* must already exist */
2776
2777 // error-if: sem is bogus
2778
sewardjf98e1c02008-10-25 16:22:41 +00002779 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002780
sewardjf98e1c02008-10-25 16:22:41 +00002781 if (so) {
2782 hbthr = thr->hbthr;
2783 tl_assert(hbthr);
2784
2785 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2786 libhb_so_dealloc(so);
2787 } else {
2788 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2789 If this happened it would surely be a bug in the threads
2790 library. */
2791 HG_(record_error_Misc)(
2792 thr, "Bug in libpthread: sem_wait succeeded on"
2793 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002794 }
2795}
2796
2797
sewardj9f569b72008-11-13 13:33:09 +00002798/* -------------------------------------------------------- */
2799/* -------------- events to do with barriers -------------- */
2800/* -------------------------------------------------------- */
2801
/* Per-barrier shadow state.  One of these is bound to each guest
   pthread_barrier_t* via map_barrier_to_Bar; 'waiting' accumulates
   the threads currently blocked in pthread_barrier_wait. */
typedef
   struct {
      Bool initted; /* has it yet been initted by guest? */
      Bool resizable; /* is resizing allowed? */
      UWord size; /* declared size */
      XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;
2810
2811static Bar* new_Bar ( void ) {
2812 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2813 tl_assert(bar);
2814 /* all fields are zero */
2815 tl_assert(bar->initted == False);
2816 return bar;
2817}
2818
2819static void delete_Bar ( Bar* bar ) {
2820 tl_assert(bar);
2821 if (bar->waiting)
2822 VG_(deleteXA)(bar->waiting);
2823 HG_(free)(bar);
2824}
2825
2826/* A mapping which stores auxiliary data for barriers. */
2827
2828/* pthread_barrier_t* -> Bar* */
2829static WordFM* map_barrier_to_Bar = NULL;
2830
2831static void map_barrier_to_Bar_INIT ( void ) {
2832 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2833 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2834 "hg.mbtBI.1", HG_(free), NULL );
2835 tl_assert(map_barrier_to_Bar != NULL);
2836 }
2837}
2838
2839static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2840 UWord key, val;
2841 map_barrier_to_Bar_INIT();
2842 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2843 tl_assert(key == (UWord)barrier);
2844 return (Bar*)val;
2845 } else {
2846 Bar* bar = new_Bar();
2847 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2848 return bar;
2849 }
2850}
2851
2852static void map_barrier_to_Bar_delete ( void* barrier ) {
2853 UWord keyW, valW;
2854 map_barrier_to_Bar_INIT();
2855 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2856 Bar* bar = (Bar*)valW;
2857 tl_assert(keyW == (UWord)barrier);
2858 delete_Bar(bar);
2859 }
2860}
2861
2862
/* Client is about to call pthread_barrier_init(barrier, ..., count).
   'resizable' is nonzero iff the client declared the barrier as
   resizable (a GNU extension).  Complains about bogus arguments and
   double-initialisation, then (re)establishes the shadow state:
   declared size, resizability, and an empty waiting list. */
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar* bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      /* a waiting list can only exist if a prior init happened */
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      /* forcibly discard the stale waiters so the asserts below hold */
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   /* Note: errors above are reported but we proceed regardless, so
      the shadow state always ends up self-consistent. */
   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size = count;
}
2918
2919
/* Client is about to call pthread_barrier_destroy(barrier).  We only
   need to free our shadow storage (to avoid leaks), after reporting
   destroy-while-uninitialised / destroy-while-in-use misuses. */
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar* bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* lookup_or_alloc: even an unknown barrier gets a Bar, so the
      uninitialised-destroy case below can be diagnosed */
   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
2962
2963
sewardj406bac82010-03-03 23:03:40 +00002964/* All the threads have arrived. Now do the Interesting Bit. Get a
2965 new synchronisation object and do a weak send to it from all the
2966 participating threads. This makes its vector clocks be the join of
2967 all the individual threads' vector clocks. Then do a strong
2968 receive from it back to all threads, so that their VCs are a copy
2969 of it (hence are all equal to the join of their original VCs.) */
2970static void do_barrier_cross_sync_and_empty ( Bar* bar )
2971{
2972 /* XXX check bar->waiting has no duplicates */
2973 UWord i;
2974 SO* so = libhb_so_alloc();
2975
2976 tl_assert(bar->waiting);
2977 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2978
2979 /* compute the join ... */
2980 for (i = 0; i < bar->size; i++) {
2981 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2982 Thr* hbthr = t->hbthr;
2983 libhb_so_send( hbthr, so, False/*weak send*/ );
2984 }
2985 /* ... and distribute to all threads */
2986 for (i = 0; i < bar->size; i++) {
2987 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2988 Thr* hbthr = t->hbthr;
2989 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2990 }
2991
2992 /* finally, we must empty out the waiting vector */
2993 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2994
2995 /* and we don't need this any more. Perhaps a stack-allocated
2996 SO would be better? */
2997 libhb_so_dealloc(so);
2998}
2999
3000
sewardj9f569b72008-11-13 13:33:09 +00003001static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3002 void* barrier )
3003{
sewardj1c466b72008-11-19 11:52:14 +00003004 /* This function gets called after a client thread calls
3005 pthread_barrier_wait but before it arrives at the real
3006 pthread_barrier_wait.
3007
3008 Why is the following correct? It's a bit subtle.
3009
3010 If this is not the last thread arriving at the barrier, we simply
3011 note its presence and return. Because valgrind (at least as of
3012 Nov 08) is single threaded, we are guaranteed safe from any race
3013 conditions when in this function -- no other client threads are
3014 running.
3015
3016 If this is the last thread, then we are again the only running
3017 thread. All the other threads will have either arrived at the
3018 real pthread_barrier_wait or are on their way to it, but in any
3019 case are guaranteed not to be able to move past it, because this
3020 thread is currently in this function and so has not yet arrived
3021 at the real pthread_barrier_wait. That means that:
3022
3023 1. While we are in this function, none of the other threads
3024 waiting at the barrier can move past it.
3025
3026 2. When this function returns (and simulated execution resumes),
3027 this thread and all other waiting threads will be able to move
3028 past the real barrier.
3029
3030 Because of this, it is now safe to update the vector clocks of
3031 all threads, to represent the fact that they all arrived at the
3032 barrier and have all moved on. There is no danger of any
3033 complications to do with some threads leaving the barrier and
3034 racing back round to the front, whilst others are still leaving
3035 (which is the primary source of complication in correct handling/
3036 implementation of barriers). That can't happen because we update
3037 here our data structures so as to indicate that the threads have
3038 passed the barrier, even though, as per (2) above, they are
3039 guaranteed not to pass the barrier until we return.
3040
3041 This relies crucially on Valgrind being single threaded. If that
3042 changes, this will need to be reconsidered.
3043 */
sewardj9f569b72008-11-13 13:33:09 +00003044 Thread* thr;
3045 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003046 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003047
3048 if (SHOW_EVENTS >= 1)
3049 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3050 "(tid=%d, barrier=%p)\n",
3051 (Int)tid, (void*)barrier );
3052
3053 thr = map_threads_maybe_lookup( tid );
3054 tl_assert(thr); /* cannot fail - Thread* must already exist */
3055
3056 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3057 tl_assert(bar);
3058
3059 if (!bar->initted) {
3060 HG_(record_error_Misc)(
3061 thr, "pthread_barrier_wait: barrier is uninitialised"
3062 );
3063 return; /* client is broken .. avoid assertions below */
3064 }
3065
3066 /* guaranteed by _INIT_PRE above */
3067 tl_assert(bar->size > 0);
3068 tl_assert(bar->waiting);
3069
3070 VG_(addToXA)( bar->waiting, &thr );
3071
3072 /* guaranteed by this function */
3073 present = VG_(sizeXA)(bar->waiting);
3074 tl_assert(present > 0 && present <= bar->size);
3075
3076 if (present < bar->size)
3077 return;
3078
sewardj406bac82010-03-03 23:03:40 +00003079 do_barrier_cross_sync_and_empty(bar);
3080}
sewardj9f569b72008-11-13 13:33:09 +00003081
sewardj9f569b72008-11-13 13:33:09 +00003082
sewardj406bac82010-03-03 23:03:40 +00003083static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3084 void* barrier,
3085 UWord newcount )
3086{
3087 Thread* thr;
3088 Bar* bar;
3089 UWord present;
3090
3091 if (SHOW_EVENTS >= 1)
3092 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3093 "(tid=%d, barrier=%p, newcount=%lu)\n",
3094 (Int)tid, (void*)barrier, newcount );
3095
3096 thr = map_threads_maybe_lookup( tid );
3097 tl_assert(thr); /* cannot fail - Thread* must already exist */
3098
3099 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3100 tl_assert(bar);
3101
3102 if (!bar->initted) {
3103 HG_(record_error_Misc)(
3104 thr, "pthread_barrier_resize: barrier is uninitialised"
3105 );
3106 return; /* client is broken .. avoid assertions below */
3107 }
3108
3109 if (!bar->resizable) {
3110 HG_(record_error_Misc)(
3111 thr, "pthread_barrier_resize: barrier is may not be resized"
3112 );
3113 return; /* client is broken .. avoid assertions below */
3114 }
3115
3116 if (newcount == 0) {
3117 HG_(record_error_Misc)(
3118 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3119 );
3120 return; /* client is broken .. avoid assertions below */
3121 }
3122
3123 /* guaranteed by _INIT_PRE above */
3124 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003125 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003126 /* Guaranteed by this fn */
3127 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003128
sewardj406bac82010-03-03 23:03:40 +00003129 if (newcount >= bar->size) {
3130 /* Increasing the capacity. There's no possibility of threads
3131 moving on from the barrier in this situation, so just note
3132 the fact and do nothing more. */
3133 bar->size = newcount;
3134 } else {
3135 /* Decreasing the capacity. If we decrease it to be equal or
3136 below the number of waiting threads, they will now move past
3137 the barrier, so need to mess with dep edges in the same way
3138 as if the barrier had filled up normally. */
3139 present = VG_(sizeXA)(bar->waiting);
3140 tl_assert(present >= 0 && present <= bar->size);
3141 if (newcount <= present) {
3142 bar->size = present; /* keep the cross_sync call happy */
3143 do_barrier_cross_sync_and_empty(bar);
3144 }
3145 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003146 }
sewardj9f569b72008-11-13 13:33:09 +00003147}
3148
3149
sewardjed2e72e2009-08-14 11:08:24 +00003150/* ----------------------------------------------------- */
3151/* ----- events to do with user-specified HB edges ----- */
3152/* ----------------------------------------------------- */
3153
3154/* A mapping from arbitrary UWord tag to the SO associated with it.
3155 The UWord tags are meaningless to us, interpreted only by the
3156 user. */
3157
3158
3159
3160/* UWord -> SO* */
3161static WordFM* map_usertag_to_SO = NULL;
3162
3163static void map_usertag_to_SO_INIT ( void ) {
3164 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3165 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3166 "hg.mutS.1", HG_(free), NULL );
3167 tl_assert(map_usertag_to_SO != NULL);
3168 }
3169}
3170
3171static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3172 UWord key, val;
3173 map_usertag_to_SO_INIT();
3174 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3175 tl_assert(key == (UWord)usertag);
3176 return (SO*)val;
3177 } else {
3178 SO* so = libhb_so_alloc();
3179 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3180 return so;
3181 }
3182}
3183
sewardj6015d0e2011-03-11 19:10:48 +00003184static void map_usertag_to_SO_delete ( UWord usertag ) {
3185 UWord keyW, valW;
3186 map_usertag_to_SO_INIT();
3187 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3188 SO* so = (SO*)valW;
3189 tl_assert(keyW == usertag);
3190 tl_assert(so);
3191 libhb_so_dealloc(so);
3192 }
3193}
sewardjed2e72e2009-08-14 11:08:24 +00003194
3195
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* weak send: accumulate (join), don't overwrite, so multiple
      senders all remain visible to a later receiver */
   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}
3223
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
3251
sewardj6015d0e2011-03-11 19:10:48 +00003252static
3253void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3254{
3255 /* TID declares that any happens-before edges notionally stored in
3256 USERTAG can be deleted. If (as would normally be the case) a
3257 SO is associated with USERTAG, then the assocation is removed
3258 and all resources associated with SO are freed. Importantly,
3259 that frees up any VTSs stored in SO. */
3260 if (SHOW_EVENTS >= 1)
3261 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3262 (Int)tid, usertag );
3263
3264 map_usertag_to_SO_delete( usertag );
3265}
3266
sewardjed2e72e2009-08-14 11:08:24 +00003267
sewardjb4112022007-11-09 22:49:28 +00003268/*--------------------------------------------------------------*/
3269/*--- Lock acquisition order monitoring ---*/
3270/*--------------------------------------------------------------*/
3271
3272/* FIXME: here are some optimisations still to do in
3273 laog__pre_thread_acquires_lock.
3274
3275 The graph is structured so that if L1 --*--> L2 then L1 must be
3276 acquired before L2.
3277
3278 The common case is that some thread T holds (eg) L1 L2 and L3 and
3279 is repeatedly acquiring and releasing Ln, and there is no ordering
3280 error in what it is doing. Hence it repeatly:
3281
3282 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3283 produces the answer No (because there is no error).
3284
3285 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3286 (because they already got added the first time T acquired Ln).
3287
3288 Hence cache these two events:
3289
3290 (1) Cache result of the query from last time. Invalidate the cache
3291 any time any edges are added to or deleted from laog.
3292
3293 (2) Cache these add-edge requests and ignore them if said edges
3294 have already been added to laog. Invalidate the cache any time
3295 any edges are deleted from laog.
3296*/
3297
/* Per-lock node in the lock acquisition-order graph: the sets of
   locks observed to be taken before ('inns') and after ('outs') this
   one.  Both sets are WordSetIDs in the univ_laog universe. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;
3304
3305/* lock order acquisition graph */
3306static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3307
3308/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3309 where that edge was created, so that we can show the user later if
3310 we need to. */
/* Record for one laog edge: the guest addresses of the two locks,
   plus the execution contexts where the src-before-dst ordering was
   first established.  Used only for error reporting. */
typedef
   struct {
      Addr src_ga; /* Lock guest addresses for */
      Addr dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
3319
sewardj250ec2e2008-02-15 22:02:30 +00003320static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003321 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3322 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3323 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3324 if (llx1->src_ga < llx2->src_ga) return -1;
3325 if (llx1->src_ga > llx2->src_ga) return 1;
3326 if (llx1->dst_ga < llx2->dst_ga) return -1;
3327 if (llx1->dst_ga > llx2->dst_ga) return 1;
3328 return 0;
3329}
3330
3331static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3332/* end EXPOSITION ONLY */
3333
3334
sewardja65db102009-01-26 10:45:16 +00003335__attribute__((noinline))
3336static void laog__init ( void )
3337{
3338 tl_assert(!laog);
3339 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003340 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003341
3342 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3343 HG_(free), NULL/*unboxedcmp*/ );
3344
3345 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3346 cmp_LAOGLinkExposition );
3347 tl_assert(laog);
3348 tl_assert(laog_exposition);
3349}
3350
florian6bf37262012-10-21 03:23:36 +00003351static void laog__show ( const HChar* who ) {
3352 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003353 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003354 Lock* me;
3355 LAOGLinks* links;
3356 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003357 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003358 me = NULL;
3359 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003360 while (VG_(nextIterFM)( laog, (UWord*)&me,
3361 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003362 tl_assert(me);
3363 tl_assert(links);
3364 VG_(printf)(" node %p:\n", me);
3365 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3366 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003367 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003368 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3369 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003370 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003371 me = NULL;
3372 links = NULL;
3373 }
sewardj896f6f92008-08-19 08:38:52 +00003374 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003375 VG_(printf)("}\n");
3376}
3377
sewardj866c80c2011-10-22 19:29:51 +00003378static void univ_laog_do_GC ( void ) {
3379 Word i;
3380 LAOGLinks* links;
3381 Word seen = 0;
3382 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3383 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3384
3385 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3386 (Int) univ_laog_cardinality
3387 * sizeof(Bool) );
3388 // univ_laog_seen[*] set to 0 (False) by zalloc.
3389
3390 if (VG_(clo_stats))
3391 VG_(message)(Vg_DebugMsg,
3392 "univ_laog_do_GC enter cardinality %'10d\n",
3393 (Int)univ_laog_cardinality);
3394
3395 VG_(initIterFM)( laog );
3396 links = NULL;
3397 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3398 tl_assert(links);
3399 tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3400 univ_laog_seen[links->inns] = True;
3401 tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3402 univ_laog_seen[links->outs] = True;
3403 links = NULL;
3404 }
3405 VG_(doneIterFM)( laog );
3406
3407 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3408 if (univ_laog_seen[i])
3409 seen++;
3410 else
3411 HG_(dieWS) ( univ_laog, (WordSet)i );
3412 }
3413
3414 HG_(free) (univ_laog_seen);
3415
3416 // We need to decide the value of the next_gc.
3417 // 3 solutions were looked at:
3418 // Sol 1: garbage collect at seen * 2
3419 // This solution was a lot slower, probably because we both do a lot of
3420 // garbage collection and do not keep long enough laog WV that will become
3421 // useful again very soon.
3422 // Sol 2: garbage collect at a percentage increase of the current cardinality
3423 // (with a min increase of 1)
3424 // Trials on a small test program with 1%, 5% and 10% increase was done.
3425 // 1% is slightly faster than 5%, which is slightly slower than 10%.
3426 // However, on a big application, this caused the memory to be exhausted,
3427 // as even a 1% increase of size at each gc becomes a lot, when many gc
3428 // are done.
3429 // Sol 3: always garbage collect at current cardinality + 1.
3430 // This solution was the fastest of the 3 solutions, and caused no memory
3431 // exhaustion in the big application.
3432 //
3433 // With regards to cost introduced by gc: on the t2t perf test (doing only
3434 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3435 // version with garbage collection. With t2t 50 20 2, my machine started
3436 // to page out, and so the garbage collected version was much faster.
3437 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3438 // difference performance is insignificant (~ 0.1 s).
3439 // Of course, it might be that real life programs are not well represented
3440 // by t2t.
3441
3442 // If ever we want to have a more sophisticated control
3443 // (e.g. clo options to control the percentage increase or fixed increased),
3444 // we should do it here, eg.
3445 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3446 // Currently, we just hard-code the solution 3 above.
3447 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3448
3449 if (VG_(clo_stats))
3450 VG_(message)
3451 (Vg_DebugMsg,
3452 "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
3453 (Int)seen, next_gc_univ_laog);
3454}
3455
3456
sewardjb4112022007-11-09 22:49:28 +00003457__attribute__((noinline))
3458static void laog__add_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003459 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003460 LAOGLinks* links;
3461 Bool presentF, presentR;
3462 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3463
3464 /* Take the opportunity to sanity check the graph. Record in
3465 presentF if there is already a src->dst mapping in this node's
3466 forwards links, and presentR if there is already a src->dst
3467 mapping in this node's backwards links. They should agree!
3468 Also, we need to know whether the edge was already present so as
3469 to decide whether or not to update the link details mapping. We
3470 can compute presentF and presentR essentially for free, so may
3471 as well do this always. */
3472 presentF = presentR = False;
3473
3474 /* Update the out edges for src */
3475 keyW = 0;
3476 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003477 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003478 WordSetID outs_new;
3479 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003480 tl_assert(keyW == (UWord)src);
3481 outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003482 presentF = outs_new == links->outs;
3483 links->outs = outs_new;
3484 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003485 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003486 links->inns = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003487 links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3488 VG_(addToFM)( laog, (UWord)src, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003489 }
3490 /* Update the in edges for dst */
3491 keyW = 0;
3492 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003493 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003494 WordSetID inns_new;
3495 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003496 tl_assert(keyW == (UWord)dst);
3497 inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003498 presentR = inns_new == links->inns;
3499 links->inns = inns_new;
3500 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003501 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
florian6bf37262012-10-21 03:23:36 +00003502 links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003503 links->outs = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003504 VG_(addToFM)( laog, (UWord)dst, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003505 }
3506
3507 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3508
3509 if (!presentF && src->acquired_at && dst->acquired_at) {
3510 LAOGLinkExposition expo;
3511 /* If this edge is entering the graph, and we have acquired_at
3512 information for both src and dst, record those acquisition
3513 points. Hence, if there is later a violation of this
3514 ordering, we can show the user the two places in which the
3515 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003516 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003517 src->guestaddr, dst->guestaddr);
3518 expo.src_ga = src->guestaddr;
3519 expo.dst_ga = dst->guestaddr;
3520 expo.src_ec = NULL;
3521 expo.dst_ec = NULL;
3522 tl_assert(laog_exposition);
florian6bf37262012-10-21 03:23:36 +00003523 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003524 /* we already have it; do nothing */
3525 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003526 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3527 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003528 expo2->src_ga = src->guestaddr;
3529 expo2->dst_ga = dst->guestaddr;
3530 expo2->src_ec = src->acquired_at;
3531 expo2->dst_ec = dst->acquired_at;
florian6bf37262012-10-21 03:23:36 +00003532 VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
sewardjb4112022007-11-09 22:49:28 +00003533 }
3534 }
sewardj866c80c2011-10-22 19:29:51 +00003535
3536 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3537 univ_laog_do_GC();
sewardjb4112022007-11-09 22:49:28 +00003538}
3539
/* Remove the edge src --> dst from the lock-order graph (both the
   forward and backward edge sets), and drop any recorded exposition
   for that edge.  May trigger a univ_laog GC. */
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   UWord keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      /* delFromFM hands back the stored key (the heap-allocated
         exposition record) so we can free it */
      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase the number of WordSets, so check
      whether a GC is due. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}
3583
3584__attribute__((noinline))
3585static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003586 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003587 LAOGLinks* links;
3588 keyW = 0;
3589 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003590 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003591 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003592 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003593 return links->outs;
3594 } else {
3595 return HG_(emptyWS)( univ_laog );
3596 }
3597}
3598
3599__attribute__((noinline))
3600static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003601 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003602 LAOGLinks* links;
3603 keyW = 0;
3604 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003605 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003606 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003607 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003608 return links->inns;
3609 } else {
3610 return HG_(emptyWS)( univ_laog );
3611 }
3612}
3613
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   /* Check that laog's two edge sets are mutually consistent: for
      every lock 'me' in the graph, each predecessor of 'me' must list
      'me' among its successors, and each successor of 'me' must list
      'me' among its predecessors.  On failure, prints 'who' (caller
      tag), dumps the graph, and asserts. */
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* Each in-edge (pred -> me) must be mirrored by an out-edge at
         the predecessor. */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      /* And symmetrically: each out-edge must be mirrored by an
         in-edge at the successor. */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      /* Reset before the next iteration; nextIterFM writes through
         these pointers. */
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3653
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exist from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   /* Iterative depth-first search: 'stack' holds the work list of
      locks still to visit, 'visited' is the set of locks already
      expanded (used only as a set; the mapped value is ignored). */
   Lock* ret;
   Word ssz;
   XArray* stack; /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock* here;
   WordSetID succs;
   UWord succs_size, i;
   UWord* succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret = NULL;
   stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* Work list exhausted: no element of 'dsts' is reachable. */
      if (ssz == 0) { ret = NULL; break; }

      /* Pop the top of the stack. */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* Membership in 'dsts' is tested before the visited check, so a
         destination is reported even if it was pushed twice. */
      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      /* Push all successors of 'here' for later expansion. */
      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   /* Both containers are always freed, whether or not a path was
      found. */
   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3709
3710
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord*   ls_words;
   UWord    ls_size, i;
   Lock*    other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      /* NB: the lookup is keyed only on (src_ga, dst_ga); the _ec
         fields of 'key' are not consulted. */
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks
            between them:

                           C

                       fCA   fBC

                      A   fAB   B

            Let's have the following actions:
                   A takes    fCA,fAB
                   A releases fCA,fAB
                   B takes    fAB,fBC
                   B releases fAB,fBC
                   C takes    fBC,fCA
                   C releases fBC,fCA

            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            sequence is done:
                A takes fCA
                B takes fAB
                C takes fBC

            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            are not given.

            This is because there is no pair (fCA, fBC) in laog exposition :
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */

         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3836
sewardj866c80c2011-10-22 19:29:51 +00003837/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3838static UWord* UWordV_dup(UWord* words, Word words_size)
3839{
3840 UInt i;
3841
3842 if (words_size == 0)
3843 return NULL;
3844
3845 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3846
3847 for (i = 0; i < words_size; i++)
3848 dup[i] = words[i];
3849
3850 return dup;
3851}
sewardjb4112022007-11-09 22:49:28 +00003852
/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   /* Remove 'lk' from the lock-order graph while preserving the
      orderings it implied: every predecessor of 'lk' is connected
      directly to every successor of 'lk'. */
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   /* Detach 'lk' from all of its neighbours. */
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* Re-link each predecessor to each (distinct) successor, so the
      transitive orderings that passed through 'lk' survive. */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   /* UWordV_dup returns NULL for empty sets, hence the guards. */
   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
3908
sewardj1cbc12f2008-11-10 16:16:46 +00003909//__attribute__((noinline))
3910//static void laog__handle_lock_deletions (
3911// WordSetID /* in univ_laog */ locksToDelete
3912// )
3913//{
3914// Word i, ws_size;
3915// UWord* ws_words;
3916//
sewardj1cbc12f2008-11-10 16:16:46 +00003917//
3918// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003919// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003920// for (i = 0; i < ws_size; i++)
3921// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3922//
3923// if (HG_(clo_sanity_flags) & SCE_LAOG)
3924// all__sanity_check("laog__handle_lock_deletions-post");
3925//}
sewardjb4112022007-11-09 22:49:28 +00003926
3927
3928/*--------------------------------------------------------------*/
3929/*--- Malloc/free replacements ---*/
3930/*--------------------------------------------------------------*/
3931
/* Per-heap-block metadata, so that errors touching heap memory can be
   annotated with the block's allocation context and owning thread. */
typedef
   struct {
      void* next; /* required by m_hashtable */
      Addr payload; /* ptr to actual block */
      SizeT szB; /* size requested */
      ExeContext* where; /* where it was allocated */
      Thread* thr; /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously).  Keyed on the payload address. */
static VgHashTable hg_mallocmeta_table = NULL;
3945
3946
3947static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003948 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003949 tl_assert(md);
3950 return md;
3951}
/* Release a MallocMeta previously obtained from new_MallocMeta. */
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}
3955
3956
/* Allocate a client block and set up the metadata for it.
   Returns NULL if the underlying client-space allocation fails.
   Common back end for all the hg_cli__* allocation entry points. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   /* calloc-style requests get a zeroed payload. */
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB = szB;
   md->where = VG_(record_ExeContext)( tid, 0 );
   md->thr = map_threads_lookup( tid );

   /* Record the block so handle_free/realloc can find it by payload
      address. */
   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
3990
/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
/* Tool replacement for the client's malloc(). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Tool replacement for the client's C++ operator new. */
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Tool replacement for the client's C++ operator new[]. */
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Tool replacement for the client's memalign(); 'align' is the
   caller-requested alignment. */
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
4016static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4017 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
4018 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4019 /*is_zeroed*/True );
4020}
4021
4022
/* Free a client block, including getting rid of the relevant
   metadata.  Silently ignores addresses with no metadata entry.
   Common back end for all the hg_cli__*free/delete entry points. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   /* Save the size before the metadata is destroyed below. */
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}
4052
/* Tool replacement for the client's free(). */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Tool replacement for the client's C++ operator delete. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Tool replacement for the client's C++ operator delete[]. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
4062
4063
4064static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4065{
4066 MallocMeta *md, *md_new, *md_tmp;
4067 SizeT i;
4068
4069 Addr payload = (Addr)payloadV;
4070
4071 if (((SSizeT)new_size) < 0) return NULL;
4072
4073 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4074 if (!md)
4075 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4076
4077 tl_assert(md->payload == payload);
4078
4079 if (md->szB == new_size) {
4080 /* size unchanged */
4081 md->where = VG_(record_ExeContext)(tid, 0);
4082 return payloadV;
4083 }
4084
4085 if (md->szB > new_size) {
4086 /* new size is smaller */
4087 md->szB = new_size;
4088 md->where = VG_(record_ExeContext)(tid, 0);
4089 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4090 return payloadV;
4091 }
4092
4093 /* else */ {
4094 /* new size is bigger */
4095 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4096
4097 /* First half kept and copied, second half new */
4098 // FIXME: shouldn't we use a copier which implements the
4099 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004100 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004101 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004102 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004103 /* FIXME: can anything funny happen here? specifically, if the
4104 old range contained a lock, then die_mem_heap will complain.
4105 Is that the correct behaviour? Not sure. */
4106 evh__die_mem_heap( payload, md->szB );
4107
4108 /* Copy from old to new */
4109 for (i = 0; i < md->szB; i++)
4110 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4111
4112 /* Because the metadata hash table is index by payload address,
4113 we have to get rid of the old hash table entry and make a new
4114 one. We can't just modify the existing metadata in place,
4115 because then it would (almost certainly) be in the wrong hash
4116 chain. */
4117 md_new = new_MallocMeta();
4118 *md_new = *md;
4119
4120 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4121 tl_assert(md_tmp);
4122 tl_assert(md_tmp == md);
4123
4124 VG_(cli_free)((void*)md->payload);
4125 delete_MallocMeta(md);
4126
4127 /* Update fields */
4128 md_new->where = VG_(record_ExeContext)( tid, 0 );
4129 md_new->szB = new_size;
4130 md_new->payload = p_new;
4131 md_new->thr = map_threads_lookup( tid );
4132
4133 /* and add */
4134 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4135
4136 return (void*)p_new;
4137 }
4138}
4139
njn8b140de2009-02-17 04:31:18 +00004140static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4141{
4142 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4143
4144 // There may be slop, but pretend there isn't because only the asked-for
4145 // area will have been shadowed properly.
4146 return ( md ? md->szB : 0 );
4147}
4148
sewardjb4112022007-11-09 22:49:28 +00004149
sewardj095d61e2010-03-11 13:43:18 +00004150/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004151 Slow linear search. With a bit of hash table help if 'data_addr'
4152 is either the start of a block or up to 15 word-sized steps along
4153 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004154
4155static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4156{
sewardjc8028ad2010-05-05 09:34:42 +00004157 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4158 right at it. */
4159 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4160 return True;
4161 /* else normal interval rules apply */
4162 if (LIKELY(a < mm->payload)) return False;
4163 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4164 return True;
sewardj095d61e2010-03-11 13:43:18 +00004165}
4166
/* Find the malloc'd block containing 'data_addr', if any; on success
   fill in whichever of where/payload/szB are non-NULL and return
   True.  Fast hash probes first, then a full linear scan. */
Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   /* Each output parameter is optional; only write through non-NULL
      ones. */
   if (where)   *where   = mm->where;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
4208
4209
sewardjb4112022007-11-09 22:49:28 +00004210/*--------------------------------------------------------------*/
4211/*--- Instrumentation ---*/
4212/*--------------------------------------------------------------*/
4213
sewardjcafe5052013-01-17 14:24:35 +00004214#define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
sewardjffce8152011-06-24 10:09:41 +00004215#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4216#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4217#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4218#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4219#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4220
sewardjcafe5052013-01-17 14:24:35 +00004221/* This takes and returns atoms, of course. Not full IRExprs. */
4222static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4223{
4224 tl_assert(arg1 && arg2);
4225 tl_assert(isIRAtom(arg1));
4226 tl_assert(isIRAtom(arg2));
4227 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4228 code, I know. */
4229 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4230 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4231 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4232 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4233 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4234 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4235 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4236 mkexpr(wide2))));
4237 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4238 return mkexpr(res);
4239}
4240
/* Emit IR into 'sbOut' that calls the appropriate evh__mem_help_c*
   helper for a client memory access of 'szB' bytes at 'addr'.  The
   helper call may be guarded: by a generated "not near SP" test when
   --check-stack-refs=no, and/or by 'guard' (a caller-supplied
   condition for conditional loads/stores; NULL means always). */
static void instrument_mem_access ( IRSB*   sbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB,
                                    Int     goff_sp,
                                    IRExpr* guard ) /* NULL => True */
{
   IRType   tyAddr   = Ity_INVALID;
   const HChar* hName = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   /* Select the helper by access direction and size; sizes 1/2/4/8
      have dedicated helpers, anything larger goes through the _N
      variant which takes the size as a second argument. */
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      /* guardA == "guard on the address" */
      IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guardA,
                tyAddr == Ity_I32
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guardA);
   }

   /* If there's a guard on the access itself (as supplied by the
      caller of this routine), we need to AND that in to any guard we
      might already have. */
   if (guard) {
      di->guard = mk_And1(sbOut, di->guard, guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
4405
4406
/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (sidedly safe w/ False as the safe value) */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const HChar* soname;
   if (0) return False;

   /* No debug info for the address means we cannot tell; fail safe. */
   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

   /* Compare against the known dynamic-linker sonames for the
      platform we were built for. */
#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}
4436
sewardjb4112022007-11-09 22:49:28 +00004437static
4438IRSB* hg_instrument ( VgCallbackClosure* closure,
4439 IRSB* bbIn,
4440 VexGuestLayout* layout,
4441 VexGuestExtents* vge,
florianca503be2012-10-07 21:59:42 +00004442 VexArchInfo* archinfo_host,
sewardjb4112022007-11-09 22:49:28 +00004443 IRType gWordTy, IRType hWordTy )
4444{
sewardj1c0ce7a2009-07-01 08:10:49 +00004445 Int i;
4446 IRSB* bbOut;
4447 Addr64 cia; /* address of current insn */
4448 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004449 Bool inLDSO = False;
4450 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004451
sewardjffce8152011-06-24 10:09:41 +00004452 const Int goff_sp = layout->offset_SP;
4453
sewardjb4112022007-11-09 22:49:28 +00004454 if (gWordTy != hWordTy) {
4455 /* We don't currently support this case. */
4456 VG_(tool_panic)("host/guest word size mismatch");
4457 }
4458
sewardja0eee322009-07-31 08:46:35 +00004459 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4460 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4461 }
4462
sewardjb4112022007-11-09 22:49:28 +00004463 /* Set up BB */
4464 bbOut = emptyIRSB();
4465 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4466 bbOut->next = deepCopyIRExpr(bbIn->next);
4467 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004468 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004469
4470 // Copy verbatim any IR preamble preceding the first IMark
4471 i = 0;
4472 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4473 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4474 i++;
4475 }
4476
sewardj1c0ce7a2009-07-01 08:10:49 +00004477 // Get the first statement, and initial cia from it
4478 tl_assert(bbIn->stmts_used > 0);
4479 tl_assert(i < bbIn->stmts_used);
4480 st = bbIn->stmts[i];
4481 tl_assert(Ist_IMark == st->tag);
4482 cia = st->Ist.IMark.addr;
4483 st = NULL;
4484
sewardjb4112022007-11-09 22:49:28 +00004485 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004486 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004487 tl_assert(st);
4488 tl_assert(isFlatIRStmt(st));
4489 switch (st->tag) {
4490 case Ist_NoOp:
4491 case Ist_AbiHint:
4492 case Ist_Put:
4493 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004494 case Ist_Exit:
4495 /* None of these can contain any memory references. */
4496 break;
4497
sewardj1c0ce7a2009-07-01 08:10:49 +00004498 case Ist_IMark:
4499 /* no mem refs, but note the insn address. */
4500 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004501 /* Don't instrument the dynamic linker. It generates a
4502 lot of races which we just expensively suppress, so
4503 it's pointless.
4504
4505 Avoid flooding is_in_dynamic_linker_shared_object with
4506 requests by only checking at transitions between 4K
4507 pages. */
4508 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4509 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4510 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4511 inLDSO = is_in_dynamic_linker_shared_object(cia);
4512 } else {
4513 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4514 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004515 break;
4516
sewardjb4112022007-11-09 22:49:28 +00004517 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004518 switch (st->Ist.MBE.event) {
4519 case Imbe_Fence:
4520 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004521 default:
4522 goto unhandled;
4523 }
sewardjb4112022007-11-09 22:49:28 +00004524 break;
4525
sewardj1c0ce7a2009-07-01 08:10:49 +00004526 case Ist_CAS: {
4527 /* Atomic read-modify-write cycle. Just pretend it's a
4528 read. */
4529 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004530 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4531 if (isDCAS) {
4532 tl_assert(cas->expdHi);
4533 tl_assert(cas->dataHi);
4534 } else {
4535 tl_assert(!cas->expdHi);
4536 tl_assert(!cas->dataHi);
4537 }
4538 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004539 if (!inLDSO) {
4540 instrument_mem_access(
4541 bbOut,
4542 cas->addr,
4543 (isDCAS ? 2 : 1)
4544 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4545 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004546 sizeofIRType(hWordTy), goff_sp,
4547 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004548 );
4549 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004550 break;
4551 }
4552
sewardjdb5907d2009-11-26 17:20:21 +00004553 case Ist_LLSC: {
4554 /* We pretend store-conditionals don't exist, viz, ignore
4555 them. Whereas load-linked's are treated the same as
4556 normal loads. */
4557 IRType dataTy;
4558 if (st->Ist.LLSC.storedata == NULL) {
4559 /* LL */
4560 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004561 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004562 instrument_mem_access(
4563 bbOut,
4564 st->Ist.LLSC.addr,
4565 sizeofIRType(dataTy),
4566 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004567 sizeofIRType(hWordTy), goff_sp,
4568 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004569 );
4570 }
sewardjdb5907d2009-11-26 17:20:21 +00004571 } else {
4572 /* SC */
4573 /*ignore */
4574 }
4575 break;
4576 }
4577
4578 case Ist_Store:
sewardjdb5907d2009-11-26 17:20:21 +00004579 if (!inLDSO) {
4580 instrument_mem_access(
4581 bbOut,
4582 st->Ist.Store.addr,
4583 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4584 True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004585 sizeofIRType(hWordTy), goff_sp,
4586 NULL/*no-guard*/
sewardjdb5907d2009-11-26 17:20:21 +00004587 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004588 }
njnb83caf22009-05-25 01:47:56 +00004589 break;
sewardjb4112022007-11-09 22:49:28 +00004590
sewardjcafe5052013-01-17 14:24:35 +00004591 case Ist_StoreG: {
4592 IRStoreG* sg = st->Ist.StoreG.details;
4593 IRExpr* data = sg->data;
4594 IRExpr* addr = sg->addr;
4595 IRType type = typeOfIRExpr(bbIn->tyenv, data);
4596 tl_assert(type != Ity_INVALID);
4597 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4598 True/*isStore*/,
4599 sizeofIRType(hWordTy),
4600 goff_sp, sg->guard );
4601 break;
4602 }
4603
4604 case Ist_LoadG: {
4605 IRLoadG* lg = st->Ist.LoadG.details;
4606 IRType type = Ity_INVALID; /* loaded type */
4607 IRType typeWide = Ity_INVALID; /* after implicit widening */
4608 IRExpr* addr = lg->addr;
4609 typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4610 tl_assert(type != Ity_INVALID);
4611 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4612 False/*!isStore*/,
4613 sizeofIRType(hWordTy),
4614 goff_sp, lg->guard );
4615 break;
4616 }
4617
sewardjb4112022007-11-09 22:49:28 +00004618 case Ist_WrTmp: {
4619 IRExpr* data = st->Ist.WrTmp.data;
4620 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004621 if (!inLDSO) {
4622 instrument_mem_access(
4623 bbOut,
4624 data->Iex.Load.addr,
4625 sizeofIRType(data->Iex.Load.ty),
4626 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004627 sizeofIRType(hWordTy), goff_sp,
4628 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004629 );
4630 }
sewardjb4112022007-11-09 22:49:28 +00004631 }
4632 break;
4633 }
4634
4635 case Ist_Dirty: {
4636 Int dataSize;
4637 IRDirty* d = st->Ist.Dirty.details;
4638 if (d->mFx != Ifx_None) {
4639 /* This dirty helper accesses memory. Collect the
4640 details. */
4641 tl_assert(d->mAddr != NULL);
4642 tl_assert(d->mSize != 0);
4643 dataSize = d->mSize;
4644 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004645 if (!inLDSO) {
4646 instrument_mem_access(
4647 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004648 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004649 );
4650 }
sewardjb4112022007-11-09 22:49:28 +00004651 }
4652 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004653 if (!inLDSO) {
4654 instrument_mem_access(
4655 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004656 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004657 );
4658 }
sewardjb4112022007-11-09 22:49:28 +00004659 }
4660 } else {
4661 tl_assert(d->mAddr == NULL);
4662 tl_assert(d->mSize == 0);
4663 }
4664 break;
4665 }
4666
4667 default:
sewardjf98e1c02008-10-25 16:22:41 +00004668 unhandled:
4669 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004670 tl_assert(0);
4671
4672 } /* switch (st->tag) */
4673
4674 addStmtToIRSB( bbOut, st );
4675 } /* iterate over bbIn->stmts */
4676
4677 return bbOut;
4678}
4679
sewardjffce8152011-06-24 10:09:41 +00004680#undef binop
4681#undef mkexpr
4682#undef mkU32
4683#undef mkU64
4684#undef assign
4685
sewardjb4112022007-11-09 22:49:28 +00004686
4687/*----------------------------------------------------------------*/
4688/*--- Client requests ---*/
4689/*----------------------------------------------------------------*/
4690
4691/* Sheesh. Yet another goddam finite map. */
4692static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4693
4694static void map_pthread_t_to_Thread_INIT ( void ) {
4695 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004696 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4697 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004698 tl_assert(map_pthread_t_to_Thread != NULL);
4699 }
4700}
4701
4702
4703static
4704Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4705{
4706 if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
4707 return False;
4708
4709 /* Anything that gets past the above check is one of ours, so we
4710 should be able to handle it. */
4711
4712 /* default, meaningless return value, unless otherwise set */
4713 *ret = 0;
4714
4715 switch (args[0]) {
4716
4717 /* --- --- User-visible client requests --- --- */
4718
4719 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004720 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004721 args[1], args[2]);
4722 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004723 are any held locks etc in the area. Calling evh__die_mem
4724 and then evh__new_mem is a bit inefficient; probably just
4725 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004726 if (args[2] > 0) { /* length */
4727 evh__die_mem(args[1], args[2]);
4728 /* and then set it to New */
4729 evh__new_mem(args[1], args[2]);
4730 }
4731 break;
4732
sewardjc8028ad2010-05-05 09:34:42 +00004733 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4734 Addr payload = 0;
4735 SizeT pszB = 0;
4736 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4737 args[1]);
4738 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4739 if (pszB > 0) {
4740 evh__die_mem(payload, pszB);
4741 evh__new_mem(payload, pszB);
4742 }
4743 *ret = pszB;
4744 } else {
4745 *ret = (UWord)-1;
4746 }
4747 break;
4748 }
4749
sewardj406bac82010-03-03 23:03:40 +00004750 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4751 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4752 args[1], args[2]);
4753 if (args[2] > 0) { /* length */
4754 evh__untrack_mem(args[1], args[2]);
4755 }
4756 break;
4757
4758 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4759 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4760 args[1], args[2]);
4761 if (args[2] > 0) { /* length */
4762 evh__new_mem(args[1], args[2]);
4763 }
4764 break;
4765
sewardjb4112022007-11-09 22:49:28 +00004766 /* --- --- Client requests for Helgrind's use only --- --- */
4767
4768 /* Some thread is telling us its pthread_t value. Record the
4769 binding between that and the associated Thread*, so we can
4770 later find the Thread* again when notified of a join by the
4771 thread. */
4772 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4773 Thread* my_thr = NULL;
4774 if (0)
4775 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4776 (void*)args[1]);
4777 map_pthread_t_to_Thread_INIT();
4778 my_thr = map_threads_maybe_lookup( tid );
4779 /* This assertion should hold because the map_threads (tid to
4780 Thread*) binding should have been made at the point of
4781 low-level creation of this thread, which should have
4782 happened prior to us getting this client request for it.
4783 That's because this client request is sent from
4784 client-world from the 'thread_wrapper' function, which
4785 only runs once the thread has been low-level created. */
4786 tl_assert(my_thr != NULL);
4787 /* So now we know that (pthread_t)args[1] is associated with
4788 (Thread*)my_thr. Note that down. */
4789 if (0)
4790 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4791 (void*)args[1], (void*)my_thr );
florian6bf37262012-10-21 03:23:36 +00004792 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004793 break;
4794 }
4795
4796 case _VG_USERREQ__HG_PTH_API_ERROR: {
4797 Thread* my_thr = NULL;
4798 map_pthread_t_to_Thread_INIT();
4799 my_thr = map_threads_maybe_lookup( tid );
4800 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004801 HG_(record_error_PthAPIerror)(
florian6bf37262012-10-21 03:23:36 +00004802 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004803 break;
4804 }
4805
4806 /* This thread (tid) has completed a join with the quitting
4807 thread whose pthread_t is in args[1]. */
4808 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4809 Thread* thr_q = NULL; /* quitter Thread* */
4810 Bool found = False;
4811 if (0)
4812 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4813 (void*)args[1]);
4814 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004815 found = VG_(lookupFM)( map_pthread_t_to_Thread,
florian6bf37262012-10-21 03:23:36 +00004816 NULL, (UWord*)&thr_q, (UWord)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004817 /* Can this fail? It would mean that our pthread_join
4818 wrapper observed a successful join on args[1] yet that
4819 thread never existed (or at least, it never lodged an
4820 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4821 sounds like a bug in the threads library. */
4822 // FIXME: get rid of this assertion; handle properly
4823 tl_assert(found);
4824 if (found) {
4825 if (0)
4826 VG_(printf)(".................... quitter Thread* = %p\n",
4827 thr_q);
4828 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4829 }
4830 break;
4831 }
4832
4833 /* EXPOSITION only: by intercepting lock init events we can show
4834 the user where the lock was initialised, rather than only
4835 being able to show where it was first locked. Intercepting
4836 lock initialisations is not necessary for the basic operation
4837 of the race checker. */
4838 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
4839 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
4840 break;
4841
sewardjc02f6c42013-10-14 13:51:25 +00004842 /* mutex=arg[1], mutex_is_init=arg[2] */
sewardjb4112022007-11-09 22:49:28 +00004843 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00004844 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjb4112022007-11-09 22:49:28 +00004845 break;
4846
4847 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
4848 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
4849 break;
4850
4851 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
4852 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
4853 break;
4854
4855 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
4856 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
4857 break;
4858
4859 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
4860 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
4861 break;
4862
4863 /* This thread is about to do pthread_cond_signal on the
4864 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
4865 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
4866 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
4867 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
4868 break;
4869
4870 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
4871 Returns a flag indicating whether or not the mutex is believed to be
4872 valid for this operation. */
4873 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
4874 Bool mutex_is_valid
4875 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
4876 (void*)args[2] );
4877 *ret = mutex_is_valid ? 1 : 0;
4878 break;
4879 }
4880
philippe19dfe032013-03-24 20:10:23 +00004881 /* Thread successfully completed pthread_cond_init:
4882 cond=arg[1], cond_attr=arg[2] */
4883 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
4884 evh__HG_PTHREAD_COND_INIT_POST( tid,
4885 (void*)args[1], (void*)args[2] );
4886 break;
4887
sewardjc02f6c42013-10-14 13:51:25 +00004888 /* cond=arg[1], cond_is_init=arg[2] */
sewardjf98e1c02008-10-25 16:22:41 +00004889 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00004890 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjf98e1c02008-10-25 16:22:41 +00004891 break;
4892
sewardjb4112022007-11-09 22:49:28 +00004893 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
4894 mutex=arg[2] */
4895 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
4896 evh__HG_PTHREAD_COND_WAIT_POST( tid,
sewardjff427c92013-10-14 12:13:52 +00004897 (void*)args[1], (void*)args[2],
4898 (Bool)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004899 break;
4900
4901 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
4902 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
4903 break;
4904
4905 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
4906 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
4907 break;
4908
sewardj789c3c52008-02-25 12:10:07 +00004909 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00004910 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00004911 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
4912 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00004913 break;
4914
4915 /* rwlock=arg[1], isW=arg[2] */
4916 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
4917 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
4918 break;
4919
4920 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
4921 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
4922 break;
4923
4924 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
4925 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
4926 break;
4927
sewardj11e352f2007-11-30 11:11:02 +00004928 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
4929 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00004930 break;
4931
sewardj11e352f2007-11-30 11:11:02 +00004932 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
4933 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004934 break;
4935
sewardj11e352f2007-11-30 11:11:02 +00004936 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
4937 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
4938 break;
4939
4940 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
4941 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004942 break;
4943
sewardj9f569b72008-11-13 13:33:09 +00004944 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00004945 /* pth_bar_t*, ulong count, ulong resizable */
4946 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
4947 args[2], args[3] );
4948 break;
4949
4950 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
4951 /* pth_bar_t*, ulong newcount */
4952 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
4953 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00004954 break;
4955
4956 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
4957 /* pth_bar_t* */
4958 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
4959 break;
4960
4961 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
4962 /* pth_bar_t* */
4963 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
4964 break;
sewardjb4112022007-11-09 22:49:28 +00004965
sewardj5a644da2009-08-11 10:35:58 +00004966 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
4967 /* pth_spinlock_t* */
4968 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
4969 break;
4970
4971 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
4972 /* pth_spinlock_t* */
4973 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
4974 break;
4975
4976 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
4977 /* pth_spinlock_t*, Word */
4978 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
4979 break;
4980
4981 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
4982 /* pth_spinlock_t* */
4983 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
4984 break;
4985
4986 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
4987 /* pth_spinlock_t* */
4988 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
4989 break;
4990
sewardjed2e72e2009-08-14 11:08:24 +00004991 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
florian19f91bb2012-11-10 22:29:54 +00004992 /* HChar* who */
sewardjed2e72e2009-08-14 11:08:24 +00004993 HChar* who = (HChar*)args[1];
4994 HChar buf[50 + 50];
4995 Thread* thr = map_threads_maybe_lookup( tid );
4996 tl_assert( thr ); /* I must be mapped */
4997 tl_assert( who );
4998 tl_assert( VG_(strlen)(who) <= 50 );
4999 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5000 /* record_error_Misc strdup's buf, so this is safe: */
5001 HG_(record_error_Misc)( thr, buf );
5002 break;
5003 }
5004
5005 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5006 /* UWord arbitrary-SO-tag */
5007 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5008 break;
5009
5010 case _VG_USERREQ__HG_USERSO_RECV_POST:
5011 /* UWord arbitrary-SO-tag */
5012 evh__HG_USERSO_RECV_POST( tid, args[1] );
5013 break;
5014
sewardj6015d0e2011-03-11 19:10:48 +00005015 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5016 /* UWord arbitrary-SO-tag */
5017 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5018 break;
5019
sewardjb4112022007-11-09 22:49:28 +00005020 default:
5021 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00005022 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
5023 args[0]);
sewardjb4112022007-11-09 22:49:28 +00005024 }
5025
5026 return True;
5027}
5028
5029
5030/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00005031/*--- Setup ---*/
5032/*----------------------------------------------------------------*/
5033
florian19f91bb2012-11-10 22:29:54 +00005034static Bool hg_process_cmd_line_option ( const HChar* arg )
sewardjb4112022007-11-09 22:49:28 +00005035{
florian19f91bb2012-11-10 22:29:54 +00005036 const HChar* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00005037
njn83df0b62009-02-25 01:01:05 +00005038 if VG_BOOL_CLO(arg, "--track-lockorders",
5039 HG_(clo_track_lockorders)) {}
5040 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5041 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00005042
5043 else if VG_XACT_CLO(arg, "--history-level=none",
5044 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00005045 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00005046 HG_(clo_history_level), 1);
5047 else if VG_XACT_CLO(arg, "--history-level=full",
5048 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00005049
sewardjf585e482009-08-16 22:52:29 +00005050 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00005051 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00005052 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00005053 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00005054
sewardj11e352f2007-11-30 11:11:02 +00005055 /* "stuvwx" --> stuvwx (binary) */
njn83df0b62009-02-25 01:01:05 +00005056 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00005057 Int j;
sewardjb4112022007-11-09 22:49:28 +00005058
njn83df0b62009-02-25 01:01:05 +00005059 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00005060 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00005061 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00005062 return False;
5063 }
sewardj11e352f2007-11-30 11:11:02 +00005064 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00005065 if ('0' == tmp_str[j]) { /* do nothing */ }
5066 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00005067 else {
sewardj11e352f2007-11-30 11:11:02 +00005068 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00005069 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00005070 return False;
5071 }
5072 }
sewardjf98e1c02008-10-25 16:22:41 +00005073 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00005074 }
5075
sewardj622fe492011-03-11 21:06:59 +00005076 else if VG_BOOL_CLO(arg, "--free-is-write",
5077 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00005078
5079 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5080 HG_(clo_vts_pruning), 0);
5081 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5082 HG_(clo_vts_pruning), 1);
5083 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5084 HG_(clo_vts_pruning), 2);
5085
5086 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5087 HG_(clo_check_stack_refs)) {}
5088
sewardjb4112022007-11-09 22:49:28 +00005089 else
5090 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5091
5092 return True;
5093}
5094
5095static void hg_print_usage ( void )
5096{
5097 VG_(printf)(
sewardj622fe492011-03-11 21:06:59 +00005098" --free-is-write=no|yes treat heap frees as writes [no]\n"
sewardj849b0ed2008-12-21 10:43:10 +00005099" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00005100" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00005101" full: show both stack traces for a data race (can be very slow)\n"
5102" approx: full trace for one thread, approx for the other (faster)\n"
5103" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00005104" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjffce8152011-06-24 10:09:41 +00005105" --check-stack-refs=no|yes race-check reads and writes on the\n"
5106" main stack and thread stacks? [yes]\n"
sewardjb4112022007-11-09 22:49:28 +00005107 );
sewardjb4112022007-11-09 22:49:28 +00005108}
5109
5110static void hg_print_debug_usage ( void )
5111{
sewardjb4112022007-11-09 22:49:28 +00005112 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
5113 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00005114 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00005115 " at events (X = 0|1) [000000]\n");
5116 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00005117 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00005118 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00005119 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
5120 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00005121 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00005122 VG_(printf)(" 000010 at lock/unlock events\n");
5123 VG_(printf)(" 000001 at thread create/join events\n");
sewardjffce8152011-06-24 10:09:41 +00005124 VG_(printf)(
5125" --vts-pruning=never|auto|always [auto]\n"
5126" never: is never done (may cause big space leaks in Helgrind)\n"
5127" auto: done just often enough to keep space usage under control\n"
5128" always: done after every VTS GC (mostly just a big time waster)\n"
5129 );
sewardjb4112022007-11-09 22:49:28 +00005130}
5131
sewardjb4112022007-11-09 22:49:28 +00005132static void hg_fini ( Int exitcode )
5133{
sewardj2d9e8742009-08-07 15:46:56 +00005134 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
5135 VG_(message)(Vg_UserMsg,
5136 "For counts of detected and suppressed errors, "
5137 "rerun with: -v\n");
5138 }
5139
5140 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
5141 && HG_(clo_history_level) >= 2) {
5142 VG_(umsg)(
5143 "Use --history-level=approx or =none to gain increased speed, at\n" );
5144 VG_(umsg)(
5145 "the cost of reduced accuracy of conflicting-access information\n");
5146 }
5147
sewardjb4112022007-11-09 22:49:28 +00005148 if (SHOW_DATA_STRUCTURES)
5149 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00005150 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00005151 all__sanity_check("SK_(fini)");
5152
sewardj2d9e8742009-08-07 15:46:56 +00005153 if (VG_(clo_stats)) {
sewardjb4112022007-11-09 22:49:28 +00005154
5155 if (1) {
5156 VG_(printf)("\n");
sewardjb4112022007-11-09 22:49:28 +00005157 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
sewardjc1fb9d22011-02-28 09:03:44 +00005158 if (HG_(clo_track_lockorders)) {
5159 VG_(printf)("\n");
5160 HG_(ppWSUstats)( univ_laog, "univ_laog" );
5161 }
sewardjb4112022007-11-09 22:49:28 +00005162 }
5163
sewardjf98e1c02008-10-25 16:22:41 +00005164 //zz VG_(printf)("\n");
5165 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
5166 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
5167 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
5168 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
5169 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
5170 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
5171 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
5172 //zz stats__hbefore_stk_hwm);
5173 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
5174 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
sewardjb4112022007-11-09 22:49:28 +00005175
5176 VG_(printf)("\n");
barta0b6b2c2008-07-07 06:49:24 +00005177 VG_(printf)(" locksets: %'8d unique lock sets\n",
sewardjb4112022007-11-09 22:49:28 +00005178 (Int)HG_(cardinalityWSU)( univ_lsets ));
sewardjc1fb9d22011-02-28 09:03:44 +00005179 if (HG_(clo_track_lockorders)) {
5180 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
5181 (Int)HG_(cardinalityWSU)( univ_laog ));
5182 }
sewardjb4112022007-11-09 22:49:28 +00005183
sewardjd52392d2008-11-08 20:36:26 +00005184 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
5185 // stats__ga_LL_adds,
5186 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
sewardjb4112022007-11-09 22:49:28 +00005187
sewardjf98e1c02008-10-25 16:22:41 +00005188 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
5189 HG_(stats__LockN_to_P_queries),
5190 HG_(stats__LockN_to_P_get_map_size)() );
sewardjb4112022007-11-09 22:49:28 +00005191
sewardjf98e1c02008-10-25 16:22:41 +00005192 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
5193 HG_(stats__string_table_queries),
5194 HG_(stats__string_table_get_map_size)() );
sewardjc1fb9d22011-02-28 09:03:44 +00005195 if (HG_(clo_track_lockorders)) {
5196 VG_(printf)(" LAOG: %'8d map size\n",
5197 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
5198 VG_(printf)(" LAOG exposition: %'8d map size\n",
5199 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
5200 }
5201
barta0b6b2c2008-07-07 06:49:24 +00005202 VG_(printf)(" locks: %'8lu acquires, "
5203 "%'lu releases\n",
sewardjb4112022007-11-09 22:49:28 +00005204 stats__lockN_acquires,
5205 stats__lockN_releases
5206 );
barta0b6b2c2008-07-07 06:49:24 +00005207 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
sewardjb4112022007-11-09 22:49:28 +00005208
5209 VG_(printf)("\n");
sewardjf98e1c02008-10-25 16:22:41 +00005210 libhb_shutdown(True);
sewardjb4112022007-11-09 22:49:28 +00005211 }
5212}
5213
sewardjf98e1c02008-10-25 16:22:41 +00005214/* FIXME: move these somewhere sane */
5215
5216static
5217void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5218{
5219 Thread* thr;
5220 ThreadId tid;
5221 UWord nActual;
5222 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005223 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005224 tl_assert(thr);
5225 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5226 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
5227 NULL, NULL, 0 );
5228 tl_assert(nActual <= nRequest);
5229 for (; nActual < nRequest; nActual++)
5230 frames[nActual] = 0;
5231}
5232
5233static
sewardj23f12002009-07-24 08:45:08 +00005234ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00005235{
5236 Thread* thr;
5237 ThreadId tid;
5238 ExeContext* ec;
5239 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005240 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005241 tl_assert(thr);
5242 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00005243 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00005244 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00005245 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00005246}
5247
5248
sewardjc1fb9d22011-02-28 09:03:44 +00005249static void hg_post_clo_init ( void )
sewardjb4112022007-11-09 22:49:28 +00005250{
sewardjf98e1c02008-10-25 16:22:41 +00005251 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00005252
sewardjc1fb9d22011-02-28 09:03:44 +00005253 /////////////////////////////////////////////
5254 hbthr_root = libhb_init( for_libhb__get_stacktrace,
5255 for_libhb__get_EC );
5256 /////////////////////////////////////////////
5257
5258
5259 if (HG_(clo_track_lockorders))
5260 laog__init();
5261
5262 initialise_data_structures(hbthr_root);
5263}
5264
static void hg_pre_clo_init ( void )
{
   /* Tool registration entry point, run before command-line option
      processing.  Declares tool details and registers all the
      core->tool callbacks Helgrind relies on: error management,
      malloc replacement, and memory/thread lifecycle events. */
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2012, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   /* Main hooks: post-CLO init, instrumentation, finalisation. */
   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   /* Replace the client's allocator so Helgrind can attach
      MallocMeta to every heap block. */
   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   /* Memory-state events: new memory in various forms. */
   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   /* Memory-state events: memory going away. */
   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   /* Pre/post access events; post-write is deliberately ignored. */
   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   /* Thread lifecycle and scheduling events. */
   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}
5365
5366VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5367
5368/*--------------------------------------------------------------------*/
5369/*--- end hg_main.c ---*/
5370/*--------------------------------------------------------------------*/