blob: f5e36252a83981360d31a8c424a16cdd759c1a24 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj03f8d3f2012-08-05 15:46:46 +000011 Copyright (C) 2007-2012 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
sewardj03f8d3f2012-08-05 15:46:46 +000014 Copyright (C) 2007-2012 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj61bc2c52011-02-09 10:34:00 +000055#include "pub_tool_libcproc.h" // VG_(atfork)
sewardj234e5582011-02-09 12:47:23 +000056#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
sewardjb4112022007-11-09 22:49:28 +000057
sewardjf98e1c02008-10-25 16:22:41 +000058#include "hg_basics.h"
59#include "hg_wordset.h"
60#include "hg_lock_n_thread.h"
61#include "hg_errors.h"
62
63#include "libhb.h"
64
sewardjb4112022007-11-09 22:49:28 +000065#include "helgrind.h"
66
sewardjf98e1c02008-10-25 16:22:41 +000067
68// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
69
70// FIXME: when client destroys a lock or a CV, remove these
71// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000072
73/*----------------------------------------------------------------*/
74/*--- ---*/
75/*----------------------------------------------------------------*/
76
sewardj11e352f2007-11-30 11:11:02 +000077/* Note this needs to be compiled with -fno-strict-aliasing, since it
78 contains a whole bunch of calls to lookupFM etc which cast between
79 Word and pointer types. gcc rightly complains this breaks ANSI C
80 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
81 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000082*/
sewardjb4112022007-11-09 22:49:28 +000083
84// FIXME what is supposed to happen to locks in memory which
85// is relocated as a result of client realloc?
86
sewardjb4112022007-11-09 22:49:28 +000087// FIXME put referencing ThreadId into Thread and get
88// rid of the slow reverse mapping function.
89
90// FIXME accesses to NoAccess areas: change state to Excl?
91
92// FIXME report errors for accesses of NoAccess memory?
93
94// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
95// the thread still holds the lock.
96
97/* ------------ Debug/trace options ------------ */
98
sewardjb4112022007-11-09 22:49:28 +000099// 0 for silent, 1 for some stuff, 2 for lots of stuff
100#define SHOW_EVENTS 0
101
sewardjb4112022007-11-09 22:49:28 +0000102
florian6bf37262012-10-21 03:23:36 +0000103static void all__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000104
philipped99c26a2012-07-31 22:17:28 +0000105#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
sewardjb4112022007-11-09 22:49:28 +0000106
107// 0 for none, 1 for dump at end of run
108#define SHOW_DATA_STRUCTURES 0
109
110
sewardjb4112022007-11-09 22:49:28 +0000111/* ------------ Misc comments ------------ */
112
113// FIXME: don't hardwire initial entries for root thread.
114// Instead, let the pre_thread_ll_create handler do this.
115
sewardjb4112022007-11-09 22:49:28 +0000116
117/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000118/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000119/*----------------------------------------------------------------*/
120
sewardjb4112022007-11-09 22:49:28 +0000121/* Admin linked list of Threads */
122static Thread* admin_threads = NULL;
sewardjffce8152011-06-24 10:09:41 +0000123Thread* get_admin_threads ( void ) { return admin_threads; }
sewardjb4112022007-11-09 22:49:28 +0000124
sewardj1d7c3322011-02-28 09:22:51 +0000125/* Admin double linked list of Locks */
126/* We need a double linked list to properly and efficiently
127 handle del_LockN. */
sewardjb4112022007-11-09 22:49:28 +0000128static Lock* admin_locks = NULL;
129
sewardjb4112022007-11-09 22:49:28 +0000130/* Mapping table for core ThreadIds to Thread* */
131static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
132
sewardjb4112022007-11-09 22:49:28 +0000133/* Mapping table for lock guest addresses to Lock* */
134static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
135
sewardj0f64c9e2011-03-10 17:40:22 +0000136/* The word-set universes for lock sets. */
sewardjb4112022007-11-09 22:49:28 +0000137static WordSetU* univ_lsets = NULL; /* sets of Lock* */
138static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
sewardj866c80c2011-10-22 19:29:51 +0000139static Int next_gc_univ_laog = 1;
140/* univ_laog will be garbaged collected when the nr of element in univ_laog is
141 >= next_gc_univ_laog. */
sewardjb4112022007-11-09 22:49:28 +0000142
sewardjffce8152011-06-24 10:09:41 +0000143/* Allow libhb to get at the universe of locksets stored
144 here. Sigh. */
145WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
146
147/* Allow libhb to get at the list of locks stored here. Ditto
148 sigh. */
149Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
150
sewardjb4112022007-11-09 22:49:28 +0000151
152/*----------------------------------------------------------------*/
153/*--- Simple helpers for the data structures ---*/
154/*----------------------------------------------------------------*/
155
156static UWord stats__lockN_acquires = 0;
157static UWord stats__lockN_releases = 0;
158
sewardjf98e1c02008-10-25 16:22:41 +0000159static
160ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000161
162/* --------- Constructors --------- */
163
sewardjf98e1c02008-10-25 16:22:41 +0000164static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000165 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000166 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000167 thread->locksetA = HG_(emptyWS)( univ_lsets );
168 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000169 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000170 thread->hbthr = hbthr;
171 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000172 thread->created_at = NULL;
173 thread->announced = False;
174 thread->errmsg_index = indx++;
175 thread->admin = admin_threads;
176 admin_threads = thread;
177 return thread;
178}
sewardjf98e1c02008-10-25 16:22:41 +0000179
sewardjb4112022007-11-09 22:49:28 +0000180// Make a new lock which is unlocked (hence ownerless)
sewardj1d7c3322011-02-28 09:22:51 +0000181// and insert the new lock in admin_locks double linked list.
sewardjb4112022007-11-09 22:49:28 +0000182static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
183 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000184 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardj0f64c9e2011-03-10 17:40:22 +0000185 /* begin: add to double linked list */
sewardj1d7c3322011-02-28 09:22:51 +0000186 if (admin_locks)
187 admin_locks->admin_prev = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000188 lock->admin_next = admin_locks;
189 lock->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000190 admin_locks = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000191 /* end: add */
sewardjb4112022007-11-09 22:49:28 +0000192 lock->unique = unique++;
193 lock->magic = LockN_MAGIC;
194 lock->appeared_at = NULL;
195 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000196 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000197 lock->guestaddr = guestaddr;
198 lock->kind = kind;
199 lock->heldW = False;
200 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000201 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000202 return lock;
203}
sewardjb4112022007-11-09 22:49:28 +0000204
/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list.  The Lock's
   libhb SO is deallocated too; the struct itself is poisoned with
   0xAA before being freed, to make use-after-free easier to spot. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   /* heldBy may legitimately be NULL (lock currently unheld). */
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      /* lk is the list head, so it cannot have a predecessor. */
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      /* Interior/tail node: predecessor must exist; bypass lk in
         both directions. */
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   /* Poison then free, so dangling references fail loudly. */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
231
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed.  Any other
   combination trips a tl_assert. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      /* Lock is currently unheld: record this acquisition site. */
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      /* Already held (recursive case); keep the original snapshot. */
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         /* First (and for non-recursive kinds, only) acquisition:
            create a fresh holder bag containing just 'thr'. */
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         /* Maybe-recursive mutex: first acquisition behaves like the
            non-recursive case. */
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         /* Bag multiplicity records the recursion depth. */
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         /* A rwlock can only be w-locked from the unheld state. */
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
285
/* Update 'lk' to reflect that 'thr' now has a read-acquisition of
   it.  Only legal on reader-writer locks which are currently unheld
   or read-held; anything else asserts. */
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      /* First acquisition since the lock was last fully released:
         record where it happened. */
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      /* Already r-held: just add 'thr' to the holder bag. */
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      /* Unheld: create the holder bag with 'thr' as sole reader. */
      lk->heldW = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
322
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed.  One bag entry for 'thr' is
   removed per call; when the bag empties the lock reverts to the
   fully-unheld state. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      /* Last holder gone: dispose of the bag and clear the
         acquisition snapshot so the next acquire re-records it. */
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy = NULL;
      lk->heldW = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
349
/* For every thread currently holding 'lk', remove 'lk' from that
   thread's locksetA (and, if 'lk' is write-held, also from its
   locksetW).  No-op if the lock is unheld.  Does not modify 'lk'
   itself. */
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      /* Unheld lock cannot be write-held either. */
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      /* The holder's locksetA must already contain lk; delete it. */
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         /* Write-held: locksetW must contain it too. */
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
375
sewardjb4112022007-11-09 22:49:28 +0000376
377/*----------------------------------------------------------------*/
378/*--- Print out the primary data structures ---*/
379/*----------------------------------------------------------------*/
380
sewardjb4112022007-11-09 22:49:28 +0000381#define PP_THREADS (1<<1)
382#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000383#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000384
385
386static const Int sHOW_ADMIN = 0;
387
388static void space ( Int n )
389{
390 Int i;
florian6bf37262012-10-21 03:23:36 +0000391 HChar spaces[128+1];
sewardjb4112022007-11-09 22:49:28 +0000392 tl_assert(n >= 0 && n < 128);
393 if (n == 0)
394 return;
395 for (i = 0; i < n; i++)
396 spaces[i] = ' ';
397 spaces[i] = 0;
398 tl_assert(i < 128+1);
399 VG_(printf)("%s", spaces);
400}
401
/* Pretty-print one Thread at indent 'd'.  Admin fields are shown
   only when the file-scope sHOW_ADMIN flag is nonzero. */
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n", t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   /* locksets are WordSetIDs (small ints), hence printed as %d */
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
413
/* Pretty-print the whole admin_threads list at indent 'd'.  First
   pass counts the entries, second pass prints each Thread. */
static void pp_admin_threads ( Int d )
{
   Int i, n;
   Thread* t;
   for (n = 0, t = admin_threads; t; n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads; t; i++, t = t->admin) {
      if (0) {
         /* NOTE(review): dead code (if (0)); also indents by 'n'
            rather than 'd' — looks like a typo, harmless as-is. */
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
431
/* Pretty-print the map_threads array at indent 'd': first count the
   occupied slots, then print one "coretid -> Thread*" line per
   non-NULL entry. */
static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
449
450static const HChar* show_LockKind ( LockKind lkk ) {
451 switch (lkk) {
452 case LK_mbRec: return "mbRec";
453 case LK_nonRec: return "nonRec";
454 case LK_rdwr: return "rdwr";
455 default: tl_assert(0);
456 }
457}
458
/* Pretty-print one Lock at indent 'd', including (when held) the
   bag of holder threads with their multiplicities. */
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
      space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      UWord count;
      VG_(printf)(" { ");
      /* Each bag entry is a (Thread*, multiplicity) pair; the
         multiplicity is the recursion depth for mbRec locks. */
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
484
/* Pretty-print the whole admin_locks list at indent 'd'.  First
   pass counts the entries, second pass prints each Lock. */
static void pp_admin_locks ( Int d )
{
   Int i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
      if (0) {
         /* NOTE(review): dead code (if (0)); indents by 'n' not 'd',
            same oddity as pp_admin_threads.  Harmless as-is. */
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}
502
/* Pretty-print the map_locks finite map (guest lock address ->
   Lock*) at indent 'd', one line per mapping. */
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
518
/* Dump the primary data structures selected by 'flags' (a bitmask
   of PP_THREADS / PP_LOCKS; PP_ALL selects both).  'caller' is an
   identifying string printed in the header so dumps can be matched
   to the site that requested them. */
static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
541
542#undef SHOW_ADMIN
543
544
545/*----------------------------------------------------------------*/
546/*--- Initialise the primary data structures ---*/
547/*----------------------------------------------------------------*/
548
/* One-time setup of all primary data structures: the thread map,
   the lock map, the lockset universes, and a Thread for the root
   thread bound to libhb's root Thr 'hbthr_root'.  Asserts that
   every structure is still in its zeroed/NULL initial state, so
   this must be called exactly once, before any events arrive. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   /* map_locks keys are guest Addrs stored as unboxed UWords. */
   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   /* The lock-acquisition-order graph universe is only needed when
      lock-order tracking is enabled. */
   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   /* Cross-link the hg Thread and the libhb Thr, both directions. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}
605
606
607/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000608/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000609/*----------------------------------------------------------------*/
610
611/* Doesn't assert if the relevant map_threads entry is NULL. */
612static Thread* map_threads_maybe_lookup ( ThreadId coretid )
613{
614 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000615 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000616 thr = map_threads[coretid];
617 return thr;
618}
619
620/* Asserts if the relevant map_threads entry is NULL. */
621static inline Thread* map_threads_lookup ( ThreadId coretid )
622{
623 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000624 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000625 thr = map_threads[coretid];
626 tl_assert(thr);
627 return thr;
628}
629
/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads.  (Despite the _SLOW name, this now just reads the
   cached thr->coretid field rather than scanning the table.) */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}
643
/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads, and that the forward mapping agrees
   with the result. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   /* Consistency: the forward map must point back at 'thr'. */
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}
654
655static void map_threads_delete ( ThreadId coretid )
656{
657 Thread* thr;
658 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000659 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000660 thr = map_threads[coretid];
661 tl_assert(thr);
662 map_threads[coretid] = NULL;
663}
664
665
666/*----------------------------------------------------------------*/
667/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
668/*----------------------------------------------------------------*/
669
/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock.
   'tid' is used only to record the creation site (appeared_at) for
   newly-created locks. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      /* Not seen before: make a fresh unheld Lock and register it. */
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      /* Existing entry: sanity-check and hand it back.  Note the
         stored lock's kind is NOT checked against 'lkk' here. */
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
695
696static Lock* map_locks_maybe_lookup ( Addr ga )
697{
698 Bool found;
699 Lock* lk = NULL;
florian6bf37262012-10-21 03:23:36 +0000700 found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000701 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000702 return lk;
703}
704
/* Remove the map_locks entry for guest address 'ga'.  Asserts that
   the entry existed; does NOT free the Lock itself (callers do that
   separately via del_LockN). */
static void map_locks_delete ( Addr ga )
{
   Addr ga2 = 0;
   Lock* lk = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
717
718
sewardjb4112022007-11-09 22:49:28 +0000719
720/*----------------------------------------------------------------*/
721/*--- Sanity checking the data structures ---*/
722/*----------------------------------------------------------------*/
723
724static UWord stats__sanity_checks = 0;
725
florian6bf37262012-10-21 03:23:36 +0000726static void laog__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000727
728/* REQUIRED INVARIANTS:
729
730 Thread vs Segment/Lock/SecMaps
731
732 for each t in Threads {
733
734 // Thread.lockset: each element is really a valid Lock
735
736 // Thread.lockset: each Lock in set is actually held by that thread
737 for lk in Thread.lockset
738 lk == LockedBy(t)
739
740 // Thread.csegid is a valid SegmentID
741 // and the associated Segment has .thr == t
742
743 }
744
745 all thread Locksets are pairwise empty under intersection
746 (that is, no lock is claimed to be held by more than one thread)
747 -- this is guaranteed if all locks in locksets point back to their
748 owner threads
749
750 Lock vs Thread/Segment/SecMaps
751
752 for each entry (gla, la) in map_locks
753 gla == la->guest_addr
754
755 for each lk in Locks {
756
757 lk->tag is valid
758 lk->guest_addr does not have shadow state NoAccess
759 if lk == LockedBy(t), then t->lockset contains lk
760 if lk == UnlockedBy(segid) then segid is valid SegmentID
761 and can be mapped to a valid Segment(seg)
762 and seg->thr->lockset does not contain lk
763 if lk == UnlockedNew then (no lockset contains lk)
764
765 secmaps for lk has .mbHasLocks == True
766
767 }
768
769 Segment vs Thread/Lock/SecMaps
770
771 the Segment graph is a dag (no cycles)
772 all of the Segment graph must be reachable from the segids
773 mentioned in the Threads
774
775 for seg in Segments {
776
777 seg->thr is a sane Thread
778
779 }
780
781 SecMaps vs Segment/Thread/Lock
782
783 for sm in SecMaps {
784
785 sm properly aligned
786 if any shadow word is ShR or ShM then .mbHasShared == True
787
788 for each Excl(segid) state
789 map_segments_lookup maps to a sane Segment(seg)
790 for each ShM/ShR(tsetid,lsetid) state
791 each lk in lset is a valid Lock
792 each thr in tset is a valid thread, which is non-dead
793
794 }
795*/
796
797
798/* Return True iff 'thr' holds 'lk' in some mode. */
799static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
800{
801 if (lk->heldBy)
florian6bf37262012-10-21 03:23:36 +0000802 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000803 else
804 return False;
805}
806
807/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
   /* Walk all threads and check the invariants relating Threads to
      their locksets.  On the first violation, print 'who' (the
      caller's tag) and a check identifier, then abort. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   UWord ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
840
841
842/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
   /* Check the invariants relating Locks, map_locks and the per-thread
      locksets.  On the first violation, print 'who' (the caller's tag)
      and a check identifier, then abort. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            // every holder's all-locks set must mention this lock
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
906
907
florian6bf37262012-10-21 03:23:36 +0000908static void all_except_Locks__sanity_check ( const HChar* who ) {
sewardjb4112022007-11-09 22:49:28 +0000909 stats__sanity_checks++;
910 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
911 threads__sanity_check(who);
sewardjc1fb9d22011-02-28 09:03:44 +0000912 if (HG_(clo_track_lockorders))
913 laog__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000914}
florian6bf37262012-10-21 03:23:36 +0000915static void all__sanity_check ( const HChar* who ) {
sewardjb4112022007-11-09 22:49:28 +0000916 all_except_Locks__sanity_check(who);
917 locks__sanity_check(who);
918}
919
920
921/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +0000922/*--- Shadow value and address range handlers ---*/
923/*----------------------------------------------------------------*/
924
925static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +0000926//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000927static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +0000928__attribute__((noinline))
929static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000930
sewardjb4112022007-11-09 22:49:28 +0000931
932/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +0000933/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
934 Is that a problem? (hence 'scopy' rather than 'ccopy') */
935static void shadow_mem_scopy_range ( Thread* thr,
936 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +0000937{
938 Thr* hbthr = thr->hbthr;
939 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000940 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +0000941}
942
sewardj23f12002009-07-24 08:45:08 +0000943static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
944{
sewardjf98e1c02008-10-25 16:22:41 +0000945 Thr* hbthr = thr->hbthr;
946 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000947 LIBHB_CREAD_N(hbthr, a, len);
948}
949
950static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
951 Thr* hbthr = thr->hbthr;
952 tl_assert(hbthr);
953 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +0000954}
955
/* Mark [a, a+len) as newly-created memory on behalf of 'thr'. */
static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}
960
sewardjfd35d492011-03-17 19:39:55 +0000961static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
sewardjb4112022007-11-09 22:49:28 +0000962{
sewardjb4112022007-11-09 22:49:28 +0000963 if (0 && len > 500)
sewardjfd35d492011-03-17 19:39:55 +0000964 VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
965 // has no effect (NoFX)
966 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
967}
968
969static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
970{
971 if (0 && len > 500)
972 VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
973 // Actually Has An Effect (AHAE)
974 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +0000975}
976
sewardj406bac82010-03-03 23:03:40 +0000977static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
978{
979 if (0 && len > 500)
980 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
981 libhb_srange_untrack( thr->hbthr, aIN, len );
982}
983
sewardjb4112022007-11-09 22:49:28 +0000984
985/*----------------------------------------------------------------*/
986/*--- Event handlers (evh__* functions) ---*/
987/*--- plus helpers (evhH__* functions) ---*/
988/*----------------------------------------------------------------*/
989
990/*--------- Event handler helpers (evhH__* functions) ---------*/
991
992/* Create a new segment for 'thr', making it depend (.prev) on its
993 existing segment, bind together the SegmentID and Segment, and
994 return both of them. Also update 'thr' so it references the new
995 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +0000996//zz static
997//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
998//zz /*OUT*/Segment** new_segP,
999//zz Thread* thr )
1000//zz {
1001//zz Segment* cur_seg;
1002//zz tl_assert(new_segP);
1003//zz tl_assert(new_segidP);
1004//zz tl_assert(HG_(is_sane_Thread)(thr));
1005//zz cur_seg = map_segments_lookup( thr->csegid );
1006//zz tl_assert(cur_seg);
1007//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1008//zz at their owner thread. */
1009//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1010//zz *new_segidP = alloc_SegmentID();
1011//zz map_segments_add( *new_segidP, *new_segP );
1012//zz thr->csegid = *new_segidP;
1013//zz }
sewardjb4112022007-11-09 22:49:28 +00001014
1015
/* The lock at 'lock_ga' has acquired a writer ('thr').  Make all
   necessary updates, and also do all possible error checks.  Called
   only AFTER the client's lock acquisition has succeeded, so any
   invalid state transition seen here is a bug in libpthread, not in
   the client. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1111
1112
/* The lock at 'lock_ga' has acquired a reader ('thr').  Make all
   necessary updates, and also do all possible error checks.  Called
   only AFTER the client's lock acquisition has succeeded, so any
   invalid state transition seen here is a bug in libpthread, not in
   the client. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1186
1187
/* The lock at 'lock_ga' is just about to be unlocked by 'thr'.  Make
   all necessary updates, and also do all possible error checks.
   Unlike the acquire handlers above, this runs BEFORE libpthread has
   validated the unlock, so invalid transitions here are client bugs
   and must be reported rather than asserted. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* kind mismatches between wrapper context and recorded lock kind
      are reported but do not stop processing */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1326
1327
sewardj9f569b72008-11-13 13:33:09 +00001328/* ---------------------------------------------------------- */
1329/* -------- Event handlers proper (evh__* functions) -------- */
1330/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001331
1332/* What is the Thread* for the currently running thread? This is
1333 absolutely performance critical. We receive notifications from the
1334 core for client code starts/stops, and cache the looked-up result
1335 in 'current_Thread'. Hence, for the vast majority of requests,
1336 finding the current thread reduces to a read of a global variable,
1337 provided get_current_Thread_in_C_C is inlined.
1338
1339 Outside of client code, current_Thread is NULL, and presumably
1340 any uses of it will cause a segfault. Hence:
1341
1342 - for uses definitely within client code, use
1343 get_current_Thread_in_C_C.
1344
1345 - for all other uses, use get_current_Thread.
1346*/
1347
sewardj23f12002009-07-24 08:45:08 +00001348static Thread *current_Thread = NULL,
1349 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001350
1351static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1352 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1353 tl_assert(current_Thread == NULL);
1354 current_Thread = map_threads_lookup( tid );
1355 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001356 if (current_Thread != current_Thread_prev) {
1357 libhb_Thr_resumes( current_Thread->hbthr );
1358 current_Thread_prev = current_Thread;
1359 }
sewardjb4112022007-11-09 22:49:28 +00001360}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   /* Client code for 'tid' has stopped running: invalidate the cached
      current_Thread, and give libhb an opportunity to GC. */
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   /* Only valid while within client code; NULL otherwise. */
   return current_Thread;
}
1370static inline Thread* get_current_Thread ( void ) {
1371 ThreadId coretid;
1372 Thread* thr;
1373 thr = get_current_Thread_in_C_C();
1374 if (LIKELY(thr))
1375 return thr;
1376 /* evidently not in client code. Do it the slow way. */
1377 coretid = VG_(get_running_tid)();
1378 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001379 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001380 of initial memory layout) and VG_(get_running_tid)() returns
1381 VG_INVALID_THREADID at that point. */
1382 if (coretid == VG_INVALID_THREADID)
1383 coretid = 1; /* KLUDGE */
1384 thr = map_threads_lookup( coretid );
1385 return thr;
1386}
1387
static
void evh__new_mem ( Addr a, SizeT len ) {
   /* A new memory range [a, a+len) has appeared: paint it as New. */
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   /* optionally run full sanity checks after big-range events */
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}
1396
static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   /* New stack memory: as evh__new_mem, but the painted range is
      extended downwards to cover the stack redzone below 'a'. */
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}
1406
static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   /* As evh__new_mem; the supplied 'tid' is not used (the current
      thread is looked up instead). */
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}
1415
static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   /* New mapping with permissions rr/ww/xx: paint it as New only if
      it is accessible in some way.  'di_handle' is unused here. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}
1427
1428static
1429void evh__set_perms ( Addr a, SizeT len,
1430 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001431 // This handles mprotect requests. If the memory is being put
1432 // into no-R no-W state, paint it as NoAccess, for the reasons
1433 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001434 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001435 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001436 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1437 /* Hmm. What should we do here, that actually makes any sense?
1438 Let's say: if neither readable nor writable, then declare it
1439 NoAccess, else leave it alone. */
1440 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001441 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001442 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001443 all__sanity_check("evh__set_perms-post");
1444}
1445
1446static
1447void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001448 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001449 if (SHOW_EVENTS >= 2)
1450 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001451 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001452 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001453 all__sanity_check("evh__die_mem-post");
1454}
1455
1456static
sewardjfd35d492011-03-17 19:39:55 +00001457void evh__die_mem_munmap ( Addr a, SizeT len ) {
1458 // It's important that libhb doesn't ignore this. If, as is likely,
1459 // the client is subject to address space layout randomization,
1460 // then unmapped areas may never get remapped over, even in long
1461 // runs. If we just ignore them we wind up with large resource
1462 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1463 // VTS references in the affected area are dropped. Marking memory
1464 // as NoAccess is expensive, but we assume that munmap is sufficiently
1465 // rare that the space gains of doing this are worth the costs.
1466 if (SHOW_EVENTS >= 2)
1467 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1468 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1469}
1470
1471static
sewardj406bac82010-03-03 23:03:40 +00001472void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001473 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001474 if (SHOW_EVENTS >= 2)
1475 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1476 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1477 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1478 all__sanity_check("evh__untrack_mem-post");
1479}
1480
1481static
sewardj23f12002009-07-24 08:45:08 +00001482void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1483 if (SHOW_EVENTS >= 2)
1484 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1485 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1486 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1487 all__sanity_check("evh__copy_mem-post");
1488}
1489
/* Low-level thread creation notification.  Creates and wires up the
   Helgrind Thread record and the libhb Thr for the new child thread,
   and records where the parent was for later error messages.  Called
   with parent == VG_INVALID_THREADID for the boot thread, in which
   case there is nothing to do. */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      /* The parent must already be known; the child must not. */
      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      /* Make the child's libhb thread, deriving from the parent's
         (this is what creates the parent->child dependence edge). */
      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1552
/* Low-level thread exit notification.  Reports still-held locks as an
   error, tells libhb the thread is asynchronously gone, and releases
   the map_threads slot so the core can reuse the ThreadId. */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* message is bounded well under 80 bytes: format + decimal Int */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1605
sewardj61bc2c52011-02-09 10:34:00 +00001606/* This is called immediately after fork, for the child only. 'tid'
1607 is the only surviving thread (as per POSIX rules on fork() in
1608 threaded programs), so we have to clean up map_threads to remove
1609 entries for any other threads. */
1610static
1611void evh__atfork_child ( ThreadId tid )
1612{
1613 UInt i;
1614 Thread* thr;
1615 /* Slot 0 should never be used. */
1616 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1617 tl_assert(!thr);
1618 /* Clean up all other slots except 'tid'. */
1619 for (i = 1; i < VG_N_THREADS; i++) {
1620 if (i == tid)
1621 continue;
1622 thr = map_threads_maybe_lookup(i);
1623 if (!thr)
1624 continue;
1625 /* Cleanup actions (next 5 lines) copied from end of
1626 evh__pre_thread_ll_exit; keep in sync. */
1627 tl_assert(thr->hbthr);
1628 libhb_async_exit(thr->hbthr);
1629 tl_assert(thr->coretid == i);
1630 thr->coretid = VG_INVALID_THREADID;
1631 map_threads_delete(i);
1632 }
1633}
1634
sewardjf98e1c02008-10-25 16:22:41 +00001635
sewardjb4112022007-11-09 22:49:28 +00001636static
1637void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1638{
sewardjb4112022007-11-09 22:49:28 +00001639 Thread* thr_s;
1640 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001641 Thr* hbthr_s;
1642 Thr* hbthr_q;
1643 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001644
1645 if (SHOW_EVENTS >= 1)
1646 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1647 (Int)stay_tid, quit_thr );
1648
sewardjf98e1c02008-10-25 16:22:41 +00001649 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001650
1651 thr_s = map_threads_maybe_lookup( stay_tid );
1652 thr_q = quit_thr;
1653 tl_assert(thr_s != NULL);
1654 tl_assert(thr_q != NULL);
1655 tl_assert(thr_s != thr_q);
1656
sewardjf98e1c02008-10-25 16:22:41 +00001657 hbthr_s = thr_s->hbthr;
1658 hbthr_q = thr_q->hbthr;
1659 tl_assert(hbthr_s != hbthr_q);
sewardj60626642011-03-10 15:14:37 +00001660 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1661 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001662
sewardjf98e1c02008-10-25 16:22:41 +00001663 /* Allocate a temporary synchronisation object and use it to send
1664 an imaginary message from the quitter to the stayer, the purpose
1665 being to generate a dependence from the quitter to the
1666 stayer. */
1667 so = libhb_so_alloc();
1668 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001669 /* Send last arg of _so_send as False, since the sending thread
1670 doesn't actually exist any more, so we don't want _so_send to
1671 try taking stack snapshots of it. */
sewardjffce8152011-06-24 10:09:41 +00001672 libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
sewardjf98e1c02008-10-25 16:22:41 +00001673 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1674 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001675
sewardjffce8152011-06-24 10:09:41 +00001676 /* Tell libhb that the quitter has been reaped. Note that we might
1677 have to be cleverer about this, to exclude 2nd and subsequent
1678 notifications for the same hbthr_q, in the case where the app is
1679 buggy (calls pthread_join twice or more on the same thread) AND
1680 where libpthread is also buggy and doesn't return ESRCH on
1681 subsequent calls. (If libpthread isn't thusly buggy, then the
1682 wrapper for pthread_join in hg_intercepts.c will stop us getting
1683 notified here multiple times for the same joinee.) See also
1684 comments in helgrind/tests/jointwice.c. */
1685 libhb_joinedwith_done(hbthr_q);
1686
sewardjf98e1c02008-10-25 16:22:41 +00001687 /* evh__pre_thread_ll_exit issues an error message if the exiting
1688 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001689
1690 /* This holds because, at least when using NPTL as the thread
1691 library, we should be notified the low level thread exit before
1692 we hear of any join event on it. The low level exit
1693 notification feeds through into evh__pre_thread_ll_exit,
1694 which should clear the map_threads entry for it. Hence we
1695 expect there to be no map_threads entry at this point. */
1696 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1697 == VG_INVALID_THREADID);
1698
sewardjf98e1c02008-10-25 16:22:41 +00001699 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001700 all__sanity_check("evh__post_thread_join-post");
1701}
1702
1703static
floriane543f302012-10-21 19:43:43 +00001704void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001705 Addr a, SizeT size) {
1706 if (SHOW_EVENTS >= 2
1707 || (SHOW_EVENTS >= 1 && size != 1))
1708 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1709 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001710 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001711 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001712 all__sanity_check("evh__pre_mem_read-post");
1713}
1714
1715static
1716void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
floriane543f302012-10-21 19:43:43 +00001717 const HChar* s, Addr a ) {
sewardjb4112022007-11-09 22:49:28 +00001718 Int len;
1719 if (SHOW_EVENTS >= 1)
1720 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1721 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001722 // Don't segfault if the string starts in an obviously stupid
1723 // place. Actually we should check the whole string, not just
1724 // the start address, but that's too much trouble. At least
1725 // checking the first byte is better than nothing. See #255009.
1726 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1727 return;
florian19f91bb2012-11-10 22:29:54 +00001728 len = VG_(strlen)( (HChar*) a );
sewardj23f12002009-07-24 08:45:08 +00001729 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001730 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001731 all__sanity_check("evh__pre_mem_read_asciiz-post");
1732}
1733
1734static
floriane543f302012-10-21 19:43:43 +00001735void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001736 Addr a, SizeT size ) {
1737 if (SHOW_EVENTS >= 1)
1738 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1739 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001740 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001741 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001742 all__sanity_check("evh__pre_mem_write-post");
1743}
1744
1745static
1746void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1747 if (SHOW_EVENTS >= 1)
1748 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1749 (void*)a, len, (Int)is_inited );
1750 // FIXME: this is kinda stupid
1751 if (is_inited) {
1752 shadow_mem_make_New(get_current_Thread(), a, len);
1753 } else {
1754 shadow_mem_make_New(get_current_Thread(), a, len);
1755 }
sewardjf98e1c02008-10-25 16:22:41 +00001756 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001757 all__sanity_check("evh__pre_mem_read-post");
1758}
1759
1760static
1761void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001762 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001763 if (SHOW_EVENTS >= 1)
1764 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001765 thr = get_current_Thread();
1766 tl_assert(thr);
1767 if (HG_(clo_free_is_write)) {
1768 /* Treat frees as if the memory was written immediately prior to
1769 the free. This shakes out more races, specifically, cases
1770 where memory is referenced by one thread, and freed by
1771 another, and there's no observable synchronisation event to
1772 guarantee that the reference happens before the free. */
1773 shadow_mem_cwrite_range(thr, a, len);
1774 }
sewardjfd35d492011-03-17 19:39:55 +00001775 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001776 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001777 all__sanity_check("evh__pre_mem_read-post");
1778}
1779
sewardj23f12002009-07-24 08:45:08 +00001780/* --- Event handlers called from generated code --- */
1781
sewardjb4112022007-11-09 22:49:28 +00001782static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001783void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001784 Thread* thr = get_current_Thread_in_C_C();
1785 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001786 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001787}
sewardjf98e1c02008-10-25 16:22:41 +00001788
sewardjb4112022007-11-09 22:49:28 +00001789static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001790void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001791 Thread* thr = get_current_Thread_in_C_C();
1792 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001793 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001794}
sewardjf98e1c02008-10-25 16:22:41 +00001795
sewardjb4112022007-11-09 22:49:28 +00001796static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001797void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001798 Thread* thr = get_current_Thread_in_C_C();
1799 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001800 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001801}
sewardjf98e1c02008-10-25 16:22:41 +00001802
sewardjb4112022007-11-09 22:49:28 +00001803static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001804void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001805 Thread* thr = get_current_Thread_in_C_C();
1806 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001807 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001808}
sewardjf98e1c02008-10-25 16:22:41 +00001809
sewardjb4112022007-11-09 22:49:28 +00001810static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001811void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001812 Thread* thr = get_current_Thread_in_C_C();
1813 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001814 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001815}
1816
1817static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001818void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001819 Thread* thr = get_current_Thread_in_C_C();
1820 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001821 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001822}
sewardjf98e1c02008-10-25 16:22:41 +00001823
sewardjb4112022007-11-09 22:49:28 +00001824static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001825void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001826 Thread* thr = get_current_Thread_in_C_C();
1827 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001828 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001829}
sewardjf98e1c02008-10-25 16:22:41 +00001830
sewardjb4112022007-11-09 22:49:28 +00001831static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001832void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001833 Thread* thr = get_current_Thread_in_C_C();
1834 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001835 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001836}
sewardjf98e1c02008-10-25 16:22:41 +00001837
sewardjb4112022007-11-09 22:49:28 +00001838static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001839void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001840 Thread* thr = get_current_Thread_in_C_C();
1841 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001842 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001843}
sewardjf98e1c02008-10-25 16:22:41 +00001844
sewardjb4112022007-11-09 22:49:28 +00001845static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001846void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001847 Thread* thr = get_current_Thread_in_C_C();
1848 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001849 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001850}
1851
sewardjb4112022007-11-09 22:49:28 +00001852
sewardj9f569b72008-11-13 13:33:09 +00001853/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001854/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001855/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001856
1857/* EXPOSITION only: by intercepting lock init events we can show the
1858 user where the lock was initialised, rather than only being able to
1859 show where it was first locked. Intercepting lock initialisations
1860 is not necessary for the basic operation of the race checker. */
1861static
1862void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1863 void* mutex, Word mbRec )
1864{
1865 if (SHOW_EVENTS >= 1)
1866 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1867 (Int)tid, mbRec, (void*)mutex );
1868 tl_assert(mbRec == 0 || mbRec == 1);
1869 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1870 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001871 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001872 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1873}
1874
/* pthread_mutex_destroy is about to run.  Validates the argument,
   reports destruction of a still-locked mutex (and forcibly releases
   it), then removes the lock from all Helgrind state. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Unknown address, or known but not a mutex kind: bogus argument. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop it from the lock-order graph (if tracked), the address
         map, and finally free the Lock record itself. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1921
/* pthread_mutex_lock/trylock is about to run.  Only sanity-checks the
   mutex and emits diagnostics (wrong lock type; guaranteed self-
   deadlock); the actual lock state change happens in _LOCK_POST. */
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Complain if the address is actually a known rwlock. */
   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      /* Attach the original acquisition site when we have it. */
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
1964
1965static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1966{
1967 // only called if the real library call succeeded - so mutex is sane
1968 Thread* thr;
1969 if (SHOW_EVENTS >= 1)
1970 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1971 (Int)tid, (void*)mutex );
1972
1973 thr = map_threads_maybe_lookup( tid );
1974 tl_assert(thr); /* cannot fail - Thread* must already exist */
1975
1976 evhH__post_thread_w_acquires_lock(
1977 thr,
1978 LK_mbRec, /* if not known, create new lock with this LockKind */
1979 (Addr)mutex
1980 );
1981}
1982
1983static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1984{
1985 // 'mutex' may be invalid - not checked by wrapper
1986 Thread* thr;
1987 if (SHOW_EVENTS >= 1)
1988 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1989 (Int)tid, (void*)mutex );
1990
1991 thr = map_threads_maybe_lookup( tid );
1992 tl_assert(thr); /* cannot fail - Thread* must already exist */
1993
1994 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1995}
1996
1997static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1998{
1999 // only called if the real library call succeeded - so mutex is sane
2000 Thread* thr;
2001 if (SHOW_EVENTS >= 1)
2002 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2003 (Int)tid, (void*)mutex );
2004 thr = map_threads_maybe_lookup( tid );
2005 tl_assert(thr); /* cannot fail - Thread* must already exist */
2006
2007 // anything we should do here?
2008}
2009
2010
sewardj5a644da2009-08-11 10:35:58 +00002011/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002012/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002013/* ------------------------------------------------------- */
2014
2015/* All a bit of a kludge. Pretend we're really dealing with ordinary
2016 pthread_mutex_t's instead, for the most part. */
2017
2018static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2019 void* slock )
2020{
2021 Thread* thr;
2022 Lock* lk;
2023 /* In glibc's kludgey world, we're either initialising or unlocking
2024 it. Since this is the pre-routine, if it is locked, unlock it
2025 and take a dependence edge. Otherwise, do nothing. */
2026
2027 if (SHOW_EVENTS >= 1)
2028 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2029 "(ctid=%d, slock=%p)\n",
2030 (Int)tid, (void*)slock );
2031
2032 thr = map_threads_maybe_lookup( tid );
2033 /* cannot fail - Thread* must already exist */;
2034 tl_assert( HG_(is_sane_Thread)(thr) );
2035
2036 lk = map_locks_maybe_lookup( (Addr)slock );
2037 if (lk && lk->heldBy) {
2038 /* it's held. So do the normal pre-unlock actions, as copied
2039 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2040 duplicates the map_locks_maybe_lookup. */
2041 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2042 False/*!isRDWR*/ );
2043 }
2044}
2045
2046static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2047 void* slock )
2048{
2049 Lock* lk;
2050 /* More kludgery. If the lock has never been seen before, do
2051 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2052 nothing. */
2053
2054 if (SHOW_EVENTS >= 1)
2055 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2056 "(ctid=%d, slock=%p)\n",
2057 (Int)tid, (void*)slock );
2058
2059 lk = map_locks_maybe_lookup( (Addr)slock );
2060 if (!lk) {
2061 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2062 }
2063}
2064
/* Spinlock lock/trylock: modelled exactly like a mutex lock. */
static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}
2070
/* Spinlock lock succeeded: modelled exactly like a mutex lock. */
static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}
2076
/* Spinlock destroy: modelled exactly like a mutex destroy. */
static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}
2082
2083
sewardj9f569b72008-11-13 13:33:09 +00002084/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002085/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002086/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002087
sewardj02114542009-07-28 20:52:36 +00002088/* A mapping from CV to (the SO associated with it, plus some
2089 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002090 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2091 wait on it completes, we do a 'recv' from the SO. This is believed
2092 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002093 signallings/broadcasts.
2094*/
2095
sewardj02114542009-07-28 20:52:36 +00002096/* .so is the SO for this CV.
2097 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002098
sewardj02114542009-07-28 20:52:36 +00002099 POSIX says effectively that the first pthread_cond_{timed}wait call
2100 causes a dynamic binding between the CV and the mutex, and that
2101 lasts until such time as the waiter count falls to zero. Hence
2102 need to keep track of the number of waiters in order to do
2103 consistency tracking. */
/* Auxiliary per-CV record, stored in map_cond_to_CVInfo below.
   nWaiters implements the POSIX dynamic (CV,MX) binding described in
   the comment above: the binding is formed by the first waiter and
   dissolved when the waiter count returns to zero. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;
2111
2112
2113/* pthread_cond_t* -> CVInfo* */
2114static WordFM* map_cond_to_CVInfo = NULL;
2115
2116static void map_cond_to_CVInfo_INIT ( void ) {
2117 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2118 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2119 "hg.mctCI.1", HG_(free), NULL );
2120 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002121 }
2122}
2123
sewardj02114542009-07-28 20:52:36 +00002124static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002125 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002126 map_cond_to_CVInfo_INIT();
2127 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002128 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002129 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002130 } else {
sewardj02114542009-07-28 20:52:36 +00002131 SO* so = libhb_so_alloc();
2132 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2133 cvi->so = so;
2134 cvi->mx_ga = 0;
2135 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2136 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002137 }
2138}
2139
philippe8bfc2152012-07-06 23:38:24 +00002140static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2141 UWord key, val;
2142 map_cond_to_CVInfo_INIT();
2143 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2144 tl_assert(key == (UWord)cond);
2145 return (CVInfo*)val;
2146 } else {
2147 return NULL;
2148 }
2149}
2150
/* Remove and free the CVInfo for 'cond', deallocating its SO.
   Reports a Misc error if the CV is still being waited upon, or if
   the CV was never seen at all ('tid' is only needed for error
   attribution). */
static void map_cond_to_CVInfo_delete ( ThreadId tid, void* cond ) {
   Thread* thr;
   UWord keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         /* Destroying a CV with threads still blocked on it is
            client misbehaviour; complain but clean up anyway. */
         HG_(record_error_Misc)(thr,
                                "pthread_cond_destroy:"
                                " destruction of condition variable being waited upon");
      }
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      HG_(record_error_Misc)(thr,
         "pthread_cond_destroy: destruction of unknown cond var");
   }
}
2177
/* Pre-handler for pthread_cond_signal/broadcast: record the
   happens-before edge source by doing a strong send on the CV's SO.
   Also sanity-checks the state of the associated mutex, if known. */
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread*   thr;
   CVInfo*   cvi;
   //Lock*     lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   /* Diagnose questionable mutex states.  cvi->mx_ga is only known
      after at least one wait has established the (CV,MX) binding. */
   if (1) {
      Lock* lk = NULL;
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL.  Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error.  We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and if that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr,
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   /* Publish this thread's vector clock into the CV's SO, for later
      recv by the released waiter(s). */
   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2256
/* Pre-handler for pthread_cond_{timed}wait.  Validates 'mutex'
   (must exist, be a plain mutex, and be held by this thread),
   establishes or checks the (CV,MX) binding, and bumps the waiter
   count.  Returns True if it reckons 'mutex' is valid and held by
   this thread, else False. */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   /* Count this waiter; the matching decrement is in the WAIT_POST
      handler. */
   cvi->nWaiters++;

   return lk_valid;
}
2325
2326static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2327 void* cond, void* mutex )
2328{
sewardjf98e1c02008-10-25 16:22:41 +00002329 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2330 the SO for this cond, and 'recv' from it so as to acquire a
2331 dependency edge back to the signaller/broadcaster. */
2332 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002333 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002334
2335 if (SHOW_EVENTS >= 1)
2336 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2337 "(ctid=%d, cond=%p, mutex=%p)\n",
2338 (Int)tid, (void*)cond, (void*)mutex );
2339
sewardjb4112022007-11-09 22:49:28 +00002340 thr = map_threads_maybe_lookup( tid );
2341 tl_assert(thr); /* cannot fail - Thread* must already exist */
2342
2343 // error-if: cond is also associated with a different mutex
2344
philippe8bfc2152012-07-06 23:38:24 +00002345 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2346 if (!cvi) {
2347 /* This could be either a bug in helgrind or the guest application
2348 that did an error (e.g. cond var was destroyed by another thread.
2349 Let's assume helgrind is perfect ...
2350 Note that this is similar to drd behaviour. */
2351 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2352 " being waited upon");
2353 return;
2354 }
2355
sewardj02114542009-07-28 20:52:36 +00002356 tl_assert(cvi);
2357 tl_assert(cvi->so);
2358 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002359
sewardj02114542009-07-28 20:52:36 +00002360 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002361 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2362 it? If this happened it would surely be a bug in the threads
2363 library. Or one of those fabled "spurious wakeups". */
2364 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002365 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002366 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002367 }
sewardjf98e1c02008-10-25 16:22:41 +00002368
2369 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002370 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2371
2372 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002373}
2374
static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   /* map_cond_to_CVInfo_delete also reports errors for destruction
      of an unknown or still-waited-upon CV. */
   map_cond_to_CVInfo_delete( tid, cond );
}
2388
2389
sewardj9f569b72008-11-13 13:33:09 +00002390/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002391/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002392/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002393
/* EXPOSITION only */
/* Successful pthread_rwlock_init: register a shadow Lock of kind
   LK_rdwr for the new rwlock. */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
2405
2406static
2407void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2408{
2409 Thread* thr;
2410 Lock* lk;
2411 if (SHOW_EVENTS >= 1)
2412 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2413 (Int)tid, (void*)rwl );
2414
2415 thr = map_threads_maybe_lookup( tid );
2416 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002417 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002418
2419 lk = map_locks_maybe_lookup( (Addr)rwl );
2420
2421 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002422 HG_(record_error_Misc)(
2423 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002424 }
2425
2426 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002427 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002428 tl_assert( lk->guestaddr == (Addr)rwl );
2429 if (lk->heldBy) {
2430 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002431 HG_(record_error_Misc)(
2432 thr, "pthread_rwlock_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00002433 /* remove lock from locksets of all owning threads */
2434 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002435 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002436 lk->heldBy = NULL;
2437 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002438 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002439 }
2440 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002441 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002442
2443 if (HG_(clo_track_lockorders))
2444 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002445 map_locks_delete( lk->guestaddr );
2446 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002447 }
2448
sewardjf98e1c02008-10-25 16:22:41 +00002449 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002450 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2451}
2452
2453static
sewardj789c3c52008-02-25 12:10:07 +00002454void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2455 void* rwl,
2456 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002457{
2458 /* Just check the rwl is sane; nothing else to do. */
2459 // 'rwl' may be invalid - not checked by wrapper
2460 Thread* thr;
2461 Lock* lk;
2462 if (SHOW_EVENTS >= 1)
2463 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2464 (Int)tid, (Int)isW, (void*)rwl );
2465
2466 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002467 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002468 thr = map_threads_maybe_lookup( tid );
2469 tl_assert(thr); /* cannot fail - Thread* must already exist */
2470
2471 lk = map_locks_maybe_lookup( (Addr)rwl );
2472 if ( lk
2473 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2474 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002475 HG_(record_error_Misc)(
2476 thr, "pthread_rwlock_{rd,rw}lock with a "
2477 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002478 }
2479}
2480
/* Post-handler for a successful rwlock acquisition: dispatch to the
   writer- or reader-acquire bookkeeping according to 'isW'. */
static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Select w- or r-acquire handler; either creates an LK_rdwr shadow
      lock if the address has not been seen before. */
   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}
2501
/* Pre-handler for pthread_rwlock_unlock: do the release bookkeeping
   (isRDWR distinguishes this from mutex release for error checks). */
static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}
2515
/* Post-handler for a successful rwlock unlock.  All release
   bookkeeping happened in the _PRE handler; this only sanity-checks
   the thread and is otherwise a no-op. */
static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
2528
2529
sewardj9f569b72008-11-13 13:33:09 +00002530/* ---------------------------------------------------------- */
2531/* -------------- events to do with semaphores -------------- */
2532/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002533
sewardj11e352f2007-11-30 11:11:02 +00002534/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002535 variables. */
2536
sewardjf98e1c02008-10-25 16:22:41 +00002537/* For each semaphore, we maintain a stack of SOs. When a 'post'
2538 operation is done on a semaphore (unlocking, essentially), a new SO
2539 is created for the posting thread, the posting thread does a strong
2540 send to it (which merely installs the posting thread's VC in the
2541 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002542
2543 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002544 semaphore, we pop a SO off the semaphore's stack (which should be
2545 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002546 dependencies between posters and waiters of the semaphore.
2547
sewardjf98e1c02008-10-25 16:22:41 +00002548 It may not be necessary to use a stack - perhaps a bag of SOs would
2549 do. But we do need to keep track of how many unused-up posts have
2550 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002551
sewardjf98e1c02008-10-25 16:22:41 +00002552 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002553 twice on S. T3 cannot complete its waits without both T1 and T2
2554 posting. The above mechanism will ensure that T3 acquires
2555 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002556
sewardjf98e1c02008-10-25 16:22:41 +00002557 When a semaphore is initialised with value N, we do as if we'd
2558 posted N times on the semaphore: basically create N SOs and do a
2559 strong send to all of then. This allows up to N waits on the
2560 semaphore to acquire a dependency on the initialisation point,
2561 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002562
2563 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2564 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002565*/
2566
sewardjf98e1c02008-10-25 16:22:41 +00002567/* sem_t* -> XArray* SO* */
2568static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002569
sewardjf98e1c02008-10-25 16:22:41 +00002570static void map_sem_to_SO_stack_INIT ( void ) {
2571 if (map_sem_to_SO_stack == NULL) {
2572 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2573 HG_(free), NULL );
2574 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002575 }
2576}
2577
sewardjf98e1c02008-10-25 16:22:41 +00002578static void push_SO_for_sem ( void* sem, SO* so ) {
2579 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002580 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002581 tl_assert(so);
2582 map_sem_to_SO_stack_INIT();
2583 if (VG_(lookupFM)( map_sem_to_SO_stack,
2584 &keyW, (UWord*)&xa, (UWord)sem )) {
2585 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002586 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002587 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002588 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002589 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2590 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002591 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002592 }
2593}
2594
sewardjf98e1c02008-10-25 16:22:41 +00002595static SO* mb_pop_SO_for_sem ( void* sem ) {
2596 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002597 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002598 SO* so;
2599 map_sem_to_SO_stack_INIT();
2600 if (VG_(lookupFM)( map_sem_to_SO_stack,
2601 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002602 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002603 Word sz;
2604 tl_assert(keyW == (UWord)sem);
2605 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002606 tl_assert(sz >= 0);
2607 if (sz == 0)
2608 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002609 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2610 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002611 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002612 return so;
sewardjb4112022007-11-09 22:49:28 +00002613 } else {
2614 /* hmm, that's odd. No stack for this semaphore. */
2615 return NULL;
2616 }
2617}
2618
sewardj11e352f2007-11-30 11:11:02 +00002619static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002620{
sewardjf98e1c02008-10-25 16:22:41 +00002621 UWord keyW, valW;
2622 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002623
sewardjb4112022007-11-09 22:49:28 +00002624 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002625 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002626 (Int)tid, (void*)sem );
2627
sewardjf98e1c02008-10-25 16:22:41 +00002628 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002629
sewardjf98e1c02008-10-25 16:22:41 +00002630 /* Empty out the semaphore's SO stack. This way of doing it is
2631 stupid, but at least it's easy. */
2632 while (1) {
2633 so = mb_pop_SO_for_sem( sem );
2634 if (!so) break;
2635 libhb_so_dealloc(so);
2636 }
2637
2638 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2639 XArray* xa = (XArray*)valW;
2640 tl_assert(keyW == (UWord)sem);
2641 tl_assert(xa);
2642 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2643 VG_(deleteXA)(xa);
2644 }
sewardjb4112022007-11-09 22:49:28 +00002645}
2646
sewardj11e352f2007-11-30 11:11:02 +00002647static
2648void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2649{
sewardjf98e1c02008-10-25 16:22:41 +00002650 SO* so;
2651 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002652
2653 if (SHOW_EVENTS >= 1)
2654 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2655 (Int)tid, (void*)sem, value );
2656
sewardjf98e1c02008-10-25 16:22:41 +00002657 thr = map_threads_maybe_lookup( tid );
2658 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002659
sewardjf98e1c02008-10-25 16:22:41 +00002660 /* Empty out the semaphore's SO stack. This way of doing it is
2661 stupid, but at least it's easy. */
2662 while (1) {
2663 so = mb_pop_SO_for_sem( sem );
2664 if (!so) break;
2665 libhb_so_dealloc(so);
2666 }
sewardj11e352f2007-11-30 11:11:02 +00002667
sewardjf98e1c02008-10-25 16:22:41 +00002668 /* If we don't do this check, the following while loop runs us out
2669 of memory for stupid initial values of 'value'. */
2670 if (value > 10000) {
2671 HG_(record_error_Misc)(
2672 thr, "sem_init: initial value exceeds 10000; using 10000" );
2673 value = 10000;
2674 }
sewardj11e352f2007-11-30 11:11:02 +00002675
sewardjf98e1c02008-10-25 16:22:41 +00002676 /* Now create 'valid' new SOs for the thread, do a strong send to
2677 each of them, and push them all on the stack. */
2678 for (; value > 0; value--) {
2679 Thr* hbthr = thr->hbthr;
2680 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002681
sewardjf98e1c02008-10-25 16:22:41 +00002682 so = libhb_so_alloc();
2683 libhb_so_send( hbthr, so, True/*strong send*/ );
2684 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002685 }
2686}
2687
/* Pre-handler for sem_post: allocate a fresh SO, strong-send this
   thread's clock into it, and push it on the semaphore's stack for a
   future waiter to recv from. */
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped of the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}
2718
sewardj11e352f2007-11-30 11:11:02 +00002719static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002720{
sewardjf98e1c02008-10-25 16:22:41 +00002721 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2722 the 'sem' from this semaphore's SO-stack, and do a strong recv
2723 from it. This creates a dependency back to one of the post-ers
2724 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002725
sewardjf98e1c02008-10-25 16:22:41 +00002726 Thread* thr;
2727 SO* so;
2728 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002729
2730 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002731 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002732 (Int)tid, (void*)sem );
2733
2734 thr = map_threads_maybe_lookup( tid );
2735 tl_assert(thr); /* cannot fail - Thread* must already exist */
2736
2737 // error-if: sem is bogus
2738
sewardjf98e1c02008-10-25 16:22:41 +00002739 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002740
sewardjf98e1c02008-10-25 16:22:41 +00002741 if (so) {
2742 hbthr = thr->hbthr;
2743 tl_assert(hbthr);
2744
2745 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2746 libhb_so_dealloc(so);
2747 } else {
2748 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2749 If this happened it would surely be a bug in the threads
2750 library. */
2751 HG_(record_error_Misc)(
2752 thr, "Bug in libpthread: sem_wait succeeded on"
2753 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002754 }
2755}
2756
2757
sewardj9f569b72008-11-13 13:33:09 +00002758/* -------------------------------------------------------- */
2759/* -------------- events to do with barriers -------------- */
2760/* -------------------------------------------------------- */
2761
2762typedef
2763 struct {
2764 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002765 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002766 UWord size; /* declared size */
2767 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2768 }
2769 Bar;
2770
/* Allocate a fresh Bar; zalloc zero-fills, so 'initted' starts
   False and 'waiting' starts NULL. */
static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   tl_assert(bar);
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}
2778
/* Free a Bar and its waiting-threads array, if one was allocated. */
static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}
2785
2786/* A mapping which stores auxiliary data for barriers. */
2787
2788/* pthread_barrier_t* -> Bar* */
2789static WordFM* map_barrier_to_Bar = NULL;
2790
/* Lazily create the barrier -> Bar map on first use. */
static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
      tl_assert(map_barrier_to_Bar != NULL);
   }
}
2798
2799static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2800 UWord key, val;
2801 map_barrier_to_Bar_INIT();
2802 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2803 tl_assert(key == (UWord)barrier);
2804 return (Bar*)val;
2805 } else {
2806 Bar* bar = new_Bar();
2807 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2808 return bar;
2809 }
2810}
2811
/* Remove and free the Bar for 'barrier', if one exists; silently a
   no-op for an unknown barrier. */
static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      delete_Bar(bar);
   }
}
2821
2822
/* Pre-handler for pthread_barrier_init (and the resizable variant).
   Validates 'count' and 'resizable', complains about re-initialising
   a live barrier or one with threads still parked at it, then
   (re)initialises the Bar: empty waiting list, declared size, and
   resizability flag. */
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      /* Discard the stale waiters so the barrier restarts cleanly. */
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
2878
2879
/* Handle a client's pthread_barrier_destroy: flag destroying an
   uninitialised barrier or one with threads still waiting, then free
   the auxiliary Bar storage. */
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Note: this allocates a Bar for a never-seen barrier, but it is
      deleted again by map_barrier_to_Bar_delete below. */
   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
2922
2923
/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.)

   Precondition (asserted): bar->waiting holds exactly bar->size
   threads.  Postcondition: bar->waiting is empty again. */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
2959
2960
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   /* record this thread as one of the waiters */
   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   /* not the last arrival: nothing more to do yet */
   if (present < bar->size)
      return;

   /* last arrival: cross-synchronise everyone and reset the list */
   do_barrier_cross_sync_and_empty(bar);
}
sewardj9f569b72008-11-13 13:33:09 +00003041
sewardj9f569b72008-11-13 13:33:09 +00003042
sewardj406bac82010-03-03 23:03:40 +00003043static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3044 void* barrier,
3045 UWord newcount )
3046{
3047 Thread* thr;
3048 Bar* bar;
3049 UWord present;
3050
3051 if (SHOW_EVENTS >= 1)
3052 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3053 "(tid=%d, barrier=%p, newcount=%lu)\n",
3054 (Int)tid, (void*)barrier, newcount );
3055
3056 thr = map_threads_maybe_lookup( tid );
3057 tl_assert(thr); /* cannot fail - Thread* must already exist */
3058
3059 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3060 tl_assert(bar);
3061
3062 if (!bar->initted) {
3063 HG_(record_error_Misc)(
3064 thr, "pthread_barrier_resize: barrier is uninitialised"
3065 );
3066 return; /* client is broken .. avoid assertions below */
3067 }
3068
3069 if (!bar->resizable) {
3070 HG_(record_error_Misc)(
3071 thr, "pthread_barrier_resize: barrier is may not be resized"
3072 );
3073 return; /* client is broken .. avoid assertions below */
3074 }
3075
3076 if (newcount == 0) {
3077 HG_(record_error_Misc)(
3078 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3079 );
3080 return; /* client is broken .. avoid assertions below */
3081 }
3082
3083 /* guaranteed by _INIT_PRE above */
3084 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003085 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003086 /* Guaranteed by this fn */
3087 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003088
sewardj406bac82010-03-03 23:03:40 +00003089 if (newcount >= bar->size) {
3090 /* Increasing the capacity. There's no possibility of threads
3091 moving on from the barrier in this situation, so just note
3092 the fact and do nothing more. */
3093 bar->size = newcount;
3094 } else {
3095 /* Decreasing the capacity. If we decrease it to be equal or
3096 below the number of waiting threads, they will now move past
3097 the barrier, so need to mess with dep edges in the same way
3098 as if the barrier had filled up normally. */
3099 present = VG_(sizeXA)(bar->waiting);
3100 tl_assert(present >= 0 && present <= bar->size);
3101 if (newcount <= present) {
3102 bar->size = present; /* keep the cross_sync call happy */
3103 do_barrier_cross_sync_and_empty(bar);
3104 }
3105 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003106 }
sewardj9f569b72008-11-13 13:33:09 +00003107}
3108
3109
sewardjed2e72e2009-08-14 11:08:24 +00003110/* ----------------------------------------------------- */
3111/* ----- events to do with user-specified HB edges ----- */
3112/* ----------------------------------------------------- */
3113
3114/* A mapping from arbitrary UWord tag to the SO associated with it.
3115 The UWord tags are meaningless to us, interpreted only by the
3116 user. */
3117
3118
3119
/* UWord usertag -> SO*.  Created lazily on first use by
   map_usertag_to_SO_INIT. */
static WordFM* map_usertag_to_SO = NULL;
3122
3123static void map_usertag_to_SO_INIT ( void ) {
3124 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3125 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3126 "hg.mutS.1", HG_(free), NULL );
3127 tl_assert(map_usertag_to_SO != NULL);
3128 }
3129}
3130
3131static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3132 UWord key, val;
3133 map_usertag_to_SO_INIT();
3134 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3135 tl_assert(key == (UWord)usertag);
3136 return (SO*)val;
3137 } else {
3138 SO* so = libhb_so_alloc();
3139 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3140 return so;
3141 }
3142}
3143
sewardj6015d0e2011-03-11 19:10:48 +00003144static void map_usertag_to_SO_delete ( UWord usertag ) {
3145 UWord keyW, valW;
3146 map_usertag_to_SO_INIT();
3147 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3148 SO* so = (SO*)valW;
3149 tl_assert(keyW == usertag);
3150 tl_assert(so);
3151 libhb_so_dealloc(so);
3152 }
3153}
sewardjed2e72e2009-08-14 11:08:24 +00003154
3155
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* weak send: join this thread's VC into the SO's VC */
   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}
3183
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
3211
static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}
3226
sewardjed2e72e2009-08-14 11:08:24 +00003227
sewardjb4112022007-11-09 22:49:28 +00003228/*--------------------------------------------------------------*/
3229/*--- Lock acquisition order monitoring ---*/
3230/*--------------------------------------------------------------*/
3231
3232/* FIXME: here are some optimisations still to do in
3233 laog__pre_thread_acquires_lock.
3234
3235 The graph is structured so that if L1 --*--> L2 then L1 must be
3236 acquired before L2.
3237
3238 The common case is that some thread T holds (eg) L1 L2 and L3 and
3239 is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:
3241
3242 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3243 produces the answer No (because there is no error).
3244
3245 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3246 (because they already got added the first time T acquired Ln).
3247
3248 Hence cache these two events:
3249
3250 (1) Cache result of the query from last time. Invalidate the cache
3251 any time any edges are added to or deleted from laog.
3252
3253 (2) Cache these add-edge requests and ignore them if said edges
3254 have already been added to laog. Invalidate the cache any time
3255 any edges are deleted from laog.
3256*/
3257
/* Adjacency record for one lock in the lock acquisition order graph:
   the set of locks with edges into this node (inns) and out of it
   (outs).  Both sets are WordSets in the univ_laog universe. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;
3264
/* lock order acquisition graph: maps each Lock* to its LAOGLinks
   (in/out edge sets).  Created by laog__init. */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3267
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to.  Records are keyed by the (src_ga,dst_ga) pair; see
   cmp_LAOGLinkExposition. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
3279
sewardj250ec2e2008-02-15 22:02:30 +00003280static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003281 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3282 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3283 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3284 if (llx1->src_ga < llx2->src_ga) return -1;
3285 if (llx1->src_ga > llx2->src_ga) return 1;
3286 if (llx1->dst_ga < llx2->dst_ga) return -1;
3287 if (llx1->dst_ga > llx2->dst_ga) return 1;
3288 return 0;
3289}
3290
/* Set of LAOGLinkExposition* (values unused) recording where each
   laog edge was established.  Created by laog__init. */
static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3292/* end EXPOSITION ONLY */
3293
3294
sewardja65db102009-01-26 10:45:16 +00003295__attribute__((noinline))
3296static void laog__init ( void )
3297{
3298 tl_assert(!laog);
3299 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003300 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003301
3302 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3303 HG_(free), NULL/*unboxedcmp*/ );
3304
3305 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3306 cmp_LAOGLinkExposition );
3307 tl_assert(laog);
3308 tl_assert(laog_exposition);
3309}
3310
/* Debug aid: print the whole lock acquisition order graph -- for
   each node, the guest addresses of its incoming and outgoing
   neighbours.  'who' identifies the requester in the output. */
static void laog__show ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
3337
/* Garbage-collect the univ_laog WordSet universe: mark every WordSet
   still referenced from some laog node's inns/outs, kill the rest,
   and schedule the cardinality at which the next GC will run. */
static void univ_laog_do_GC ( void ) {
   Word i;
   LAOGLinks* links;
   Word seen = 0;
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);

   /* One mark flag per WordSet in the universe. */
   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                        * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   if (VG_(clo_stats))
      VG_(message)(Vg_DebugMsg,
                   "univ_laog_do_GC enter cardinality %'10d\n",
                   (Int)univ_laog_cardinality);

   /* Mark phase: every inns/outs set reachable from laog stays. */
   VG_(initIterFM)( laog );
   links = NULL;
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links);
      tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
      links = NULL;
   }
   VG_(doneIterFM)( laog );

   /* Sweep phase: count survivors, kill the unreferenced sets. */
   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next_gc.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep long enough laog WV that will become
   //   useful again very soon.
   // Sol 2: garbage collect at a percentage increase of the current cardinality
   //         (with a min increase of 1)
   //   Trials on a small test program with 1%, 5% and 10% increase was done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each gc becomes a lot, when many gc
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to cost introduced by gc: on the t2t perf test (doing only
   // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection. With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // difference performance is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.

   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or fixed increased),
   // we should do it here, eg.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code the solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
          (Int)seen, next_gc_univ_laog);
}
3415
3416
/* Add the edge src --> dst to the lock acquisition order graph,
   maintaining both the forward (outs of src) and backward (inns of
   dst) edge sets, and -- the first time the edge appears -- recording
   the acquisition sites in laog_exposition for later error reports.
   May trigger a GC of the univ_laog universe. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      /* if the set didn't change, the edge was already there */
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* first edge for src: create its adjacency record */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* first edge for dst: create its adjacency record */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   /* forward and backward views must agree on prior presence */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   /* adding edges can grow the WordSet universe; GC if due */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}
3499
/* Remove the edge src --> dst (if present) from both the forward and
   backward edge sets, and drop the associated exposition record.
   May trigger a GC of the univ_laog universe. */
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      /* stack-local key; lookup is by (src_ga,dst_ga) only */
      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase the nr of WS so check for gc. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}
3543
3544__attribute__((noinline))
3545static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003546 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003547 LAOGLinks* links;
3548 keyW = 0;
3549 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003550 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003551 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003552 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003553 return links->outs;
3554 } else {
3555 return HG_(emptyWS)( univ_laog );
3556 }
3557}
3558
3559__attribute__((noinline))
3560static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003561 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003562 LAOGLinks* links;
3563 keyW = 0;
3564 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003565 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003566 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003567 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003568 return links->inns;
3569 } else {
3570 return HG_(emptyWS)( univ_laog );
3571 }
3572}
3573
/* Consistency check: for every node, each incoming edge must be
   matched by an outgoing edge at the other end, and vice versa.
   On failure, dumps the graph and aborts via tl_assert(0). */
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* each of my predecessors must list me as a successor */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      /* each of my successors must list me as a predecessor */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3613
3614/* If there is a path in laog from 'src' to any of the elements in
3615 'dst', return an arbitrarily chosen element of 'dst' reachable from
3616 'src'. If no path exist from 'src' to any element in 'dst', return
3617 NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   UWord     succs_size, i;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   /* Seed the worklist with the start node. */
   (void) VG_(addToXA)( stack, &src );

   /* Standard iterative depth-first search: pop a node, stop if it
      belongs to 'dsts', otherwise mark it visited and push all of its
      LAOG successors. */
   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* Worklist exhausted: no element of 'dsts' is reachable. */
      if (ssz == 0) { ret = NULL; break; }

      /* Pop the topmost entry. */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* Reached a destination: report it. */
      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      /* Skip nodes already expanded (the graph may contain cycles). */
      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3669
3670
3671/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3672 between 'lk' and the locks already held by 'thr' and issue a
3673 complaint if so. Also, update the ordering graph appropriately.
3674*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord*   ls_words;
   UWord    ls_size, i;
   Lock*    other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;   /* only the guest addresses are compared */
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         /* Found the edge's acquisition contexts; report with them. */
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Report without edge contexts rather than asserting. */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3751
sewardj866c80c2011-10-22 19:29:51 +00003752/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3753static UWord* UWordV_dup(UWord* words, Word words_size)
3754{
3755 UInt i;
3756
3757 if (words_size == 0)
3758 return NULL;
3759
3760 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3761
3762 for (i = 0; i < words_size; i++)
3763 dup[i] = words[i];
3764
3765 return dup;
3766}
sewardjb4112022007-11-09 22:49:28 +00003767
3768/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3769
/* Remove lock 'lk' from the ordering graph.  To preserve the orderings
   that went through 'lk', every (pred, succ) pair around it is joined
   with a direct edge before 'lk' itself is deleted. */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   /* Detach 'lk' from all its predecessors ... */
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   /* ... and from all its successors. */
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* Bridge each pred directly to each succ (skipping self-edges). */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   /* UWordV_dup returns NULL for empty sets, hence the guards. */
   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
3823
sewardj1cbc12f2008-11-10 16:16:46 +00003824//__attribute__((noinline))
3825//static void laog__handle_lock_deletions (
3826// WordSetID /* in univ_laog */ locksToDelete
3827// )
3828//{
3829// Word i, ws_size;
3830// UWord* ws_words;
3831//
sewardj1cbc12f2008-11-10 16:16:46 +00003832//
3833// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003834// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003835// for (i = 0; i < ws_size; i++)
3836// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3837//
3838// if (HG_(clo_sanity_flags) & SCE_LAOG)
3839// all__sanity_check("laog__handle_lock_deletions-post");
3840//}
sewardjb4112022007-11-09 22:49:28 +00003841
3842
3843/*--------------------------------------------------------------*/
3844/*--- Malloc/free replacements ---*/
3845/*--------------------------------------------------------------*/
3846
/* Book-keeping record for one client heap block.  One of these exists
   per live malloc'd block; they live in hg_mallocmeta_table, a hash
   table indexed by payload address. */
typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;
3856
3857/* A hash table of MallocMetas, used to track malloc'd blocks
3858 (obviously). */
3859static VgHashTable hg_mallocmeta_table = NULL;
3860
3861
3862static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003863 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003864 tl_assert(md);
3865 return md;
3866}
3867static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003868 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003869}
3870
3871
3872/* Allocate a client block and set up the metadata for it. */
3873
3874static
3875void* handle_alloc ( ThreadId tid,
3876 SizeT szB, SizeT alignB, Bool is_zeroed )
3877{
3878 Addr p;
3879 MallocMeta* md;
3880
3881 tl_assert( ((SSizeT)szB) >= 0 );
3882 p = (Addr)VG_(cli_malloc)(alignB, szB);
3883 if (!p) {
3884 return NULL;
3885 }
3886 if (is_zeroed)
3887 VG_(memset)((void*)p, 0, szB);
3888
3889 /* Note that map_threads_lookup must succeed (cannot assert), since
3890 memory can only be allocated by currently alive threads, hence
3891 they must have an entry in map_threads. */
3892 md = new_MallocMeta();
3893 md->payload = p;
3894 md->szB = szB;
3895 md->where = VG_(record_ExeContext)( tid, 0 );
3896 md->thr = map_threads_lookup( tid );
3897
3898 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3899
3900 /* Tell the lower level memory wranglers. */
3901 evh__new_mem_heap( p, szB, is_zeroed );
3902
3903 return (void*)p;
3904}
3905
3906/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3907 Cast to a signed type to catch any unexpectedly negative args.
3908 We're assuming here that the size asked for is not greater than
3909 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3910 platforms). */
3911static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3912 if (((SSizeT)n) < 0) return NULL;
3913 return handle_alloc ( tid, n, VG_(clo_alignment),
3914 /*is_zeroed*/False );
3915}
3916static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3917 if (((SSizeT)n) < 0) return NULL;
3918 return handle_alloc ( tid, n, VG_(clo_alignment),
3919 /*is_zeroed*/False );
3920}
3921static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3922 if (((SSizeT)n) < 0) return NULL;
3923 return handle_alloc ( tid, n, VG_(clo_alignment),
3924 /*is_zeroed*/False );
3925}
3926static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3927 if (((SSizeT)n) < 0) return NULL;
3928 return handle_alloc ( tid, n, align,
3929 /*is_zeroed*/False );
3930}
3931static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3932 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3933 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3934 /*is_zeroed*/True );
3935}
3936
3937
3938/* Free a client block, including getting rid of the relevant
3939 metadata. */
3940
3941static void handle_free ( ThreadId tid, void* p )
3942{
3943 MallocMeta *md, *old_md;
3944 SizeT szB;
3945
3946 /* First see if we can find the metadata for 'p'. */
3947 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3948 if (!md)
3949 return; /* apparently freeing a bogus address. Oh well. */
3950
3951 tl_assert(md->payload == (Addr)p);
3952 szB = md->szB;
3953
3954 /* Nuke the metadata block */
3955 old_md = (MallocMeta*)
3956 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3957 tl_assert(old_md); /* it must be present - we just found it */
3958 tl_assert(old_md == md);
3959 tl_assert(old_md->payload == (Addr)p);
3960
3961 VG_(cli_free)((void*)old_md->payload);
3962 delete_MallocMeta(old_md);
3963
3964 /* Tell the lower level memory wranglers. */
3965 evh__die_mem_heap( (Addr)p, szB );
3966}
3967
/* Client 'free' entry point; all the work is in handle_free. */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* C++ operator delete entry point; same behaviour as free. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* C++ operator delete[] entry point; same behaviour as free. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
3977
3978
3979static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3980{
3981 MallocMeta *md, *md_new, *md_tmp;
3982 SizeT i;
3983
3984 Addr payload = (Addr)payloadV;
3985
3986 if (((SSizeT)new_size) < 0) return NULL;
3987
3988 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3989 if (!md)
3990 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3991
3992 tl_assert(md->payload == payload);
3993
3994 if (md->szB == new_size) {
3995 /* size unchanged */
3996 md->where = VG_(record_ExeContext)(tid, 0);
3997 return payloadV;
3998 }
3999
4000 if (md->szB > new_size) {
4001 /* new size is smaller */
4002 md->szB = new_size;
4003 md->where = VG_(record_ExeContext)(tid, 0);
4004 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4005 return payloadV;
4006 }
4007
4008 /* else */ {
4009 /* new size is bigger */
4010 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4011
4012 /* First half kept and copied, second half new */
4013 // FIXME: shouldn't we use a copier which implements the
4014 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004015 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004016 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004017 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004018 /* FIXME: can anything funny happen here? specifically, if the
4019 old range contained a lock, then die_mem_heap will complain.
4020 Is that the correct behaviour? Not sure. */
4021 evh__die_mem_heap( payload, md->szB );
4022
4023 /* Copy from old to new */
4024 for (i = 0; i < md->szB; i++)
4025 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4026
4027 /* Because the metadata hash table is index by payload address,
4028 we have to get rid of the old hash table entry and make a new
4029 one. We can't just modify the existing metadata in place,
4030 because then it would (almost certainly) be in the wrong hash
4031 chain. */
4032 md_new = new_MallocMeta();
4033 *md_new = *md;
4034
4035 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4036 tl_assert(md_tmp);
4037 tl_assert(md_tmp == md);
4038
4039 VG_(cli_free)((void*)md->payload);
4040 delete_MallocMeta(md);
4041
4042 /* Update fields */
4043 md_new->where = VG_(record_ExeContext)( tid, 0 );
4044 md_new->szB = new_size;
4045 md_new->payload = p_new;
4046 md_new->thr = map_threads_lookup( tid );
4047
4048 /* and add */
4049 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4050
4051 return (void*)p_new;
4052 }
4053}
4054
njn8b140de2009-02-17 04:31:18 +00004055static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4056{
4057 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4058
4059 // There may be slop, but pretend there isn't because only the asked-for
4060 // area will have been shadowed properly.
4061 return ( md ? md->szB : 0 );
4062}
4063
sewardjb4112022007-11-09 22:49:28 +00004064
sewardj095d61e2010-03-11 13:43:18 +00004065/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004066 Slow linear search. With a bit of hash table help if 'data_addr'
4067 is either the start of a block or up to 15 word-sized steps along
4068 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004069
4070static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4071{
sewardjc8028ad2010-05-05 09:34:42 +00004072 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4073 right at it. */
4074 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4075 return True;
4076 /* else normal interval rules apply */
4077 if (LIKELY(a < mm->payload)) return False;
4078 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4079 return True;
sewardj095d61e2010-03-11 13:43:18 +00004080}
4081
sewardjc8028ad2010-05-05 09:34:42 +00004082Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00004083 /*OUT*/Addr* payload,
4084 /*OUT*/SizeT* szB,
4085 Addr data_addr )
4086{
4087 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004088 Int i;
4089 const Int n_fast_check_words = 16;
4090
4091 /* First, do a few fast searches on the basis that data_addr might
4092 be exactly the start of a block or up to 15 words inside. This
4093 can happen commonly via the creq
4094 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4095 for (i = 0; i < n_fast_check_words; i++) {
4096 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4097 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4098 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4099 goto found;
4100 }
4101
sewardj095d61e2010-03-11 13:43:18 +00004102 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004103 some such, it's hard to see how to do better. We have to check
4104 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004105 VG_(HT_ResetIter)(hg_mallocmeta_table);
4106 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004107 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4108 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004109 }
sewardjc8028ad2010-05-05 09:34:42 +00004110
4111 /* Not found. Bah. */
4112 return False;
4113 /*NOTREACHED*/
4114
4115 found:
4116 tl_assert(mm);
4117 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4118 if (where) *where = mm->where;
4119 if (payload) *payload = mm->payload;
4120 if (szB) *szB = mm->szB;
4121 return True;
sewardj095d61e2010-03-11 13:43:18 +00004122}
4123
4124
sewardjb4112022007-11-09 22:49:28 +00004125/*--------------------------------------------------------------*/
4126/*--- Instrumentation ---*/
4127/*--------------------------------------------------------------*/
4128
sewardjffce8152011-06-24 10:09:41 +00004129#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4130#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4131#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4132#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4133#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4134
/* Emit IR into 'sbOut' that calls the appropriate Helgrind helper for
   one guest memory access.
     addr        -- address of the access (must be an IR atom)
     szB         -- access size in bytes
     isStore     -- True for a store, False for a load
     hWordTy_szB -- host word size in bytes (4 or 8); sanity check only
     goff_sp     -- guest state offset of the stack pointer, used when
                    building the skip-stack-references guard
   When --check-stack-refs=no (HG_(clo_check_stack_refs) is False), the
   helper call is guarded so accesses close to SP are skipped. */
static void instrument_mem_access ( IRSB*   sbOut, 
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB,
                                    Int     goff_sp )
{
   IRType   tyAddr   = Ity_INVALID;
   const HChar* hName = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   /* Select the helper: a specialised entry point for each common
      size (1/2/4/8 bytes), plus a generic _N variant that also takes
      the size as a second argument. */
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      /* sp := current guest stack pointer */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32 
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      /* guard := THRESH <u diff; the dirty call only runs when the
         access is NOT within the stack window. */
      IRTemp guard = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guard,
                tyAddr == Ity_I32 
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
4290
4291
sewardja0eee322009-07-31 08:46:35 +00004292/* Figure out if GA is a guest code address in the dynamic linker, and
4293 if so return True. Otherwise (and in case of any doubt) return
4294 False. (sidedly safe w/ False as the safe value) */
4295static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4296{
4297 DebugInfo* dinfo;
florian19f91bb2012-11-10 22:29:54 +00004298 const HChar* soname;
sewardja0eee322009-07-31 08:46:35 +00004299 if (0) return False;
4300
sewardje3f1e592009-07-31 09:41:29 +00004301 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004302 if (!dinfo) return False;
4303
sewardje3f1e592009-07-31 09:41:29 +00004304 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004305 tl_assert(soname);
4306 if (0) VG_(printf)("%s\n", soname);
4307
4308# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004309 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004310 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4311 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4312 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4313 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4314# elif defined(VGO_darwin)
4315 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4316# else
4317# error "Unsupported OS"
4318# endif
4319 return False;
4320}
4321
sewardjb4112022007-11-09 22:49:28 +00004322static
4323IRSB* hg_instrument ( VgCallbackClosure* closure,
4324 IRSB* bbIn,
4325 VexGuestLayout* layout,
4326 VexGuestExtents* vge,
florianca503be2012-10-07 21:59:42 +00004327 VexArchInfo* archinfo_host,
sewardjb4112022007-11-09 22:49:28 +00004328 IRType gWordTy, IRType hWordTy )
4329{
sewardj1c0ce7a2009-07-01 08:10:49 +00004330 Int i;
4331 IRSB* bbOut;
4332 Addr64 cia; /* address of current insn */
4333 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004334 Bool inLDSO = False;
4335 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004336
sewardjffce8152011-06-24 10:09:41 +00004337 const Int goff_sp = layout->offset_SP;
4338
sewardjb4112022007-11-09 22:49:28 +00004339 if (gWordTy != hWordTy) {
4340 /* We don't currently support this case. */
4341 VG_(tool_panic)("host/guest word size mismatch");
4342 }
4343
sewardja0eee322009-07-31 08:46:35 +00004344 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4345 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4346 }
4347
sewardjb4112022007-11-09 22:49:28 +00004348 /* Set up BB */
4349 bbOut = emptyIRSB();
4350 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4351 bbOut->next = deepCopyIRExpr(bbIn->next);
4352 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004353 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004354
4355 // Copy verbatim any IR preamble preceding the first IMark
4356 i = 0;
4357 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4358 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4359 i++;
4360 }
4361
sewardj1c0ce7a2009-07-01 08:10:49 +00004362 // Get the first statement, and initial cia from it
4363 tl_assert(bbIn->stmts_used > 0);
4364 tl_assert(i < bbIn->stmts_used);
4365 st = bbIn->stmts[i];
4366 tl_assert(Ist_IMark == st->tag);
4367 cia = st->Ist.IMark.addr;
4368 st = NULL;
4369
sewardjb4112022007-11-09 22:49:28 +00004370 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004371 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004372 tl_assert(st);
4373 tl_assert(isFlatIRStmt(st));
4374 switch (st->tag) {
4375 case Ist_NoOp:
4376 case Ist_AbiHint:
4377 case Ist_Put:
4378 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004379 case Ist_Exit:
4380 /* None of these can contain any memory references. */
4381 break;
4382
sewardj1c0ce7a2009-07-01 08:10:49 +00004383 case Ist_IMark:
4384 /* no mem refs, but note the insn address. */
4385 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004386 /* Don't instrument the dynamic linker. It generates a
4387 lot of races which we just expensively suppress, so
4388 it's pointless.
4389
4390 Avoid flooding is_in_dynamic_linker_shared_object with
4391 requests by only checking at transitions between 4K
4392 pages. */
4393 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4394 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4395 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4396 inLDSO = is_in_dynamic_linker_shared_object(cia);
4397 } else {
4398 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4399 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004400 break;
4401
sewardjb4112022007-11-09 22:49:28 +00004402 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004403 switch (st->Ist.MBE.event) {
4404 case Imbe_Fence:
4405 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004406 default:
4407 goto unhandled;
4408 }
sewardjb4112022007-11-09 22:49:28 +00004409 break;
4410
sewardj1c0ce7a2009-07-01 08:10:49 +00004411 case Ist_CAS: {
4412 /* Atomic read-modify-write cycle. Just pretend it's a
4413 read. */
4414 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004415 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4416 if (isDCAS) {
4417 tl_assert(cas->expdHi);
4418 tl_assert(cas->dataHi);
4419 } else {
4420 tl_assert(!cas->expdHi);
4421 tl_assert(!cas->dataHi);
4422 }
4423 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004424 if (!inLDSO) {
4425 instrument_mem_access(
4426 bbOut,
4427 cas->addr,
4428 (isDCAS ? 2 : 1)
4429 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4430 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004431 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004432 );
4433 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004434 break;
4435 }
4436
sewardjdb5907d2009-11-26 17:20:21 +00004437 case Ist_LLSC: {
4438 /* We pretend store-conditionals don't exist, viz, ignore
4439 them. Whereas load-linked's are treated the same as
4440 normal loads. */
4441 IRType dataTy;
4442 if (st->Ist.LLSC.storedata == NULL) {
4443 /* LL */
4444 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004445 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004446 instrument_mem_access(
4447 bbOut,
4448 st->Ist.LLSC.addr,
4449 sizeofIRType(dataTy),
4450 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004451 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004452 );
4453 }
sewardjdb5907d2009-11-26 17:20:21 +00004454 } else {
4455 /* SC */
4456 /*ignore */
4457 }
4458 break;
4459 }
4460
4461 case Ist_Store:
4462 /* It seems we pretend that store-conditionals don't
4463 exist, viz, just ignore them ... */
4464 if (!inLDSO) {
4465 instrument_mem_access(
4466 bbOut,
4467 st->Ist.Store.addr,
4468 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4469 True/*isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004470 sizeofIRType(hWordTy), goff_sp
sewardjdb5907d2009-11-26 17:20:21 +00004471 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004472 }
njnb83caf22009-05-25 01:47:56 +00004473 break;
sewardjb4112022007-11-09 22:49:28 +00004474
4475 case Ist_WrTmp: {
sewardj1c0ce7a2009-07-01 08:10:49 +00004476 /* ... whereas here we don't care whether a load is a
4477 vanilla one or a load-linked. */
sewardjb4112022007-11-09 22:49:28 +00004478 IRExpr* data = st->Ist.WrTmp.data;
4479 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004480 if (!inLDSO) {
4481 instrument_mem_access(
4482 bbOut,
4483 data->Iex.Load.addr,
4484 sizeofIRType(data->Iex.Load.ty),
4485 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004486 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004487 );
4488 }
sewardjb4112022007-11-09 22:49:28 +00004489 }
4490 break;
4491 }
4492
4493 case Ist_Dirty: {
4494 Int dataSize;
4495 IRDirty* d = st->Ist.Dirty.details;
4496 if (d->mFx != Ifx_None) {
4497 /* This dirty helper accesses memory. Collect the
4498 details. */
4499 tl_assert(d->mAddr != NULL);
4500 tl_assert(d->mSize != 0);
4501 dataSize = d->mSize;
4502 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004503 if (!inLDSO) {
4504 instrument_mem_access(
4505 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004506 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004507 );
4508 }
sewardjb4112022007-11-09 22:49:28 +00004509 }
4510 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004511 if (!inLDSO) {
4512 instrument_mem_access(
4513 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004514 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004515 );
4516 }
sewardjb4112022007-11-09 22:49:28 +00004517 }
4518 } else {
4519 tl_assert(d->mAddr == NULL);
4520 tl_assert(d->mSize == 0);
4521 }
4522 break;
4523 }
4524
4525 default:
sewardjf98e1c02008-10-25 16:22:41 +00004526 unhandled:
4527 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004528 tl_assert(0);
4529
4530 } /* switch (st->tag) */
4531
4532 addStmtToIRSB( bbOut, st );
4533 } /* iterate over bbIn->stmts */
4534
4535 return bbOut;
4536}
4537
sewardjffce8152011-06-24 10:09:41 +00004538#undef binop
4539#undef mkexpr
4540#undef mkU32
4541#undef mkU64
4542#undef assign
4543
sewardjb4112022007-11-09 22:49:28 +00004544
4545/*----------------------------------------------------------------*/
4546/*--- Client requests ---*/
4547/*----------------------------------------------------------------*/
4548
4549/* Sheesh. Yet another goddam finite map. */
4550static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4551
4552static void map_pthread_t_to_Thread_INIT ( void ) {
4553 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004554 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4555 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004556 tl_assert(map_pthread_t_to_Thread != NULL);
4557 }
4558}
4559
4560
/* Tool-level handler for Helgrind client requests (the
   VG_USERREQ__HG_* / _VG_USERREQ__HG_* codes).  Returns False if
   args[0] is not a Helgrind request; otherwise dispatches on the
   request code, writes any reply value into *ret (default 0), and
   returns True.  Most cases simply forward to the matching evh__*
   event handler. */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         /* Like CLEAN_MEMORY, but applied to the whole heap block
            containing address args[1].  Replies with the block's
            size, or (UWord)-1 if no containing block was found. */
         Addr payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
         VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
         VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                     (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         /* The pthread wrappers observed a misuse of the pthreads
            API: args = (HChar* fnname, UWord err, HChar* errstr). */
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool found = False;
         if (0)
         VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (UWord*)&thr_q, (UWord)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
            VG_(printf)(".................... quitter Thread* = %p\n",
                        thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* HChar* who */
         /* A helgrind wrapper macro that is not implemented fired;
            report it as a Misc error rather than asserting. */
         HChar* who = (HChar*)args[1];
         HChar buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
4877
4878
4879/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004880/*--- Setup ---*/
4881/*----------------------------------------------------------------*/
4882
/* Process one Helgrind-specific command line option in 'arg'.
   Returns True if the option was recognised and consumed (or handled
   by the replacement-malloc option parser); False signals a usage
   error, after printing an explanatory message.  NB: the bodyless
   'if VG_..._CLO(...) ...;' shapes below are the standard Valgrind
   option-macro idiom; the macros expand to conditions with
   side effects. */
static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      /* Convert the six '0'/'1' characters to a bitmask, leftmost
         character mapping to the highest of the six bits. */
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4943
/* Print the user-visible Helgrind command line options (--help). */
static void hg_print_usage ( void )
{
   VG_(printf)(
" --free-is-write=no|yes treat heap frees as writes [no]\n"
" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
" --history-level=none|approx|full [full]\n"
" full: show both stack traces for a data race (can be very slow)\n"
" approx: full trace for one thread, approx for the other (faster)\n"
" none: only show trace for one thread in a race (fastest)\n"
" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
" --check-stack-refs=no|yes race-check reads and writes on the\n"
" main stack and thread stacks? [yes]\n"
   );
}
4958
/* Print the debugging-only Helgrind options (--help-debug). */
static void hg_print_debug_usage ( void )
{
   VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
               " at events (X = 0|1) [000000]\n");
   VG_(printf)(" --hg-sanity-flags values:\n");
   VG_(printf)(" 010000 after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
   VG_(printf)(" 000100 at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)(" 000010 at lock/unlock events\n");
   VG_(printf)(" 000001 at thread create/join events\n");
   VG_(printf)(
" --vts-pruning=never|auto|always [auto]\n"
" never: is never done (may cause big space leaks in Helgrind)\n"
" auto: done just often enough to keep space usage under control\n"
" always: done after every VTS GC (mostly just a big time waster)\n"
   );
}
4980
/* Final tidy-up, called by the core at client exit.  'exitcode' is
   the client's exit status (unused here).  Prints user hints at
   verbosity 1, optionally dumps internal data structures and runs a
   final sanity check, and with --stats prints detailed statistics
   before shutting down libhb. */
static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   /* Suggest a cheaper history mode if the expensive one was used. */
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog, "univ_laog" );
         }
      }

      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
      //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)(" locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      // stats__ga_LL_adds,
      // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)(" LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)(" LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)(" locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}
5062
sewardjf98e1c02008-10-25 16:22:41 +00005063/* FIXME: move these somewhere sane */
5064
5065static
5066void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5067{
5068 Thread* thr;
5069 ThreadId tid;
5070 UWord nActual;
5071 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005072 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005073 tl_assert(thr);
5074 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5075 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
5076 NULL, NULL, 0 );
5077 tl_assert(nActual <= nRequest);
5078 for (; nActual < nRequest; nActual++)
5079 frames[nActual] = 0;
5080}
5081
5082static
sewardj23f12002009-07-24 08:45:08 +00005083ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00005084{
5085 Thread* thr;
5086 ThreadId tid;
5087 ExeContext* ec;
5088 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005089 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005090 tl_assert(thr);
5091 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00005092 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00005093 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00005094 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00005095}
5096
5097
/* Called by the core after command line options have been processed.
   Starts up libhb (which yields the root thread's Thr*), sets up the
   lock-order acquisition graph if requested, and initialises
   Helgrind's own data structures.  NB: libhb_init must run before
   initialise_data_structures, which consumes hbthr_root. */
static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}
5113
5114static void hg_pre_clo_init ( void )
5115{
sewardjb4112022007-11-09 22:49:28 +00005116 VG_(details_name) ("Helgrind");
5117 VG_(details_version) (NULL);
5118 VG_(details_description) ("a thread error detector");
5119 VG_(details_copyright_author)(
sewardj03f8d3f2012-08-05 15:46:46 +00005120 "Copyright (C) 2007-2012, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00005121 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9c08c0f2011-03-10 15:01:14 +00005122 VG_(details_avg_translation_sizeB) ( 320 );
sewardjb4112022007-11-09 22:49:28 +00005123
5124 VG_(basic_tool_funcs) (hg_post_clo_init,
5125 hg_instrument,
5126 hg_fini);
5127
5128 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00005129 VG_(needs_tool_errors) (HG_(eq_Error),
sewardj24118492009-07-15 14:50:02 +00005130 HG_(before_pp_Error),
sewardjf98e1c02008-10-25 16:22:41 +00005131 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00005132 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00005133 HG_(update_extra),
5134 HG_(recognised_suppression),
5135 HG_(read_extra_suppression_info),
5136 HG_(error_matches_suppression),
5137 HG_(get_error_name),
sewardj588adef2009-08-15 22:41:51 +00005138 HG_(get_extra_suppression_info));
sewardjb4112022007-11-09 22:49:28 +00005139
sewardj24118492009-07-15 14:50:02 +00005140 VG_(needs_xml_output) ();
5141
sewardjb4112022007-11-09 22:49:28 +00005142 VG_(needs_command_line_options)(hg_process_cmd_line_option,
5143 hg_print_usage,
5144 hg_print_debug_usage);
5145 VG_(needs_client_requests) (hg_handle_client_request);
5146
5147 // FIXME?
5148 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
5149 // hg_expensive_sanity_check);
5150
5151 VG_(needs_malloc_replacement) (hg_cli__malloc,
5152 hg_cli____builtin_new,
5153 hg_cli____builtin_vec_new,
5154 hg_cli__memalign,
5155 hg_cli__calloc,
5156 hg_cli__free,
5157 hg_cli____builtin_delete,
5158 hg_cli____builtin_vec_delete,
5159 hg_cli__realloc,
njn8b140de2009-02-17 04:31:18 +00005160 hg_cli_malloc_usable_size,
philipped99c26a2012-07-31 22:17:28 +00005161 HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
sewardjb4112022007-11-09 22:49:28 +00005162
sewardj849b0ed2008-12-21 10:43:10 +00005163 /* 21 Dec 08: disabled this; it mostly causes H to start more
5164 slowly and use significantly more memory, without very often
5165 providing useful results. The user can request to load this
5166 information manually with --read-var-info=yes. */
5167 if (0) VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00005168
5169 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00005170 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
5171 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00005172 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
sewardj1f77fec2010-04-12 19:51:04 +00005173 VG_(track_new_mem_stack) ( evh__new_mem_stack );
sewardjb4112022007-11-09 22:49:28 +00005174
5175 // FIXME: surely this isn't thread-aware
sewardj23f12002009-07-24 08:45:08 +00005176 VG_(track_copy_mem_remap) ( evh__copy_mem );
sewardjb4112022007-11-09 22:49:28 +00005177
5178 VG_(track_change_mem_mprotect) ( evh__set_perms );
5179
5180 VG_(track_die_mem_stack_signal)( evh__die_mem );
sewardjfd35d492011-03-17 19:39:55 +00005181 VG_(track_die_mem_brk) ( evh__die_mem_munmap );
5182 VG_(track_die_mem_munmap) ( evh__die_mem_munmap );
sewardjb4112022007-11-09 22:49:28 +00005183 VG_(track_die_mem_stack) ( evh__die_mem );
5184
5185 // FIXME: what is this for?
5186 VG_(track_ban_mem_stack) (NULL);
5187
5188 VG_(track_pre_mem_read) ( evh__pre_mem_read );
5189 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
5190 VG_(track_pre_mem_write) ( evh__pre_mem_write );
5191 VG_(track_post_mem_write) (NULL);
5192
5193 /////////////////
5194
5195 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
5196 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
5197
5198 VG_(track_start_client_code)( evh__start_client_code );
5199 VG_(track_stop_client_code)( evh__stop_client_code );
5200
sewardjb4112022007-11-09 22:49:28 +00005201 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
5202 as described in comments at the top of pub_tool_hashtable.h, are
5203 met. Blargh. */
5204 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
5205 tl_assert( sizeof(UWord) == sizeof(Addr) );
5206 hg_mallocmeta_table
5207 = VG_(HT_construct)( "hg_malloc_metadata_table" );
5208
sewardj61bc2c52011-02-09 10:34:00 +00005209 // add a callback to clean up on (threaded) fork.
5210 VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
sewardjb4112022007-11-09 22:49:28 +00005211}
5212
5213VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5214
5215/*--------------------------------------------------------------------*/
5216/*--- end hg_main.c ---*/
5217/*--------------------------------------------------------------------*/