blob: 5c109af7082a3c8e027de1cd07124f5418873237 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj03f8d3f2012-08-05 15:46:46 +000011 Copyright (C) 2007-2012 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
sewardj03f8d3f2012-08-05 15:46:46 +000014 Copyright (C) 2007-2012 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj61bc2c52011-02-09 10:34:00 +000055#include "pub_tool_libcproc.h" // VG_(atfork)
sewardj234e5582011-02-09 12:47:23 +000056#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
sewardjb4112022007-11-09 22:49:28 +000057
sewardjf98e1c02008-10-25 16:22:41 +000058#include "hg_basics.h"
59#include "hg_wordset.h"
60#include "hg_lock_n_thread.h"
61#include "hg_errors.h"
62
63#include "libhb.h"
64
sewardjb4112022007-11-09 22:49:28 +000065#include "helgrind.h"
66
sewardjf98e1c02008-10-25 16:22:41 +000067
68// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
69
70// FIXME: when client destroys a lock or a CV, remove these
71// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000072
73/*----------------------------------------------------------------*/
74/*--- ---*/
75/*----------------------------------------------------------------*/
76
sewardj11e352f2007-11-30 11:11:02 +000077/* Note this needs to be compiled with -fno-strict-aliasing, since it
78 contains a whole bunch of calls to lookupFM etc which cast between
79 Word and pointer types. gcc rightly complains this breaks ANSI C
80 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
81 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000082*/
sewardjb4112022007-11-09 22:49:28 +000083
84// FIXME what is supposed to happen to locks in memory which
85// is relocated as a result of client realloc?
86
sewardjb4112022007-11-09 22:49:28 +000087// FIXME put referencing ThreadId into Thread and get
88// rid of the slow reverse mapping function.
89
90// FIXME accesses to NoAccess areas: change state to Excl?
91
92// FIXME report errors for accesses of NoAccess memory?
93
94// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
95// the thread still holds the lock.
96
97/* ------------ Debug/trace options ------------ */
98
sewardjb4112022007-11-09 22:49:28 +000099// 0 for silent, 1 for some stuff, 2 for lots of stuff
100#define SHOW_EVENTS 0
101
sewardjb4112022007-11-09 22:49:28 +0000102
florian6bf37262012-10-21 03:23:36 +0000103static void all__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000104
philipped99c26a2012-07-31 22:17:28 +0000105#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
sewardjb4112022007-11-09 22:49:28 +0000106
107// 0 for none, 1 for dump at end of run
108#define SHOW_DATA_STRUCTURES 0
109
110
sewardjb4112022007-11-09 22:49:28 +0000111/* ------------ Misc comments ------------ */
112
113// FIXME: don't hardwire initial entries for root thread.
114// Instead, let the pre_thread_ll_create handler do this.
115
sewardjb4112022007-11-09 22:49:28 +0000116
117/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000118/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000119/*----------------------------------------------------------------*/
120
sewardjb4112022007-11-09 22:49:28 +0000121/* Admin linked list of Threads */
122static Thread* admin_threads = NULL;
sewardjffce8152011-06-24 10:09:41 +0000123Thread* get_admin_threads ( void ) { return admin_threads; }
sewardjb4112022007-11-09 22:49:28 +0000124
sewardj1d7c3322011-02-28 09:22:51 +0000125/* Admin double linked list of Locks */
126/* We need a double linked list to properly and efficiently
127 handle del_LockN. */
sewardjb4112022007-11-09 22:49:28 +0000128static Lock* admin_locks = NULL;
129
sewardjb4112022007-11-09 22:49:28 +0000130/* Mapping table for core ThreadIds to Thread* */
131static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
132
sewardjb4112022007-11-09 22:49:28 +0000133/* Mapping table for lock guest addresses to Lock* */
134static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
135
sewardj0f64c9e2011-03-10 17:40:22 +0000136/* The word-set universes for lock sets. */
sewardjb4112022007-11-09 22:49:28 +0000137static WordSetU* univ_lsets = NULL; /* sets of Lock* */
138static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
sewardj866c80c2011-10-22 19:29:51 +0000139static Int next_gc_univ_laog = 1;
140/* univ_laog will be garbaged collected when the nr of element in univ_laog is
141 >= next_gc_univ_laog. */
sewardjb4112022007-11-09 22:49:28 +0000142
sewardjffce8152011-06-24 10:09:41 +0000143/* Allow libhb to get at the universe of locksets stored
144 here. Sigh. */
145WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
146
147/* Allow libhb to get at the list of locks stored here. Ditto
148 sigh. */
149Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
150
sewardjb4112022007-11-09 22:49:28 +0000151
152/*----------------------------------------------------------------*/
153/*--- Simple helpers for the data structures ---*/
154/*----------------------------------------------------------------*/
155
156static UWord stats__lockN_acquires = 0;
157static UWord stats__lockN_releases = 0;
158
sewardjf98e1c02008-10-25 16:22:41 +0000159static
160ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000161
162/* --------- Constructors --------- */
163
sewardjf98e1c02008-10-25 16:22:41 +0000164static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000165 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000166 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000167 thread->locksetA = HG_(emptyWS)( univ_lsets );
168 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000169 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000170 thread->hbthr = hbthr;
171 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000172 thread->created_at = NULL;
173 thread->announced = False;
174 thread->errmsg_index = indx++;
175 thread->admin = admin_threads;
176 admin_threads = thread;
177 return thread;
178}
sewardjf98e1c02008-10-25 16:22:41 +0000179
sewardjb4112022007-11-09 22:49:28 +0000180// Make a new lock which is unlocked (hence ownerless)
sewardj1d7c3322011-02-28 09:22:51 +0000181// and insert the new lock in admin_locks double linked list.
sewardjb4112022007-11-09 22:49:28 +0000182static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
183 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000184 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardj0f64c9e2011-03-10 17:40:22 +0000185 /* begin: add to double linked list */
sewardj1d7c3322011-02-28 09:22:51 +0000186 if (admin_locks)
187 admin_locks->admin_prev = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000188 lock->admin_next = admin_locks;
189 lock->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000190 admin_locks = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000191 /* end: add */
sewardjb4112022007-11-09 22:49:28 +0000192 lock->unique = unique++;
193 lock->magic = LockN_MAGIC;
194 lock->appeared_at = NULL;
195 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000196 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000197 lock->guestaddr = guestaddr;
198 lock->kind = kind;
199 lock->heldW = False;
200 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000201 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000202 return lock;
203}
sewardjb4112022007-11-09 22:49:28 +0000204
205/* Release storage for a Lock. Also release storage in .heldBy, if
sewardj1d7c3322011-02-28 09:22:51 +0000206 any. Removes from admin_locks double linked list. */
sewardjb4112022007-11-09 22:49:28 +0000207static void del_LockN ( Lock* lk )
208{
sewardjf98e1c02008-10-25 16:22:41 +0000209 tl_assert(HG_(is_sane_LockN)(lk));
210 tl_assert(lk->hbso);
211 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000212 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000213 VG_(deleteBag)( lk->heldBy );
sewardj0f64c9e2011-03-10 17:40:22 +0000214 /* begin: del lock from double linked list */
215 if (lk == admin_locks) {
216 tl_assert(lk->admin_prev == NULL);
217 if (lk->admin_next)
218 lk->admin_next->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000219 admin_locks = lk->admin_next;
sewardj1d7c3322011-02-28 09:22:51 +0000220 }
221 else {
sewardj0f64c9e2011-03-10 17:40:22 +0000222 tl_assert(lk->admin_prev != NULL);
sewardj1d7c3322011-02-28 09:22:51 +0000223 lk->admin_prev->admin_next = lk->admin_next;
sewardj0f64c9e2011-03-10 17:40:22 +0000224 if (lk->admin_next)
225 lk->admin_next->admin_prev = lk->admin_prev;
sewardj1d7c3322011-02-28 09:22:51 +0000226 }
sewardj0f64c9e2011-03-10 17:40:22 +0000227 /* end: del */
sewardjb4112022007-11-09 22:49:28 +0000228 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000229 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000230}
231
232/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
233 it. This is done strictly: only combinations resulting from
234 correct program and libpthread behaviour are allowed. */
235static void lockN_acquire_writer ( Lock* lk, Thread* thr )
236{
sewardjf98e1c02008-10-25 16:22:41 +0000237 tl_assert(HG_(is_sane_LockN)(lk));
238 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000239
240 stats__lockN_acquires++;
241
242 /* EXPOSITION only */
243 /* We need to keep recording snapshots of where the lock was
244 acquired, so as to produce better lock-order error messages. */
245 if (lk->acquired_at == NULL) {
246 ThreadId tid;
247 tl_assert(lk->heldBy == NULL);
248 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
249 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000250 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000251 } else {
252 tl_assert(lk->heldBy != NULL);
253 }
254 /* end EXPOSITION only */
255
256 switch (lk->kind) {
257 case LK_nonRec:
258 case_LK_nonRec:
259 tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
260 tl_assert(!lk->heldW);
261 lk->heldW = True;
sewardjf98e1c02008-10-25 16:22:41 +0000262 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
florian6bf37262012-10-21 03:23:36 +0000263 VG_(addToBag)( lk->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +0000264 break;
265 case LK_mbRec:
266 if (lk->heldBy == NULL)
267 goto case_LK_nonRec;
268 /* 2nd and subsequent locking of a lock by its owner */
269 tl_assert(lk->heldW);
270 /* assert: lk is only held by one thread .. */
sewardj896f6f92008-08-19 08:38:52 +0000271 tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
sewardjb4112022007-11-09 22:49:28 +0000272 /* assert: .. and that thread is 'thr'. */
florian6bf37262012-10-21 03:23:36 +0000273 tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
sewardj896f6f92008-08-19 08:38:52 +0000274 == VG_(sizeTotalBag)(lk->heldBy));
florian6bf37262012-10-21 03:23:36 +0000275 VG_(addToBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000276 break;
277 case LK_rdwr:
278 tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
279 goto case_LK_nonRec;
280 default:
281 tl_assert(0);
282 }
sewardjf98e1c02008-10-25 16:22:41 +0000283 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000284}
285
286static void lockN_acquire_reader ( Lock* lk, Thread* thr )
287{
sewardjf98e1c02008-10-25 16:22:41 +0000288 tl_assert(HG_(is_sane_LockN)(lk));
289 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000290 /* can only add reader to a reader-writer lock. */
291 tl_assert(lk->kind == LK_rdwr);
292 /* lk must be free or already r-held. */
293 tl_assert(lk->heldBy == NULL
294 || (lk->heldBy != NULL && !lk->heldW));
295
296 stats__lockN_acquires++;
297
298 /* EXPOSITION only */
299 /* We need to keep recording snapshots of where the lock was
300 acquired, so as to produce better lock-order error messages. */
301 if (lk->acquired_at == NULL) {
302 ThreadId tid;
303 tl_assert(lk->heldBy == NULL);
304 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
305 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000306 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000307 } else {
308 tl_assert(lk->heldBy != NULL);
309 }
310 /* end EXPOSITION only */
311
312 if (lk->heldBy) {
florian6bf37262012-10-21 03:23:36 +0000313 VG_(addToBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000314 } else {
315 lk->heldW = False;
sewardjf98e1c02008-10-25 16:22:41 +0000316 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
florian6bf37262012-10-21 03:23:36 +0000317 VG_(addToBag)( lk->heldBy, (UWord)thr );
sewardjb4112022007-11-09 22:49:28 +0000318 }
319 tl_assert(!lk->heldW);
sewardjf98e1c02008-10-25 16:22:41 +0000320 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000321}
322
323/* Update 'lk' to reflect a release of it by 'thr'. This is done
324 strictly: only combinations resulting from correct program and
325 libpthread behaviour are allowed. */
326
327static void lockN_release ( Lock* lk, Thread* thr )
328{
329 Bool b;
sewardjf98e1c02008-10-25 16:22:41 +0000330 tl_assert(HG_(is_sane_LockN)(lk));
331 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000332 /* lock must be held by someone */
333 tl_assert(lk->heldBy);
334 stats__lockN_releases++;
335 /* Remove it from the holder set */
florian6bf37262012-10-21 03:23:36 +0000336 b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
sewardjb4112022007-11-09 22:49:28 +0000337 /* thr must actually have been a holder of lk */
338 tl_assert(b);
339 /* normalise */
340 tl_assert(lk->acquired_at);
sewardj896f6f92008-08-19 08:38:52 +0000341 if (VG_(isEmptyBag)(lk->heldBy)) {
342 VG_(deleteBag)(lk->heldBy);
sewardjb4112022007-11-09 22:49:28 +0000343 lk->heldBy = NULL;
344 lk->heldW = False;
345 lk->acquired_at = NULL;
346 }
sewardjf98e1c02008-10-25 16:22:41 +0000347 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000348}
349
350static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
351{
352 Thread* thr;
353 if (!lk->heldBy) {
354 tl_assert(!lk->heldW);
355 return;
356 }
357 /* for each thread that holds this lock do ... */
sewardj896f6f92008-08-19 08:38:52 +0000358 VG_(initIterBag)( lk->heldBy );
florian6bf37262012-10-21 03:23:36 +0000359 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
sewardjf98e1c02008-10-25 16:22:41 +0000360 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000361 tl_assert(HG_(elemWS)( univ_lsets,
florian6bf37262012-10-21 03:23:36 +0000362 thr->locksetA, (UWord)lk ));
sewardjb4112022007-11-09 22:49:28 +0000363 thr->locksetA
florian6bf37262012-10-21 03:23:36 +0000364 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +0000365
366 if (lk->heldW) {
367 tl_assert(HG_(elemWS)( univ_lsets,
florian6bf37262012-10-21 03:23:36 +0000368 thr->locksetW, (UWord)lk ));
sewardjb4112022007-11-09 22:49:28 +0000369 thr->locksetW
florian6bf37262012-10-21 03:23:36 +0000370 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
sewardjb4112022007-11-09 22:49:28 +0000371 }
372 }
sewardj896f6f92008-08-19 08:38:52 +0000373 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000374}
375
sewardjb4112022007-11-09 22:49:28 +0000376
377/*----------------------------------------------------------------*/
378/*--- Print out the primary data structures ---*/
379/*----------------------------------------------------------------*/
380
sewardjb4112022007-11-09 22:49:28 +0000381#define PP_THREADS (1<<1)
382#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000383#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000384
385
386static const Int sHOW_ADMIN = 0;
387
388static void space ( Int n )
389{
390 Int i;
florian6bf37262012-10-21 03:23:36 +0000391 HChar spaces[128+1];
sewardjb4112022007-11-09 22:49:28 +0000392 tl_assert(n >= 0 && n < 128);
393 if (n == 0)
394 return;
395 for (i = 0; i < n; i++)
396 spaces[i] = ' ';
397 spaces[i] = 0;
398 tl_assert(i < 128+1);
399 VG_(printf)("%s", spaces);
400}
401
402static void pp_Thread ( Int d, Thread* t )
403{
404 space(d+0); VG_(printf)("Thread %p {\n", t);
405 if (sHOW_ADMIN) {
406 space(d+3); VG_(printf)("admin %p\n", t->admin);
407 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
408 }
409 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
410 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
sewardjb4112022007-11-09 22:49:28 +0000411 space(d+0); VG_(printf)("}\n");
412}
413
414static void pp_admin_threads ( Int d )
415{
416 Int i, n;
417 Thread* t;
418 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
419 /* nothing */
420 }
421 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
422 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
423 if (0) {
424 space(n);
425 VG_(printf)("admin_threads record %d of %d:\n", i, n);
426 }
427 pp_Thread(d+3, t);
428 }
barta0b6b2c2008-07-07 06:49:24 +0000429 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000430}
431
432static void pp_map_threads ( Int d )
433{
njn4c245e52009-03-15 23:25:38 +0000434 Int i, n = 0;
sewardjb4112022007-11-09 22:49:28 +0000435 space(d); VG_(printf)("map_threads ");
sewardjb4112022007-11-09 22:49:28 +0000436 for (i = 0; i < VG_N_THREADS; i++) {
437 if (map_threads[i] != NULL)
438 n++;
439 }
440 VG_(printf)("(%d entries) {\n", n);
441 for (i = 0; i < VG_N_THREADS; i++) {
442 if (map_threads[i] == NULL)
443 continue;
444 space(d+3);
445 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
446 }
447 space(d); VG_(printf)("}\n");
448}
449
450static const HChar* show_LockKind ( LockKind lkk ) {
451 switch (lkk) {
452 case LK_mbRec: return "mbRec";
453 case LK_nonRec: return "nonRec";
454 case LK_rdwr: return "rdwr";
455 default: tl_assert(0);
456 }
457}
458
459static void pp_Lock ( Int d, Lock* lk )
460{
barta0b6b2c2008-07-07 06:49:24 +0000461 space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
sewardjb4112022007-11-09 22:49:28 +0000462 if (sHOW_ADMIN) {
sewardj1d7c3322011-02-28 09:22:51 +0000463 space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
464 space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
465 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
sewardjb4112022007-11-09 22:49:28 +0000466 }
467 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
468 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
469 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
470 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
471 if (lk->heldBy) {
472 Thread* thr;
florian6bf37262012-10-21 03:23:36 +0000473 UWord count;
sewardjb4112022007-11-09 22:49:28 +0000474 VG_(printf)(" { ");
sewardj896f6f92008-08-19 08:38:52 +0000475 VG_(initIterBag)( lk->heldBy );
florian6bf37262012-10-21 03:23:36 +0000476 while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count ))
sewardjb4112022007-11-09 22:49:28 +0000477 VG_(printf)("%lu:%p ", count, thr);
sewardj896f6f92008-08-19 08:38:52 +0000478 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000479 VG_(printf)("}");
480 }
481 VG_(printf)("\n");
482 space(d+0); VG_(printf)("}\n");
483}
484
485static void pp_admin_locks ( Int d )
486{
487 Int i, n;
488 Lock* lk;
sewardj1d7c3322011-02-28 09:22:51 +0000489 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000490 /* nothing */
491 }
492 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
sewardj1d7c3322011-02-28 09:22:51 +0000493 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000494 if (0) {
495 space(n);
496 VG_(printf)("admin_locks record %d of %d:\n", i, n);
497 }
498 pp_Lock(d+3, lk);
499 }
barta0b6b2c2008-07-07 06:49:24 +0000500 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000501}
502
503static void pp_map_locks ( Int d )
504{
505 void* gla;
506 Lock* lk;
507 space(d); VG_(printf)("map_locks (%d entries) {\n",
sewardj896f6f92008-08-19 08:38:52 +0000508 (Int)VG_(sizeFM)( map_locks ));
509 VG_(initIterFM)( map_locks );
florian6bf37262012-10-21 03:23:36 +0000510 while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
511 (UWord*)&lk )) {
sewardjb4112022007-11-09 22:49:28 +0000512 space(d+3);
513 VG_(printf)("guest %p -> Lock %p\n", gla, lk);
514 }
sewardj896f6f92008-08-19 08:38:52 +0000515 VG_(doneIterFM)( map_locks );
sewardjb4112022007-11-09 22:49:28 +0000516 space(d); VG_(printf)("}\n");
517}
518
florian6bf37262012-10-21 03:23:36 +0000519static void pp_everything ( Int flags, const HChar* caller )
sewardjb4112022007-11-09 22:49:28 +0000520{
521 Int d = 0;
522 VG_(printf)("\n");
523 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
524 if (flags & PP_THREADS) {
525 VG_(printf)("\n");
526 pp_admin_threads(d+3);
527 VG_(printf)("\n");
528 pp_map_threads(d+3);
529 }
530 if (flags & PP_LOCKS) {
531 VG_(printf)("\n");
532 pp_admin_locks(d+3);
533 VG_(printf)("\n");
534 pp_map_locks(d+3);
535 }
sewardjb4112022007-11-09 22:49:28 +0000536
537 VG_(printf)("\n");
538 VG_(printf)("}\n");
539 VG_(printf)("\n");
540}
541
542#undef SHOW_ADMIN
543
544
545/*----------------------------------------------------------------*/
546/*--- Initialise the primary data structures ---*/
547/*----------------------------------------------------------------*/
548
sewardjf98e1c02008-10-25 16:22:41 +0000549static void initialise_data_structures ( Thr* hbthr_root )
sewardjb4112022007-11-09 22:49:28 +0000550{
sewardjb4112022007-11-09 22:49:28 +0000551 Thread* thr;
sewardjffce8152011-06-24 10:09:41 +0000552 WordSetID wsid;
sewardjb4112022007-11-09 22:49:28 +0000553
554 /* Get everything initialised and zeroed. */
555 tl_assert(admin_threads == NULL);
556 tl_assert(admin_locks == NULL);
sewardjb4112022007-11-09 22:49:28 +0000557
sewardjb4112022007-11-09 22:49:28 +0000558 tl_assert(map_threads == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000559 map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
sewardjb4112022007-11-09 22:49:28 +0000560 tl_assert(map_threads != NULL);
561
florian6bf37262012-10-21 03:23:36 +0000562 tl_assert(sizeof(Addr) == sizeof(UWord));
sewardjb4112022007-11-09 22:49:28 +0000563 tl_assert(map_locks == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000564 map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
565 NULL/*unboxed Word cmp*/);
sewardjb4112022007-11-09 22:49:28 +0000566 tl_assert(map_locks != NULL);
567
sewardjb4112022007-11-09 22:49:28 +0000568 tl_assert(univ_lsets == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000569 univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
570 8/*cacheSize*/ );
sewardjb4112022007-11-09 22:49:28 +0000571 tl_assert(univ_lsets != NULL);
sewardjffce8152011-06-24 10:09:41 +0000572 /* Ensure that univ_lsets is non-empty, with lockset zero being the
573 empty lockset. hg_errors.c relies on the assumption that
574 lockset number zero in univ_lsets is always valid. */
575 wsid = HG_(emptyWS)(univ_lsets);
576 tl_assert(wsid == 0);
sewardjb4112022007-11-09 22:49:28 +0000577
578 tl_assert(univ_laog == NULL);
sewardjc1fb9d22011-02-28 09:03:44 +0000579 if (HG_(clo_track_lockorders)) {
580 univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
581 HG_(free), 24/*cacheSize*/ );
582 tl_assert(univ_laog != NULL);
583 }
sewardjb4112022007-11-09 22:49:28 +0000584
585 /* Set up entries for the root thread */
586 // FIXME: this assumes that the first real ThreadId is 1
587
sewardjb4112022007-11-09 22:49:28 +0000588 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +0000589 thr = mk_Thread(hbthr_root);
590 thr->coretid = 1; /* FIXME: hardwires an assumption about the
591 identity of the root thread. */
sewardj60626642011-03-10 15:14:37 +0000592 tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
593 libhb_set_Thr_hgthread(hbthr_root, thr);
sewardjb4112022007-11-09 22:49:28 +0000594
sewardjf98e1c02008-10-25 16:22:41 +0000595 /* and bind it in the thread-map table. */
596 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
597 tl_assert(thr->coretid != VG_INVALID_THREADID);
sewardjb4112022007-11-09 22:49:28 +0000598
sewardjf98e1c02008-10-25 16:22:41 +0000599 map_threads[thr->coretid] = thr;
sewardjb4112022007-11-09 22:49:28 +0000600
601 tl_assert(VG_INVALID_THREADID == 0);
602
sewardjb4112022007-11-09 22:49:28 +0000603 all__sanity_check("initialise_data_structures");
604}
605
606
607/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000608/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000609/*----------------------------------------------------------------*/
610
611/* Doesn't assert if the relevant map_threads entry is NULL. */
612static Thread* map_threads_maybe_lookup ( ThreadId coretid )
613{
614 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000615 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000616 thr = map_threads[coretid];
617 return thr;
618}
619
620/* Asserts if the relevant map_threads entry is NULL. */
621static inline Thread* map_threads_lookup ( ThreadId coretid )
622{
623 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000624 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000625 thr = map_threads[coretid];
626 tl_assert(thr);
627 return thr;
628}
629
sewardjf98e1c02008-10-25 16:22:41 +0000630/* Do a reverse lookup. Does not assert if 'thr' is not found in
631 map_threads. */
sewardjb4112022007-11-09 22:49:28 +0000632static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
633{
sewardjf98e1c02008-10-25 16:22:41 +0000634 ThreadId tid;
635 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000636 /* Check nobody used the invalid-threadid slot */
637 tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
638 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000639 tid = thr->coretid;
640 tl_assert(HG_(is_sane_ThreadId)(tid));
641 return tid;
sewardjb4112022007-11-09 22:49:28 +0000642}
643
644/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
645 is not found in map_threads. */
646static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
647{
648 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
649 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000650 tl_assert(map_threads[tid]);
651 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000652 return tid;
653}
654
655static void map_threads_delete ( ThreadId coretid )
656{
657 Thread* thr;
658 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000659 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000660 thr = map_threads[coretid];
661 tl_assert(thr);
662 map_threads[coretid] = NULL;
663}
664
665
666/*----------------------------------------------------------------*/
667/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
668/*----------------------------------------------------------------*/
669
670/* Make sure there is a lock table entry for the given (lock) guest
671 address. If not, create one of the stated 'kind' in unheld state.
672 In any case, return the address of the existing or new Lock. */
673static
674Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
675{
676 Bool found;
677 Lock* oldlock = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000678 tl_assert(HG_(is_sane_ThreadId)(tid));
sewardj896f6f92008-08-19 08:38:52 +0000679 found = VG_(lookupFM)( map_locks,
florian6bf37262012-10-21 03:23:36 +0000680 NULL, (UWord*)&oldlock, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000681 if (!found) {
682 Lock* lock = mk_LockN(lkk, ga);
683 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
sewardjf98e1c02008-10-25 16:22:41 +0000684 tl_assert(HG_(is_sane_LockN)(lock));
florian6bf37262012-10-21 03:23:36 +0000685 VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +0000686 tl_assert(oldlock == NULL);
sewardjb4112022007-11-09 22:49:28 +0000687 return lock;
688 } else {
689 tl_assert(oldlock != NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000690 tl_assert(HG_(is_sane_LockN)(oldlock));
sewardjb4112022007-11-09 22:49:28 +0000691 tl_assert(oldlock->guestaddr == ga);
sewardjb4112022007-11-09 22:49:28 +0000692 return oldlock;
693 }
694}
695
696static Lock* map_locks_maybe_lookup ( Addr ga )
697{
698 Bool found;
699 Lock* lk = NULL;
florian6bf37262012-10-21 03:23:36 +0000700 found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000701 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000702 return lk;
703}
704
705static void map_locks_delete ( Addr ga )
706{
707 Addr ga2 = 0;
708 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000709 VG_(delFromFM)( map_locks,
florian6bf37262012-10-21 03:23:36 +0000710 (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
sewardjb4112022007-11-09 22:49:28 +0000711 /* delFromFM produces the val which is being deleted, if it is
712 found. So assert it is non-null; that in effect asserts that we
713 are deleting a (ga, Lock) pair which actually exists. */
714 tl_assert(lk != NULL);
715 tl_assert(ga2 == ga);
716}
717
718
sewardjb4112022007-11-09 22:49:28 +0000719
720/*----------------------------------------------------------------*/
721/*--- Sanity checking the data structures ---*/
722/*----------------------------------------------------------------*/
723
/* Number of sanity-check invocations; incremented by
   all_except_Locks__sanity_check below. */
static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000727
728/* REQUIRED INVARIANTS:
729
730 Thread vs Segment/Lock/SecMaps
731
732 for each t in Threads {
733
734 // Thread.lockset: each element is really a valid Lock
735
736 // Thread.lockset: each Lock in set is actually held by that thread
737 for lk in Thread.lockset
738 lk == LockedBy(t)
739
740 // Thread.csegid is a valid SegmentID
741 // and the associated Segment has .thr == t
742
743 }
744
745 all thread Locksets are pairwise empty under intersection
746 (that is, no lock is claimed to be held by more than one thread)
747 -- this is guaranteed if all locks in locksets point back to their
748 owner threads
749
750 Lock vs Thread/Segment/SecMaps
751
752 for each entry (gla, la) in map_locks
753 gla == la->guest_addr
754
755 for each lk in Locks {
756
757 lk->tag is valid
758 lk->guest_addr does not have shadow state NoAccess
759 if lk == LockedBy(t), then t->lockset contains lk
760 if lk == UnlockedBy(segid) then segid is valid SegmentID
761 and can be mapped to a valid Segment(seg)
762 and seg->thr->lockset does not contain lk
763 if lk == UnlockedNew then (no lockset contains lk)
764
765 secmaps for lk has .mbHasLocks == True
766
767 }
768
769 Segment vs Thread/Lock/SecMaps
770
771 the Segment graph is a dag (no cycles)
772 all of the Segment graph must be reachable from the segids
773 mentioned in the Threads
774
775 for seg in Segments {
776
777 seg->thr is a sane Thread
778
779 }
780
781 SecMaps vs Segment/Thread/Lock
782
783 for sm in SecMaps {
784
785 sm properly aligned
786 if any shadow word is ShR or ShM then .mbHasShared == True
787
788 for each Excl(segid) state
789 map_segments_lookup maps to a sane Segment(seg)
790 for each ShM/ShR(tsetid,lsetid) state
791 each lk in lset is a valid Lock
792 each thr in tset is a valid thread, which is non-dead
793
794 }
795*/
796
797
798/* Return True iff 'thr' holds 'lk' in some mode. */
799static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
800{
801 if (lk->heldBy)
florian6bf37262012-10-21 03:23:36 +0000802 return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000803 else
804 return False;
805}
806
/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
   /* Walk admin_threads and verify each thread's locksets: the
      W-held set is a subset of the all-held set, every member of the
      all-held set is a sane Lock, and each such Lock's holder bag
      mentions the thread.  'who' tags any failure message with the
      call site; any violation aborts via tl_assert(0). */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   UWord ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
840
841
/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
   /* Cross-check the admin_locks list against map_locks (equal
      cardinality, mutually consistent (guestaddr, Lock) bindings),
      then for every lock verify that each holding thread's locksets
      agree with the lock's heldBy bag and heldW flag.  'who' tags
      any failure message; any violation aborts via tl_assert(0). */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   // gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            // every holder's all-held lockset must mention lk
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
906
907
/* Run every sanity check except the lock checks: threads always, and
   the lock-order acquisition graph only when lock-order tracking is
   enabled.  'who' tags any failure report with the call site. */
static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
/* Run every sanity check, including the lock checks. */
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
919
920
921/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +0000922/*--- Shadow value and address range handlers ---*/
923/*----------------------------------------------------------------*/
924
925static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +0000926//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000927static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +0000928__attribute__((noinline))
929static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000930
sewardjb4112022007-11-09 22:49:28 +0000931
/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   /* Delegate to libhb: copy 'len' bytes of shadow state from 'src'
      to 'dst' on behalf of thr's libhb-level thread. */
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
942
/* Notify libhb that 'thr' performed a client read of [a, a+len). */
static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}
949
/* Notify libhb that 'thr' performed a client write of [a, a+len). */
static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}
955
/* Paint the shadow state of [a, a+len) as 'New' for 'thr'. */
static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}
960
sewardjfd35d492011-03-17 19:39:55 +0000961static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
sewardjb4112022007-11-09 22:49:28 +0000962{
sewardjb4112022007-11-09 22:49:28 +0000963 if (0 && len > 500)
sewardjfd35d492011-03-17 19:39:55 +0000964 VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
965 // has no effect (NoFX)
966 libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
967}
968
969static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
970{
971 if (0 && len > 500)
972 VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
973 // Actually Has An Effect (AHAE)
974 libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +0000975}
976
sewardj406bac82010-03-03 23:03:40 +0000977static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
978{
979 if (0 && len > 500)
980 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
981 libhb_srange_untrack( thr->hbthr, aIN, len );
982}
983
sewardjb4112022007-11-09 22:49:28 +0000984
985/*----------------------------------------------------------------*/
986/*--- Event handlers (evh__* functions) ---*/
987/*--- plus helpers (evhH__* functions) ---*/
988/*----------------------------------------------------------------*/
989
990/*--------- Event handler helpers (evhH__* functions) ---------*/
991
992/* Create a new segment for 'thr', making it depend (.prev) on its
993 existing segment, bind together the SegmentID and Segment, and
994 return both of them. Also update 'thr' so it references the new
995 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +0000996//zz static
997//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
998//zz /*OUT*/Segment** new_segP,
999//zz Thread* thr )
1000//zz {
1001//zz Segment* cur_seg;
1002//zz tl_assert(new_segP);
1003//zz tl_assert(new_segidP);
1004//zz tl_assert(HG_(is_sane_Thread)(thr));
1005//zz cur_seg = map_segments_lookup( thr->csegid );
1006//zz tl_assert(cur_seg);
1007//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1008//zz at their owner thread. */
1009//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1010//zz *new_segidP = alloc_SegmentID();
1011//zz map_segments_add( *new_segidP, *new_segP );
1012//zz thr->csegid = *new_segidP;
1013//zz }
sewardjb4112022007-11-09 22:49:28 +00001014
1015
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   /* Both the success and failure paths re-establish the sanity
      postcondition on lk. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1112
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   /* Both the success and failure paths re-establish the sanity
      postcondition on lk. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1186
1187
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static 
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* Complain (but continue) if the caller is unlocking via the wrong
      family of pthread functions for this lock's kind. */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr )); 
      /* We still hold the lock.  So either it's a recursive lock 
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1326
1327
sewardj9f569b72008-11-13 13:33:09 +00001328/* ---------------------------------------------------------- */
1329/* -------- Event handlers proper (evh__* functions) -------- */
1330/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001331
1332/* What is the Thread* for the currently running thread? This is
1333 absolutely performance critical. We receive notifications from the
1334 core for client code starts/stops, and cache the looked-up result
1335 in 'current_Thread'. Hence, for the vast majority of requests,
1336 finding the current thread reduces to a read of a global variable,
1337 provided get_current_Thread_in_C_C is inlined.
1338
1339 Outside of client code, current_Thread is NULL, and presumably
1340 any uses of it will cause a segfault. Hence:
1341
1342 - for uses definitely within client code, use
1343 get_current_Thread_in_C_C.
1344
1345 - for all other uses, use get_current_Thread.
1346*/
1347
/* Cache of the Thread* for the currently-running thread (see comment
   above).  current_Thread_prev remembers the previous value so that
   evh__start_client_code only notifies libhb on an actual thread
   switch. */
static Thread *current_Thread = NULL,
              *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001350
/* The core notifies us that 'tid' is about to run client code.
   Cache its Thread* in current_Thread (so get_current_Thread_in_C_C
   is just a variable read) and, if the running thread has changed
   since last time, tell libhb it is resuming. */
static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
/* The core notifies us that 'tid' has stopped running client code.
   Clear the cached Thread* and give libhb an opportunity to GC. */
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
/* Fast path: returns the cached Thread*, which is non-NULL only
   while client code is running (see comment above). */
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
/* General-purpose lookup of the current Thread: use the cached value
   when client code is running, else fall back to asking the core for
   the running ThreadId and mapping it. */
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread* thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
1387
1388static
1389void evh__new_mem ( Addr a, SizeT len ) {
1390 if (SHOW_EVENTS >= 2)
1391 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1392 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001393 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001394 all__sanity_check("evh__new_mem-post");
1395}
1396
/* A new stack range has appeared.  The painted range is extended
   downwards by VG_STACK_REDZONE_SZB so the redzone below the stack
   pointer is covered as well. */
static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}
1406
1407static
sewardj7cf4e6b2008-05-01 20:24:26 +00001408void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1409 if (SHOW_EVENTS >= 2)
1410 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1411 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001412 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001413 all__sanity_check("evh__new_mem_w_tid-post");
1414}
1415
/* New memory with explicit permissions (e.g. from mmap).  Only paint
   it New if it is accessible in at least one of r/w/x modes.
   'di_handle' (debuginfo handle) is unused here. */
static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}
1427
1428static
1429void evh__set_perms ( Addr a, SizeT len,
1430 Bool rr, Bool ww, Bool xx ) {
sewardjfd35d492011-03-17 19:39:55 +00001431 // This handles mprotect requests. If the memory is being put
1432 // into no-R no-W state, paint it as NoAccess, for the reasons
1433 // documented at evh__die_mem_munmap().
sewardjb4112022007-11-09 22:49:28 +00001434 if (SHOW_EVENTS >= 1)
sewardjfd35d492011-03-17 19:39:55 +00001435 VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
sewardjb4112022007-11-09 22:49:28 +00001436 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1437 /* Hmm. What should we do here, that actually makes any sense?
1438 Let's say: if neither readable nor writable, then declare it
1439 NoAccess, else leave it alone. */
1440 if (!(rr || ww))
sewardjfd35d492011-03-17 19:39:55 +00001441 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001442 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001443 all__sanity_check("evh__set_perms-post");
1444}
1445
1446static
1447void evh__die_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001448 // Urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001449 if (SHOW_EVENTS >= 2)
1450 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
sewardjfd35d492011-03-17 19:39:55 +00001451 shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001452 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001453 all__sanity_check("evh__die_mem-post");
1454}
1455
1456static
sewardjfd35d492011-03-17 19:39:55 +00001457void evh__die_mem_munmap ( Addr a, SizeT len ) {
1458 // It's important that libhb doesn't ignore this. If, as is likely,
1459 // the client is subject to address space layout randomization,
1460 // then unmapped areas may never get remapped over, even in long
1461 // runs. If we just ignore them we wind up with large resource
1462 // (VTS) leaks in libhb. So force them to NoAccess, so that all
1463 // VTS references in the affected area are dropped. Marking memory
1464 // as NoAccess is expensive, but we assume that munmap is sufficiently
1465 // rare that the space gains of doing this are worth the costs.
1466 if (SHOW_EVENTS >= 2)
1467 VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1468 shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1469}
1470
1471static
sewardj406bac82010-03-03 23:03:40 +00001472void evh__untrack_mem ( Addr a, SizeT len ) {
sewardjfd35d492011-03-17 19:39:55 +00001473 // Libhb doesn't ignore this.
sewardj406bac82010-03-03 23:03:40 +00001474 if (SHOW_EVENTS >= 2)
1475 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1476 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1477 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1478 all__sanity_check("evh__untrack_mem-post");
1479}
1480
1481static
sewardj23f12002009-07-24 08:45:08 +00001482void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1483 if (SHOW_EVENTS >= 2)
1484 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1485 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1486 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1487 all__sanity_check("evh__copy_mem-post");
1488}
1489
/* Low-level thread creation notification.  Creates and registers a
   Thread/Thr pair for 'child', derived from 'parent''s libhb thread,
   and records the creation point for later error messages.  The
   parent==VG_INVALID_THREADID case (boot thread) is a no-op here. */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      /* The parent must already be registered; the child must not. */
      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      /* New libhb thread, causally after the parent at this point. */
      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      /* Link the Thr back to its Thread (two-way association). */
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1552
/* Low-level thread exit notification for 'quit_tid'.  Reports
   still-held locks, notifies libhb of the (asynchronous) exit, and
   frees the map_threads slot so the core can re-use it. */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress. That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works). In which case there has already been a prior
      sync event. So in any case, just let the thread exit. On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   /* Break the Thread -> core-tid binding before freeing the slot. */
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1605
/* This is called immediately after fork, for the child only. 'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
1634
sewardjf98e1c02008-10-25 16:22:41 +00001635
/* pthread_join completed: 'stay_tid' has reaped 'quit_thr'.  Creates
   a happens-before edge from the quitter to the stayer via a
   temporary SO, then tells libhb the quitter has been reaped. */
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Send last arg of _so_send as False, since the sending thread
      doesn't actually exist any more, so we don't want _so_send to
      try taking stack snapshots of it. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped. Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls. (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.) See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks. No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it. The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it. Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1702
1703static
floriane543f302012-10-21 19:43:43 +00001704void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001705 Addr a, SizeT size) {
1706 if (SHOW_EVENTS >= 2
1707 || (SHOW_EVENTS >= 1 && size != 1))
1708 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1709 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001710 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001711 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001712 all__sanity_check("evh__pre_mem_read-post");
1713}
1714
1715static
1716void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
floriane543f302012-10-21 19:43:43 +00001717 const HChar* s, Addr a ) {
sewardjb4112022007-11-09 22:49:28 +00001718 Int len;
1719 if (SHOW_EVENTS >= 1)
1720 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1721 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001722 // Don't segfault if the string starts in an obviously stupid
1723 // place. Actually we should check the whole string, not just
1724 // the start address, but that's too much trouble. At least
1725 // checking the first byte is better than nothing. See #255009.
1726 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1727 return;
florian19f91bb2012-11-10 22:29:54 +00001728 len = VG_(strlen)( (HChar*) a );
sewardj23f12002009-07-24 08:45:08 +00001729 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001730 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001731 all__sanity_check("evh__pre_mem_read_asciiz-post");
1732}
1733
1734static
floriane543f302012-10-21 19:43:43 +00001735void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
sewardjb4112022007-11-09 22:49:28 +00001736 Addr a, SizeT size ) {
1737 if (SHOW_EVENTS >= 1)
1738 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1739 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001740 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001741 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001742 all__sanity_check("evh__pre_mem_write-post");
1743}
1744
1745static
1746void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1747 if (SHOW_EVENTS >= 1)
1748 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1749 (void*)a, len, (Int)is_inited );
1750 // FIXME: this is kinda stupid
1751 if (is_inited) {
1752 shadow_mem_make_New(get_current_Thread(), a, len);
1753 } else {
1754 shadow_mem_make_New(get_current_Thread(), a, len);
1755 }
sewardjf98e1c02008-10-25 16:22:41 +00001756 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001757 all__sanity_check("evh__pre_mem_read-post");
1758}
1759
1760static
1761void evh__die_mem_heap ( Addr a, SizeT len ) {
sewardj622fe492011-03-11 21:06:59 +00001762 Thread* thr;
sewardjb4112022007-11-09 22:49:28 +00001763 if (SHOW_EVENTS >= 1)
1764 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
sewardj622fe492011-03-11 21:06:59 +00001765 thr = get_current_Thread();
1766 tl_assert(thr);
1767 if (HG_(clo_free_is_write)) {
1768 /* Treat frees as if the memory was written immediately prior to
1769 the free. This shakes out more races, specifically, cases
1770 where memory is referenced by one thread, and freed by
1771 another, and there's no observable synchronisation event to
1772 guarantee that the reference happens before the free. */
1773 shadow_mem_cwrite_range(thr, a, len);
1774 }
sewardjfd35d492011-03-17 19:39:55 +00001775 shadow_mem_make_NoAccess_NoFX( thr, a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001776 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001777 all__sanity_check("evh__pre_mem_read-post");
1778}
1779
sewardj23f12002009-07-24 08:45:08 +00001780/* --- Event handlers called from generated code --- */
1781
sewardjb4112022007-11-09 22:49:28 +00001782static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001783void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001784 Thread* thr = get_current_Thread_in_C_C();
1785 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001786 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001787}
sewardjf98e1c02008-10-25 16:22:41 +00001788
sewardjb4112022007-11-09 22:49:28 +00001789static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001790void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001791 Thread* thr = get_current_Thread_in_C_C();
1792 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001793 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001794}
sewardjf98e1c02008-10-25 16:22:41 +00001795
sewardjb4112022007-11-09 22:49:28 +00001796static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001797void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001798 Thread* thr = get_current_Thread_in_C_C();
1799 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001800 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001801}
sewardjf98e1c02008-10-25 16:22:41 +00001802
sewardjb4112022007-11-09 22:49:28 +00001803static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001804void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001805 Thread* thr = get_current_Thread_in_C_C();
1806 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001807 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001808}
sewardjf98e1c02008-10-25 16:22:41 +00001809
sewardjb4112022007-11-09 22:49:28 +00001810static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001811void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001812 Thread* thr = get_current_Thread_in_C_C();
1813 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001814 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001815}
1816
1817static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001818void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001819 Thread* thr = get_current_Thread_in_C_C();
1820 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001821 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001822}
sewardjf98e1c02008-10-25 16:22:41 +00001823
sewardjb4112022007-11-09 22:49:28 +00001824static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001825void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001826 Thread* thr = get_current_Thread_in_C_C();
1827 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001828 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001829}
sewardjf98e1c02008-10-25 16:22:41 +00001830
sewardjb4112022007-11-09 22:49:28 +00001831static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001832void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001833 Thread* thr = get_current_Thread_in_C_C();
1834 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001835 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001836}
sewardjf98e1c02008-10-25 16:22:41 +00001837
sewardjb4112022007-11-09 22:49:28 +00001838static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001839void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001840 Thread* thr = get_current_Thread_in_C_C();
1841 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001842 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001843}
sewardjf98e1c02008-10-25 16:22:41 +00001844
sewardjb4112022007-11-09 22:49:28 +00001845static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001846void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001847 Thread* thr = get_current_Thread_in_C_C();
1848 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001849 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001850}
1851
sewardjb4112022007-11-09 22:49:28 +00001852
sewardj9f569b72008-11-13 13:33:09 +00001853/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001854/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001855/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001856
1857/* EXPOSITION only: by intercepting lock init events we can show the
1858 user where the lock was initialised, rather than only being able to
1859 show where it was first locked. Intercepting lock initialisations
1860 is not necessary for the basic operation of the race checker. */
1861static
1862void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1863 void* mutex, Word mbRec )
1864{
1865 if (SHOW_EVENTS >= 1)
1866 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1867 (Int)tid, mbRec, (void*)mutex );
1868 tl_assert(mbRec == 0 || mbRec == 1);
1869 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1870 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001871 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001872 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1873}
1874
/* pthread_mutex_destroy about to run.  Complains about invalid or
   still-locked arguments, force-releases the lock if held, and
   removes it from the lock map / LAOG. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Unknown address, or known but not a mutex kind: bad argument. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop it from the lock-order graph before deleting the Lock. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1921
/* pthread_mutex_lock/trylock about to run.  Pure error checking:
   reports locking a rwlock through the mutex API, and reports the
   guaranteed-deadlock case of re-locking a held non-recursive lock
   (only for real lock ops, not trylocks). */
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Using a rwlock through the mutex API is an error in itself. */
   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing). Duh. Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      /* Attach the original acquisition context when we have one. */
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
1964
1965static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1966{
1967 // only called if the real library call succeeded - so mutex is sane
1968 Thread* thr;
1969 if (SHOW_EVENTS >= 1)
1970 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1971 (Int)tid, (void*)mutex );
1972
1973 thr = map_threads_maybe_lookup( tid );
1974 tl_assert(thr); /* cannot fail - Thread* must already exist */
1975
1976 evhH__post_thread_w_acquires_lock(
1977 thr,
1978 LK_mbRec, /* if not known, create new lock with this LockKind */
1979 (Addr)mutex
1980 );
1981}
1982
1983static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1984{
1985 // 'mutex' may be invalid - not checked by wrapper
1986 Thread* thr;
1987 if (SHOW_EVENTS >= 1)
1988 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1989 (Int)tid, (void*)mutex );
1990
1991 thr = map_threads_maybe_lookup( tid );
1992 tl_assert(thr); /* cannot fail - Thread* must already exist */
1993
1994 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1995}
1996
1997static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1998{
1999 // only called if the real library call succeeded - so mutex is sane
2000 Thread* thr;
2001 if (SHOW_EVENTS >= 1)
2002 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2003 (Int)tid, (void*)mutex );
2004 thr = map_threads_maybe_lookup( tid );
2005 tl_assert(thr); /* cannot fail - Thread* must already exist */
2006
2007 // anything we should do here?
2008}
2009
2010
sewardj5a644da2009-08-11 10:35:58 +00002011/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002012/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002013/* ------------------------------------------------------- */
2014
2015/* All a bit of a kludge. Pretend we're really dealing with ordinary
2016 pthread_mutex_t's instead, for the most part. */
2017
/* Shared pre-handler for pthread_spin_init and pthread_spin_unlock
   (glibc funnels both through the same place).  If the spinlock is
   currently held, perform normal pre-unlock actions; else no-op. */
static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it. Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge. Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */;
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held. So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                      False/*!isRDWR*/ );
   }
}
2045
2046static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2047 void* slock )
2048{
2049 Lock* lk;
2050 /* More kludgery. If the lock has never been seen before, do
2051 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2052 nothing. */
2053
2054 if (SHOW_EVENTS >= 1)
2055 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2056 "(ctid=%d, slock=%p)\n",
2057 (Int)tid, (void*)slock );
2058
2059 lk = map_locks_maybe_lookup( (Addr)slock );
2060 if (!lk) {
2061 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2062 }
2063}
2064
/* Spinlock lock-attempt: delegate to the mutex pre-lock checks. */
static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}
2070
/* Spinlock acquired: delegate to the mutex post-lock handling. */
static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}
2076
/* Spinlock about to be destroyed: delegate to the mutex destroy
   handling. */
static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}
2082
2083
sewardj9f569b72008-11-13 13:33:09 +00002084/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002085/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002086/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002087
sewardj02114542009-07-28 20:52:36 +00002088/* A mapping from CV to (the SO associated with it, plus some
2089 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002090 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2091 wait on it completes, we do a 'recv' from the SO. This is believed
2092 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002093 signallings/broadcasts.
2094*/
2095
sewardj02114542009-07-28 20:52:36 +00002096/* .so is the SO for this CV.
2097 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002098
sewardj02114542009-07-28 20:52:36 +00002099 POSIX says effectively that the first pthread_cond_{timed}wait call
2100 causes a dynamic binding between the CV and the mutex, and that
2101 lasts until such time as the waiter count falls to zero. Hence
2102 need to keep track of the number of waiters in order to do
2103 consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO; carries the happens-before
                         edges from signallers to waiters of this CV */
      void* mx_ga;    /* addr of associated mutex, if any; set by the
                         first waiter, cleared when nWaiters drops to 0 */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;


/* pthread_cond_t* -> CVInfo*.  Lazily created; see
   map_cond_to_CVInfo_INIT. */
static WordFM* map_cond_to_CVInfo = NULL;
2115
2116static void map_cond_to_CVInfo_INIT ( void ) {
2117 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2118 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2119 "hg.mctCI.1", HG_(free), NULL );
2120 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002121 }
2122}
2123
sewardj02114542009-07-28 20:52:36 +00002124static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002125 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002126 map_cond_to_CVInfo_INIT();
2127 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002128 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002129 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002130 } else {
sewardj02114542009-07-28 20:52:36 +00002131 SO* so = libhb_so_alloc();
2132 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2133 cvi->so = so;
2134 cvi->mx_ga = 0;
2135 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2136 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002137 }
2138}
2139
philippe8bfc2152012-07-06 23:38:24 +00002140static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2141 UWord key, val;
2142 map_cond_to_CVInfo_INIT();
2143 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2144 tl_assert(key == (UWord)cond);
2145 return (CVInfo*)val;
2146 } else {
2147 return NULL;
2148 }
2149}
2150
/* Remove and free the CVInfo for 'cond' (on pthread_cond_destroy).
   Reports a Misc error if the CV is still being waited upon, or if
   the CV was never seen at all. */
static void map_cond_to_CVInfo_delete ( ThreadId tid, void* cond ) {
   Thread* thr;
   UWord keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      /* Destroying a CV with waiters is guest misbehaviour; report
         it, but carry on and free our shadow state anyway. */
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(thr,
                                "pthread_cond_destroy:"
                                " destruction of condition variable being waited upon");
      }
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      /* No shadow state: destroy of a CV we never saw initialised
         or waited on. */
      HG_(record_error_Misc)(thr,
                             "pthread_cond_destroy: destruction of unknown cond var");
   }
}
2177
/* PRE handler for pthread_cond_signal/broadcast.  Performs sanity
   diagnostics on the (CV,MX) binding, then does a strong 'send' on
   the CV's SO so later waiters acquire a dependency on this event. */
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n", 
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call 
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      /* cvi->mx_ga is only known once some thread has waited on this
         CV; until then it is 0 and no mutex checking is possible. */
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL.  Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error.  We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and if that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr, 
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   /* The actual happens-before edge: install this thread's vector
      clock in the CV's SO. */
   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2256
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False.  Also establishes or checks the (CV,MX)
   binding and bumps the CV's waiter count. */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n", 
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)( 
         thr, 
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
         if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)( 
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   /* Count this thread as waiting; the matching decrement happens in
      evh__HG_PTHREAD_COND_WAIT_POST. */
   cvi->nWaiters++;

   return lk_valid;
}
2325
2326static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2327 void* cond, void* mutex )
2328{
sewardjf98e1c02008-10-25 16:22:41 +00002329 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2330 the SO for this cond, and 'recv' from it so as to acquire a
2331 dependency edge back to the signaller/broadcaster. */
2332 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002333 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002334
2335 if (SHOW_EVENTS >= 1)
2336 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2337 "(ctid=%d, cond=%p, mutex=%p)\n",
2338 (Int)tid, (void*)cond, (void*)mutex );
2339
sewardjb4112022007-11-09 22:49:28 +00002340 thr = map_threads_maybe_lookup( tid );
2341 tl_assert(thr); /* cannot fail - Thread* must already exist */
2342
2343 // error-if: cond is also associated with a different mutex
2344
philippe8bfc2152012-07-06 23:38:24 +00002345 cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2346 if (!cvi) {
2347 /* This could be either a bug in helgrind or the guest application
2348 that did an error (e.g. cond var was destroyed by another thread.
2349 Let's assume helgrind is perfect ...
2350 Note that this is similar to drd behaviour. */
2351 HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2352 " being waited upon");
2353 return;
2354 }
2355
sewardj02114542009-07-28 20:52:36 +00002356 tl_assert(cvi);
2357 tl_assert(cvi->so);
2358 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002359
sewardj02114542009-07-28 20:52:36 +00002360 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002361 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2362 it? If this happened it would surely be a bug in the threads
2363 library. Or one of those fabled "spurious wakeups". */
2364 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
sewardjffce8152011-06-24 10:09:41 +00002365 "succeeded"
sewardjf98e1c02008-10-25 16:22:41 +00002366 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002367 }
sewardjf98e1c02008-10-25 16:22:41 +00002368
2369 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002370 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2371
2372 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002373}
2374
/* PRE handler for pthread_cond_destroy. */
static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n", 
                  (Int)tid, (void*)cond );

   /* Error reporting (waiters present / unknown CV) happens inside
      the delete helper. */
   map_cond_to_CVInfo_delete( tid, cond );
}
2388
2389
sewardj9f569b72008-11-13 13:33:09 +00002390/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002391/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002392/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002393
/* EXPOSITION only */
/* POST handler for pthread_rwlock_init: create shadow state for the
   rwlock (kind LK_rdwr), attributed to 'tid'. */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n", 
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   /* Optional expensive invariant checking, gated by --hg-sanity-flags. */
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
2405
/* PRE handler for pthread_rwlock_destroy: complain about bogus or
   still-locked arguments, then tear down the lock's shadow state
   (locksets, lock-order graph, address map). */
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n", 
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         /* NOTE(review): message says "mutex" although the object is a
            rwlock -- confirm whether the wording is intentional. */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );
      
      /* Drop it from the lock-order acquisition graph (if tracked),
         then from the address map, then free it. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2452
/* PRE handler for pthread_rwlock_{rd,wr,tryrd,trywr}lock: only sanity
   checks the argument; the real state change is in LOCK_POST.
   isW: 1 for a write lock, 0 for read; isTryLock: 1 for try-variants. */
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk 
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh.  */
      HG_(record_error_Misc)( 
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
2480
2481static
2482void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2483{
2484 // only called if the real library call succeeded - so mutex is sane
2485 Thread* thr;
2486 if (SHOW_EVENTS >= 1)
2487 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2488 (Int)tid, (Int)isW, (void*)rwl );
2489
2490 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2491 thr = map_threads_maybe_lookup( tid );
2492 tl_assert(thr); /* cannot fail - Thread* must already exist */
2493
2494 (isW ? evhH__post_thread_w_acquires_lock
2495 : evhH__post_thread_r_acquires_lock)(
2496 thr,
2497 LK_rdwr, /* if not known, create new lock with this LockKind */
2498 (Addr)rwl
2499 );
2500}
2501
/* PRE handler for pthread_rwlock_unlock: do the release bookkeeping
   (isRDWR=True tells the helper this is a rwlock, not a mutex). */
static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n", 
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}
2515
/* POST handler for a successful pthread_rwlock_unlock.  Currently a
   no-op apart from tracing; all work is done in UNLOCK_PRE. */
static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n", 
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
2528
2529
sewardj9f569b72008-11-13 13:33:09 +00002530/* ---------------------------------------------------------- */
2531/* -------------- events to do with semaphores -------------- */
2532/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002533
sewardj11e352f2007-11-30 11:11:02 +00002534/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002535 variables. */
2536
sewardjf98e1c02008-10-25 16:22:41 +00002537/* For each semaphore, we maintain a stack of SOs. When a 'post'
2538 operation is done on a semaphore (unlocking, essentially), a new SO
2539 is created for the posting thread, the posting thread does a strong
2540 send to it (which merely installs the posting thread's VC in the
2541 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002542
2543 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002544 semaphore, we pop a SO off the semaphore's stack (which should be
2545 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002546 dependencies between posters and waiters of the semaphore.
2547
sewardjf98e1c02008-10-25 16:22:41 +00002548 It may not be necessary to use a stack - perhaps a bag of SOs would
2549 do. But we do need to keep track of how many unused-up posts have
2550 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002551
sewardjf98e1c02008-10-25 16:22:41 +00002552 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002553 twice on S. T3 cannot complete its waits without both T1 and T2
2554 posting. The above mechanism will ensure that T3 acquires
2555 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002556
sewardjf98e1c02008-10-25 16:22:41 +00002557 When a semaphore is initialised with value N, we do as if we'd
2558 posted N times on the semaphore: basically create N SOs and do a
2559 strong send to all of then. This allows up to N waits on the
2560 semaphore to acquire a dependency on the initialisation point,
2561 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002562
2563 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2564 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002565*/
2566
sewardjf98e1c02008-10-25 16:22:41 +00002567/* sem_t* -> XArray* SO* */
2568static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002569
sewardjf98e1c02008-10-25 16:22:41 +00002570static void map_sem_to_SO_stack_INIT ( void ) {
2571 if (map_sem_to_SO_stack == NULL) {
2572 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2573 HG_(free), NULL );
2574 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002575 }
2576}
2577
sewardjf98e1c02008-10-25 16:22:41 +00002578static void push_SO_for_sem ( void* sem, SO* so ) {
2579 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002580 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002581 tl_assert(so);
2582 map_sem_to_SO_stack_INIT();
2583 if (VG_(lookupFM)( map_sem_to_SO_stack,
2584 &keyW, (UWord*)&xa, (UWord)sem )) {
2585 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002586 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002587 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002588 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002589 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2590 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002591 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002592 }
2593}
2594
sewardjf98e1c02008-10-25 16:22:41 +00002595static SO* mb_pop_SO_for_sem ( void* sem ) {
2596 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002597 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002598 SO* so;
2599 map_sem_to_SO_stack_INIT();
2600 if (VG_(lookupFM)( map_sem_to_SO_stack,
2601 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002602 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002603 Word sz;
2604 tl_assert(keyW == (UWord)sem);
2605 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002606 tl_assert(sz >= 0);
2607 if (sz == 0)
2608 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002609 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2610 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002611 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002612 return so;
sewardjb4112022007-11-09 22:49:28 +00002613 } else {
2614 /* hmm, that's odd. No stack for this semaphore. */
2615 return NULL;
2616 }
2617}
2618
/* PRE handler for sem_destroy: free all SOs stacked for this
   semaphore and delete its stack.  Silently does nothing for an
   unknown semaphore (see the section comment above: "We should"
   report that, but currently do not). */
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n", 
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* Now drop the (empty) stack itself. */
   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
2646
/* POST handler for sem_init(sem, ..., value): discard any stale SOs
   for this semaphore, then act as if 'value' posts had already
   happened, so up to 'value' waits can acquire a dependency on the
   initialisation point. */
static 
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n", 
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'valid' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
2687
/* PRE handler for sem_post. */
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped of the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n", 
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   /* One fresh SO per post, so each post can pair with exactly one
      later wait. */
   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}
2718
/* POST handler for a successful sem_wait. */
static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n", 
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      /* Acquire the poster's vector clock, then free the one-shot SO. */
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}
2756
2757
sewardj9f569b72008-11-13 13:33:09 +00002758/* -------------------------------------------------------- */
2759/* -------------- events to do with barriers -------------- */
2760/* -------------------------------------------------------- */
2761
/* Shadow state for one pthread_barrier_t. */
typedef
   struct {
      Bool    initted; /* has it yet been initted by guest? */
      Bool    resizable; /* is resizing allowed? */
      UWord   size; /* declared size */
      XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;
2770
2771static Bar* new_Bar ( void ) {
2772 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2773 tl_assert(bar);
2774 /* all fields are zero */
2775 tl_assert(bar->initted == False);
2776 return bar;
2777}
2778
2779static void delete_Bar ( Bar* bar ) {
2780 tl_assert(bar);
2781 if (bar->waiting)
2782 VG_(deleteXA)(bar->waiting);
2783 HG_(free)(bar);
2784}
2785
2786/* A mapping which stores auxiliary data for barriers. */
2787
2788/* pthread_barrier_t* -> Bar* */
2789static WordFM* map_barrier_to_Bar = NULL;
2790
2791static void map_barrier_to_Bar_INIT ( void ) {
2792 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2793 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2794 "hg.mbtBI.1", HG_(free), NULL );
2795 tl_assert(map_barrier_to_Bar != NULL);
2796 }
2797}
2798
2799static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2800 UWord key, val;
2801 map_barrier_to_Bar_INIT();
2802 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2803 tl_assert(key == (UWord)barrier);
2804 return (Bar*)val;
2805 } else {
2806 Bar* bar = new_Bar();
2807 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2808 return bar;
2809 }
2810}
2811
2812static void map_barrier_to_Bar_delete ( void* barrier ) {
2813 UWord keyW, valW;
2814 map_barrier_to_Bar_INIT();
2815 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2816 Bar* bar = (Bar*)valW;
2817 tl_assert(keyW == (UWord)barrier);
2818 delete_Bar(bar);
2819 }
2820}
2821
2822
/* PRE handler for pthread_barrier_init.  Validates 'count' and
   'resizable', complains about re-initialising a live barrier or one
   with threads still waiting, then (re)initialises the shadow Bar. */
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n", 
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      /* Abandon the stranded waiters so the re-init starts clean. */
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   /* Commit the new configuration. */
   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
2878
2879
/* Handle a client's pthread_barrier_destroy.  Only purpose is to
   free the auxiliary Bar storage (avoiding leaks); destroying an
   uninitialised or in-use barrier is reported but tolerated. */
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* NB: lookup_or_alloc, so a destroy of a never-seen barrier
      creates (then immediately deletes) a Bar for it. */
   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
2922
2923
sewardj406bac82010-03-03 23:03:40 +00002924/* All the threads have arrived. Now do the Interesting Bit. Get a
2925 new synchronisation object and do a weak send to it from all the
2926 participating threads. This makes its vector clocks be the join of
2927 all the individual threads' vector clocks. Then do a strong
2928 receive from it back to all threads, so that their VCs are a copy
2929 of it (hence are all equal to the join of their original VCs.) */
2930static void do_barrier_cross_sync_and_empty ( Bar* bar )
2931{
2932 /* XXX check bar->waiting has no duplicates */
2933 UWord i;
2934 SO* so = libhb_so_alloc();
2935
2936 tl_assert(bar->waiting);
2937 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2938
2939 /* compute the join ... */
2940 for (i = 0; i < bar->size; i++) {
2941 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2942 Thr* hbthr = t->hbthr;
2943 libhb_so_send( hbthr, so, False/*weak send*/ );
2944 }
2945 /* ... and distribute to all threads */
2946 for (i = 0; i < bar->size; i++) {
2947 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2948 Thr* hbthr = t->hbthr;
2949 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2950 }
2951
2952 /* finally, we must empty out the waiting vector */
2953 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2954
2955 /* and we don't need this any more. Perhaps a stack-allocated
2956 SO would be better? */
2957 libhb_so_dealloc(so);
2958}
2959
2960
/* Handle a client thread entering pthread_barrier_wait: record the
   thread on the barrier's waiting list and, if it is the last
   arrival, perform the cross-thread synchronisation for the whole
   group.  See the long comment below for why this is correct. */
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   /* Record this thread's arrival. */
   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   /* Not the last arrival: nothing more to do yet. */
   if (present < bar->size)
      return;

   /* Last arrival: synchronise the whole group and empty the list. */
   do_barrier_cross_sync_and_empty(bar);
}
sewardj9f569b72008-11-13 13:33:09 +00003041
sewardj9f569b72008-11-13 13:33:09 +00003042
sewardj406bac82010-03-03 23:03:40 +00003043static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3044 void* barrier,
3045 UWord newcount )
3046{
3047 Thread* thr;
3048 Bar* bar;
3049 UWord present;
3050
3051 if (SHOW_EVENTS >= 1)
3052 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3053 "(tid=%d, barrier=%p, newcount=%lu)\n",
3054 (Int)tid, (void*)barrier, newcount );
3055
3056 thr = map_threads_maybe_lookup( tid );
3057 tl_assert(thr); /* cannot fail - Thread* must already exist */
3058
3059 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3060 tl_assert(bar);
3061
3062 if (!bar->initted) {
3063 HG_(record_error_Misc)(
3064 thr, "pthread_barrier_resize: barrier is uninitialised"
3065 );
3066 return; /* client is broken .. avoid assertions below */
3067 }
3068
3069 if (!bar->resizable) {
3070 HG_(record_error_Misc)(
3071 thr, "pthread_barrier_resize: barrier is may not be resized"
3072 );
3073 return; /* client is broken .. avoid assertions below */
3074 }
3075
3076 if (newcount == 0) {
3077 HG_(record_error_Misc)(
3078 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3079 );
3080 return; /* client is broken .. avoid assertions below */
3081 }
3082
3083 /* guaranteed by _INIT_PRE above */
3084 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003085 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003086 /* Guaranteed by this fn */
3087 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003088
sewardj406bac82010-03-03 23:03:40 +00003089 if (newcount >= bar->size) {
3090 /* Increasing the capacity. There's no possibility of threads
3091 moving on from the barrier in this situation, so just note
3092 the fact and do nothing more. */
3093 bar->size = newcount;
3094 } else {
3095 /* Decreasing the capacity. If we decrease it to be equal or
3096 below the number of waiting threads, they will now move past
3097 the barrier, so need to mess with dep edges in the same way
3098 as if the barrier had filled up normally. */
3099 present = VG_(sizeXA)(bar->waiting);
3100 tl_assert(present >= 0 && present <= bar->size);
3101 if (newcount <= present) {
3102 bar->size = present; /* keep the cross_sync call happy */
3103 do_barrier_cross_sync_and_empty(bar);
3104 }
3105 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003106 }
sewardj9f569b72008-11-13 13:33:09 +00003107}
3108
3109
sewardjed2e72e2009-08-14 11:08:24 +00003110/* ----------------------------------------------------- */
3111/* ----- events to do with user-specified HB edges ----- */
3112/* ----------------------------------------------------- */
3113
/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */



/* UWord -> SO*.  Created lazily; access only after
   map_usertag_to_SO_INIT(). */
static WordFM* map_usertag_to_SO = NULL;

/* Create the usertag map on first use.  Idempotent and cheap; the
   NULL compare function means tags are compared as unboxed UWords. */
static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
      tl_assert(map_usertag_to_SO != NULL);
   }
}
3130
3131static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3132 UWord key, val;
3133 map_usertag_to_SO_INIT();
3134 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3135 tl_assert(key == (UWord)usertag);
3136 return (SO*)val;
3137 } else {
3138 SO* so = libhb_so_alloc();
3139 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3140 return so;
3141 }
3142}
3143
sewardj6015d0e2011-03-11 19:10:48 +00003144static void map_usertag_to_SO_delete ( UWord usertag ) {
3145 UWord keyW, valW;
3146 map_usertag_to_SO_INIT();
3147 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3148 SO* so = (SO*)valW;
3149 tl_assert(keyW == usertag);
3150 tl_assert(so);
3151 libhb_so_dealloc(so);
3152 }
3153}
sewardjed2e72e2009-08-14 11:08:24 +00003154
3155
/* Client-request handler: user-level happens-before "send". */
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Weak send: accumulates (joins) rather than overwrites. */
   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}
3183
/* Client-request handler: user-level happens-before "receive". */
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
3211
/* Client-request handler: discard a user-level happens-before tag. */
static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}
3226
sewardjed2e72e2009-08-14 11:08:24 +00003227
sewardjb4112022007-11-09 22:49:28 +00003228/*--------------------------------------------------------------*/
3229/*--- Lock acquisition order monitoring ---*/
3230/*--------------------------------------------------------------*/
3231
3232/* FIXME: here are some optimisations still to do in
3233 laog__pre_thread_acquires_lock.
3234
3235 The graph is structured so that if L1 --*--> L2 then L1 must be
3236 acquired before L2.
3237
3238 The common case is that some thread T holds (eg) L1 L2 and L3 and
3239 is repeatedly acquiring and releasing Ln, and there is no ordering
3240 error in what it is doing. Hence it repeatly:
3241
3242 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3243 produces the answer No (because there is no error).
3244
3245 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3246 (because they already got added the first time T acquired Ln).
3247
3248 Hence cache these two events:
3249
3250 (1) Cache result of the query from last time. Invalidate the cache
3251 any time any edges are added to or deleted from laog.
3252
3253 (2) Cache these add-edge requests and ignore them if said edges
3254 have already been added to laog. Invalidate the cache any time
3255 any edges are deleted from laog.
3256*/
3257
/* Per-lock node in the lock acquisition order graph: the sets of
   immediate predecessor and successor locks, both expressed as
   WordSetIDs in the univ_laog universe. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph: maps each Lock* to its LAOGLinks. */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3267
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to.  Keyed (by cmp_LAOGLinkExposition) on the guest-address
   pair only; the ExeContexts are payload. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
3279
sewardj250ec2e2008-02-15 22:02:30 +00003280static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003281 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3282 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3283 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3284 if (llx1->src_ga < llx2->src_ga) return -1;
3285 if (llx1->src_ga > llx2->src_ga) return 1;
3286 if (llx1->dst_ga < llx2->dst_ga) return -1;
3287 if (llx1->dst_ga > llx2->dst_ga) return 1;
3288 return 0;
3289}
3290
/* Set of LAOGLinkExposition records (values unused), keyed by the
   comparator above. */
static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
3293
3294
/* One-time creation of the lock-order graph and its exposition map.
   Only legal when lock order tracking is enabled, and only once. */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   /* Keys are Lock*, compared as unboxed words. */
   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   /* Exposition records need structural (src_ga,dst_ga) comparison. */
   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}
3310
florian6bf37262012-10-21 03:23:36 +00003311static void laog__show ( const HChar* who ) {
3312 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003313 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003314 Lock* me;
3315 LAOGLinks* links;
3316 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003317 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003318 me = NULL;
3319 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003320 while (VG_(nextIterFM)( laog, (UWord*)&me,
3321 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003322 tl_assert(me);
3323 tl_assert(links);
3324 VG_(printf)(" node %p:\n", me);
3325 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3326 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003327 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003328 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3329 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003330 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003331 me = NULL;
3332 links = NULL;
3333 }
sewardj896f6f92008-08-19 08:38:52 +00003334 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003335 VG_(printf)("}\n");
3336}
3337
/* Garbage-collect the univ_laog word-set universe: mark every
   WordSet still referenced by some node's inns/outs, kill the rest,
   then set the next GC trigger.  Called when the universe's
   cardinality reaches next_gc_univ_laog. */
static void univ_laog_do_GC ( void ) {
   Word i;
   LAOGLinks* links;
   Word seen = 0;
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);

   /* Mark array, indexed by WordSetID. */
   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                        * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   if (VG_(clo_stats))
      VG_(message)(Vg_DebugMsg,
                   "univ_laog_do_GC enter cardinality %'10d\n",
                   (Int)univ_laog_cardinality);

   /* Mark phase: every set reachable from a laog node is live. */
   VG_(initIterFM)( laog );
   links = NULL;
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links);
      tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
      links = NULL;
   }
   VG_(doneIterFM)( laog );

   /* Sweep phase: kill every unmarked set. */
   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next_gc.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep long enough laog WV that will become
   //   useful again very soon.
   // Sol 2: garbage collect at a percentage increase of the current cardinality
   //         (with a min increase of 1)
   //   Trials on a small test program with 1%, 5% and 10% increase was done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each gc becomes a lot, when many gc
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to cost introduced by gc: on the t2t perf test (doing only
   // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection. With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // difference performance is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.

   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or fixed increased),
   // we should do it here, eg.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code the solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
          (Int)seen, next_gc_univ_laog);
}
3415
3416
/* Add the edge src->dst to the lock-order graph (creating nodes as
   needed), and, if the edge is new and both locks have acquisition
   contexts, record an exposition entry so a later ordering violation
   can be reported with both establishment points.  May trigger a
   univ_laog GC. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      /* addToWS returns the same set iff the element was present */
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src has no node yet: create one with this single out-edge */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* dst has no node yet: create one with this single in-edge */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   /* Forward and backward views of the graph must agree. */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      /* Stack-local probe keyed on the guest-address pair only. */
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   /* Adding edges may have grown the word-set universe: maybe GC. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}
3499
/* Remove the edge src->dst from the lock-order graph, together with
   its exposition record if one exists.  Tolerates absent nodes and
   absent edges.  May trigger a univ_laog GC. */
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      /* Stack-local probe; only (src_ga,dst_ga) matter for lookup. */
      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase nr of WS so check for gc. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}
3543
3544__attribute__((noinline))
3545static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003546 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003547 LAOGLinks* links;
3548 keyW = 0;
3549 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003550 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003551 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003552 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003553 return links->outs;
3554 } else {
3555 return HG_(emptyWS)( univ_laog );
3556 }
3557}
3558
3559__attribute__((noinline))
3560static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003561 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003562 LAOGLinks* links;
3563 keyW = 0;
3564 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003565 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003566 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003567 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003568 return links->inns;
3569 } else {
3570 return HG_(emptyWS)( univ_laog );
3571 }
3572}
3573
/* Consistency check: for every node, each in-edge must appear as the
   corresponding node's out-edge and vice versa.  On failure, dumps
   the graph and asserts.  'who' names the caller for diagnostics. */
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* every predecessor must list 'me' among its successors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      /* every successor must list 'me' among its predecessors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3613
3614/* If there is a path in laog from 'src' to any of the elements in
3615 'dst', return an arbitrarily chosen element of 'dst' reachable from
3616 'src'. If no path exist from 'src' to any element in 'dst', return
3617 NULL. */
3618__attribute__((noinline))
3619static
3620Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3621{
3622 Lock* ret;
florian6bf37262012-10-21 03:23:36 +00003623 Word ssz;
sewardjb4112022007-11-09 22:49:28 +00003624 XArray* stack; /* of Lock* */
3625 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3626 Lock* here;
3627 WordSetID succs;
florian6bf37262012-10-21 03:23:36 +00003628 UWord succs_size, i;
sewardj250ec2e2008-02-15 22:02:30 +00003629 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003630 //laog__sanity_check();
3631
3632 /* If the destination set is empty, we can never get there from
3633 'src' :-), so don't bother to try */
3634 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3635 return NULL;
3636
3637 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003638 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3639 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003640
3641 (void) VG_(addToXA)( stack, &src );
3642
3643 while (True) {
3644
3645 ssz = VG_(sizeXA)( stack );
3646
3647 if (ssz == 0) { ret = NULL; break; }
3648
3649 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3650 VG_(dropTailXA)( stack, 1 );
3651
florian6bf37262012-10-21 03:23:36 +00003652 if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
sewardjb4112022007-11-09 22:49:28 +00003653
florian6bf37262012-10-21 03:23:36 +00003654 if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
sewardjb4112022007-11-09 22:49:28 +00003655 continue;
3656
florian6bf37262012-10-21 03:23:36 +00003657 VG_(addToFM)( visited, (UWord)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003658
3659 succs = laog__succs( here );
3660 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3661 for (i = 0; i < succs_size; i++)
3662 (void) VG_(addToXA)( stack, &succs_words[i] );
3663 }
3664
sewardj896f6f92008-08-19 08:38:52 +00003665 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003666 VG_(deleteXA)( stack );
3667 return ret;
3668}
3669
3670
3671/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3672 between 'lk' and the locks already held by 'thr' and issue a
3673 complaint if so. Also, update the ordering graph appropriately.
3674*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock ( 
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord*   ls_words;
   UWord    ls_size, i;
   Lock*    other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user.  Only the
         guest addresses participate in the lookup (the ExeContext
         fields are ignored by the comparison, hence NULL here). */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         /* Exposition found: report with the stacks that established
            the required (opposite) ordering. */
         HG_(record_error_LockOrder)( 
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks
            between them:

                           C

                       fCA   fBC

                      A   fAB   B

            Let's have the following actions:
                   A takes    fCA,fAB
                   A releases fCA,fAB
                   B takes    fAB,fBC
                   B releases fAB,fBC
                   C takes    fBC,fCA
                   C releases fBC,fCA

            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            sequence is done:
                A takes fCA
                B takes fAB
                C takes fBC

            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            are not given.

            This is because there is no pair (fCA, fBC) in laog exposition :
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */

         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3796
sewardj866c80c2011-10-22 19:29:51 +00003797/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3798static UWord* UWordV_dup(UWord* words, Word words_size)
3799{
3800 UInt i;
3801
3802 if (words_size == 0)
3803 return NULL;
3804
3805 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3806
3807 for (i = 0; i < words_size; i++)
3808 dup[i] = words[i];
3809
3810 return dup;
3811}
sewardjb4112022007-11-09 22:49:28 +00003812
3813/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3814
/* Remove lock 'lk' from the ordering graph: delete all edges touching
   it, then bridge each (pred, succ) pair so transitive ordering
   constraints through 'lk' are preserved. */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   /* Disconnect 'lk' from all of its predecessors ... */
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   /* ... and from all of its successors. */
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* Bridge: every pred must still be ordered before every succ. */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   /* UWordV_dup returns NULL for empty sets, hence the guards. */
   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog, 
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
3868
sewardj1cbc12f2008-11-10 16:16:46 +00003869//__attribute__((noinline))
3870//static void laog__handle_lock_deletions (
3871// WordSetID /* in univ_laog */ locksToDelete
3872// )
3873//{
3874// Word i, ws_size;
3875// UWord* ws_words;
3876//
sewardj1cbc12f2008-11-10 16:16:46 +00003877//
3878// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003879// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003880// for (i = 0; i < ws_size; i++)
3881// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3882//
3883// if (HG_(clo_sanity_flags) & SCE_LAOG)
3884// all__sanity_check("laog__handle_lock_deletions-post");
3885//}
sewardjb4112022007-11-09 22:49:28 +00003886
3887
3888/*--------------------------------------------------------------*/
3889/*--- Malloc/free replacements ---*/
3890/*--------------------------------------------------------------*/
3891
/* Metadata recorded for each live client heap block; nodes live in
   hg_mallocmeta_table, keyed by payload address. */
typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;
3901
3902/* A hash table of MallocMetas, used to track malloc'd blocks
3903 (obviously). */
3904static VgHashTable hg_mallocmeta_table = NULL;
3905
3906
3907static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003908 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003909 tl_assert(md);
3910 return md;
3911}
/* Release a MallocMeta previously obtained from new_MallocMeta. */
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}
3915
3916
3917/* Allocate a client block and set up the metadata for it. */
3918
static
void* handle_alloc ( ThreadId tid, 
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   /* The hg_cli__* wrappers have already rejected negative-looking
      sizes, so this should always hold. */
   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   /* Register the block so later free/realloc/lookup can find it. */
   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
3950
3951/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3952 Cast to a signed type to catch any unexpectedly negative args.
3953 We're assuming here that the size asked for is not greater than
3954 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3955 platforms). */
/* malloc() replacement: default alignment, uninitialised contents. */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* C++ operator new replacement: identical policy to malloc. */
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* C++ operator new[] replacement: identical policy to malloc. */
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* memalign() replacement: caller-specified alignment. */
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align, 
                         /*is_zeroed*/False );
}
3976static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3977 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3978 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3979 /*is_zeroed*/True );
3980}
3981
3982
3983/* Free a client block, including getting rid of the relevant
3984 metadata. */
3985
3986static void handle_free ( ThreadId tid, void* p )
3987{
3988 MallocMeta *md, *old_md;
3989 SizeT szB;
3990
3991 /* First see if we can find the metadata for 'p'. */
3992 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3993 if (!md)
3994 return; /* apparently freeing a bogus address. Oh well. */
3995
3996 tl_assert(md->payload == (Addr)p);
3997 szB = md->szB;
3998
3999 /* Nuke the metadata block */
4000 old_md = (MallocMeta*)
4001 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4002 tl_assert(old_md); /* it must be present - we just found it */
4003 tl_assert(old_md == md);
4004 tl_assert(old_md->payload == (Addr)p);
4005
4006 VG_(cli_free)((void*)old_md->payload);
4007 delete_MallocMeta(old_md);
4008
4009 /* Tell the lower level memory wranglers. */
4010 evh__die_mem_heap( (Addr)p, szB );
4011}
4012
/* free() replacement. */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* C++ operator delete replacement. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* C++ operator delete[] replacement. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
4022
4023
4024static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4025{
4026 MallocMeta *md, *md_new, *md_tmp;
4027 SizeT i;
4028
4029 Addr payload = (Addr)payloadV;
4030
4031 if (((SSizeT)new_size) < 0) return NULL;
4032
4033 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4034 if (!md)
4035 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4036
4037 tl_assert(md->payload == payload);
4038
4039 if (md->szB == new_size) {
4040 /* size unchanged */
4041 md->where = VG_(record_ExeContext)(tid, 0);
4042 return payloadV;
4043 }
4044
4045 if (md->szB > new_size) {
4046 /* new size is smaller */
4047 md->szB = new_size;
4048 md->where = VG_(record_ExeContext)(tid, 0);
4049 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4050 return payloadV;
4051 }
4052
4053 /* else */ {
4054 /* new size is bigger */
4055 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4056
4057 /* First half kept and copied, second half new */
4058 // FIXME: shouldn't we use a copier which implements the
4059 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004060 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004061 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004062 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004063 /* FIXME: can anything funny happen here? specifically, if the
4064 old range contained a lock, then die_mem_heap will complain.
4065 Is that the correct behaviour? Not sure. */
4066 evh__die_mem_heap( payload, md->szB );
4067
4068 /* Copy from old to new */
4069 for (i = 0; i < md->szB; i++)
4070 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4071
4072 /* Because the metadata hash table is index by payload address,
4073 we have to get rid of the old hash table entry and make a new
4074 one. We can't just modify the existing metadata in place,
4075 because then it would (almost certainly) be in the wrong hash
4076 chain. */
4077 md_new = new_MallocMeta();
4078 *md_new = *md;
4079
4080 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4081 tl_assert(md_tmp);
4082 tl_assert(md_tmp == md);
4083
4084 VG_(cli_free)((void*)md->payload);
4085 delete_MallocMeta(md);
4086
4087 /* Update fields */
4088 md_new->where = VG_(record_ExeContext)( tid, 0 );
4089 md_new->szB = new_size;
4090 md_new->payload = p_new;
4091 md_new->thr = map_threads_lookup( tid );
4092
4093 /* and add */
4094 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4095
4096 return (void*)p_new;
4097 }
4098}
4099
njn8b140de2009-02-17 04:31:18 +00004100static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4101{
4102 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4103
4104 // There may be slop, but pretend there isn't because only the asked-for
4105 // area will have been shadowed properly.
4106 return ( md ? md->szB : 0 );
4107}
4108
sewardjb4112022007-11-09 22:49:28 +00004109
sewardj095d61e2010-03-11 13:43:18 +00004110/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004111 Slow linear search. With a bit of hash table help if 'data_addr'
4112 is either the start of a block or up to 15 word-sized steps along
4113 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004114
4115static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4116{
sewardjc8028ad2010-05-05 09:34:42 +00004117 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4118 right at it. */
4119 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4120 return True;
4121 /* else normal interval rules apply */
4122 if (LIKELY(a < mm->payload)) return False;
4123 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4124 return True;
sewardj095d61e2010-03-11 13:43:18 +00004125}
4126
sewardjc8028ad2010-05-05 09:34:42 +00004127Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00004128 /*OUT*/Addr* payload,
4129 /*OUT*/SizeT* szB,
4130 Addr data_addr )
4131{
4132 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004133 Int i;
4134 const Int n_fast_check_words = 16;
4135
4136 /* First, do a few fast searches on the basis that data_addr might
4137 be exactly the start of a block or up to 15 words inside. This
4138 can happen commonly via the creq
4139 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4140 for (i = 0; i < n_fast_check_words; i++) {
4141 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4142 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4143 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4144 goto found;
4145 }
4146
sewardj095d61e2010-03-11 13:43:18 +00004147 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004148 some such, it's hard to see how to do better. We have to check
4149 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004150 VG_(HT_ResetIter)(hg_mallocmeta_table);
4151 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004152 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4153 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004154 }
sewardjc8028ad2010-05-05 09:34:42 +00004155
4156 /* Not found. Bah. */
4157 return False;
4158 /*NOTREACHED*/
4159
4160 found:
4161 tl_assert(mm);
4162 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4163 if (where) *where = mm->where;
4164 if (payload) *payload = mm->payload;
4165 if (szB) *szB = mm->szB;
4166 return True;
sewardj095d61e2010-03-11 13:43:18 +00004167}
4168
4169
sewardjb4112022007-11-09 22:49:28 +00004170/*--------------------------------------------------------------*/
4171/*--- Instrumentation ---*/
4172/*--------------------------------------------------------------*/
4173
sewardjcafe5052013-01-17 14:24:35 +00004174#define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
sewardjffce8152011-06-24 10:09:41 +00004175#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4176#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4177#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4178#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4179#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4180
sewardjcafe5052013-01-17 14:24:35 +00004181/* This takes and returns atoms, of course. Not full IRExprs. */
4182static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4183{
4184 tl_assert(arg1 && arg2);
4185 tl_assert(isIRAtom(arg1));
4186 tl_assert(isIRAtom(arg2));
4187 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4188 code, I know. */
4189 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4190 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4191 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4192 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4193 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4194 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4195 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4196 mkexpr(wide2))));
4197 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4198 return mkexpr(res);
4199}
4200
sewardjffce8152011-06-24 10:09:41 +00004201static void instrument_mem_access ( IRSB* sbOut,
sewardjb4112022007-11-09 22:49:28 +00004202 IRExpr* addr,
4203 Int szB,
4204 Bool isStore,
sewardjffce8152011-06-24 10:09:41 +00004205 Int hWordTy_szB,
sewardjcafe5052013-01-17 14:24:35 +00004206 Int goff_sp,
4207 IRExpr* guard ) /* NULL => True */
sewardjb4112022007-11-09 22:49:28 +00004208{
4209 IRType tyAddr = Ity_INVALID;
florian6bf37262012-10-21 03:23:36 +00004210 const HChar* hName = NULL;
sewardjb4112022007-11-09 22:49:28 +00004211 void* hAddr = NULL;
4212 Int regparms = 0;
4213 IRExpr** argv = NULL;
4214 IRDirty* di = NULL;
4215
sewardjffce8152011-06-24 10:09:41 +00004216 // THRESH is the size of the window above SP (well,
4217 // mostly above) that we assume implies a stack reference.
4218 const Int THRESH = 4096 * 4; // somewhat arbitrary
4219 const Int rz_szB = VG_STACK_REDZONE_SZB;
4220
sewardjb4112022007-11-09 22:49:28 +00004221 tl_assert(isIRAtom(addr));
4222 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4223
sewardjffce8152011-06-24 10:09:41 +00004224 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
sewardjb4112022007-11-09 22:49:28 +00004225 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4226
4227 /* So the effective address is in 'addr' now. */
4228 regparms = 1; // unless stated otherwise
4229 if (isStore) {
4230 switch (szB) {
4231 case 1:
sewardj23f12002009-07-24 08:45:08 +00004232 hName = "evh__mem_help_cwrite_1";
4233 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004234 argv = mkIRExprVec_1( addr );
4235 break;
4236 case 2:
sewardj23f12002009-07-24 08:45:08 +00004237 hName = "evh__mem_help_cwrite_2";
4238 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004239 argv = mkIRExprVec_1( addr );
4240 break;
4241 case 4:
sewardj23f12002009-07-24 08:45:08 +00004242 hName = "evh__mem_help_cwrite_4";
4243 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004244 argv = mkIRExprVec_1( addr );
4245 break;
4246 case 8:
sewardj23f12002009-07-24 08:45:08 +00004247 hName = "evh__mem_help_cwrite_8";
4248 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004249 argv = mkIRExprVec_1( addr );
4250 break;
4251 default:
4252 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4253 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004254 hName = "evh__mem_help_cwrite_N";
4255 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004256 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4257 break;
4258 }
4259 } else {
4260 switch (szB) {
4261 case 1:
sewardj23f12002009-07-24 08:45:08 +00004262 hName = "evh__mem_help_cread_1";
4263 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004264 argv = mkIRExprVec_1( addr );
4265 break;
4266 case 2:
sewardj23f12002009-07-24 08:45:08 +00004267 hName = "evh__mem_help_cread_2";
4268 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004269 argv = mkIRExprVec_1( addr );
4270 break;
4271 case 4:
sewardj23f12002009-07-24 08:45:08 +00004272 hName = "evh__mem_help_cread_4";
4273 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004274 argv = mkIRExprVec_1( addr );
4275 break;
4276 case 8:
sewardj23f12002009-07-24 08:45:08 +00004277 hName = "evh__mem_help_cread_8";
4278 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004279 argv = mkIRExprVec_1( addr );
4280 break;
4281 default:
4282 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4283 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004284 hName = "evh__mem_help_cread_N";
4285 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004286 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4287 break;
4288 }
4289 }
4290
sewardjffce8152011-06-24 10:09:41 +00004291 /* Create the helper. */
sewardjb4112022007-11-09 22:49:28 +00004292 tl_assert(hName);
4293 tl_assert(hAddr);
4294 tl_assert(argv);
4295 di = unsafeIRDirty_0_N( regparms,
4296 hName, VG_(fnptr_to_fnentry)( hAddr ),
4297 argv );
sewardjffce8152011-06-24 10:09:41 +00004298
4299 if (! HG_(clo_check_stack_refs)) {
4300 /* We're ignoring memory references which are (obviously) to the
4301 stack. In fact just skip stack refs that are within 4 pages
4302 of SP (SP - the redzone, really), as that's simple, easy, and
4303 filters out most stack references. */
4304 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4305 some arbitrary N. If that is true then addr is outside the
4306 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4307 pages) then we can say addr is within a few pages of SP and
4308 so can't possibly be a heap access, and so can be skipped.
4309
4310 Note that the condition simplifies to
4311 (addr - SP + RZ) >u N
4312 which generates better code in x86/amd64 backends, but it does
4313 not unfortunately simplify to
4314 (addr - SP) >u (N - RZ)
4315 (would be beneficial because N - RZ is a constant) because
4316 wraparound arithmetic messes up the comparison. eg.
4317 20 >u 10 == True,
4318 but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4319 */
4320 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4321 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4322
4323 /* "addr - SP" */
4324 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4325 addStmtToIRSB(
4326 sbOut,
4327 assign(addr_minus_sp,
4328 tyAddr == Ity_I32
4329 ? binop(Iop_Sub32, addr, mkexpr(sp))
4330 : binop(Iop_Sub64, addr, mkexpr(sp)))
4331 );
4332
4333 /* "addr - SP + RZ" */
4334 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4335 addStmtToIRSB(
4336 sbOut,
4337 assign(diff,
4338 tyAddr == Ity_I32
4339 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4340 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4341 );
4342
sewardjcafe5052013-01-17 14:24:35 +00004343 /* guardA == "guard on the address" */
4344 IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
sewardjffce8152011-06-24 10:09:41 +00004345 addStmtToIRSB(
4346 sbOut,
sewardjcafe5052013-01-17 14:24:35 +00004347 assign(guardA,
sewardjffce8152011-06-24 10:09:41 +00004348 tyAddr == Ity_I32
4349 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4350 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4351 );
sewardjcafe5052013-01-17 14:24:35 +00004352 di->guard = mkexpr(guardA);
4353 }
4354
4355 /* If there's a guard on the access itself (as supplied by the
4356 caller of this routine), we need to AND that in to any guard we
4357 might already have. */
4358 if (guard) {
4359 di->guard = mk_And1(sbOut, di->guard, guard);
sewardjffce8152011-06-24 10:09:41 +00004360 }
4361
4362 /* Add the helper. */
4363 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
sewardjb4112022007-11-09 22:49:28 +00004364}
4365
4366
sewardja0eee322009-07-31 08:46:35 +00004367/* Figure out if GA is a guest code address in the dynamic linker, and
4368 if so return True. Otherwise (and in case of any doubt) return
4369 False. (sidedly safe w/ False as the safe value) */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const HChar* soname;
   if (0) return False;

   /* No debug info for this address => can't tell => assume it is
      not in the dynamic linker. */
   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

   /* Compare the object's soname against the known dynamic linker
      names for this OS. */
#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}
4396
sewardjb4112022007-11-09 22:49:28 +00004397static
4398IRSB* hg_instrument ( VgCallbackClosure* closure,
4399 IRSB* bbIn,
4400 VexGuestLayout* layout,
4401 VexGuestExtents* vge,
florianca503be2012-10-07 21:59:42 +00004402 VexArchInfo* archinfo_host,
sewardjb4112022007-11-09 22:49:28 +00004403 IRType gWordTy, IRType hWordTy )
4404{
sewardj1c0ce7a2009-07-01 08:10:49 +00004405 Int i;
4406 IRSB* bbOut;
4407 Addr64 cia; /* address of current insn */
4408 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004409 Bool inLDSO = False;
4410 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004411
sewardjffce8152011-06-24 10:09:41 +00004412 const Int goff_sp = layout->offset_SP;
4413
sewardjb4112022007-11-09 22:49:28 +00004414 if (gWordTy != hWordTy) {
4415 /* We don't currently support this case. */
4416 VG_(tool_panic)("host/guest word size mismatch");
4417 }
4418
sewardja0eee322009-07-31 08:46:35 +00004419 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4420 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4421 }
4422
sewardjb4112022007-11-09 22:49:28 +00004423 /* Set up BB */
4424 bbOut = emptyIRSB();
4425 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4426 bbOut->next = deepCopyIRExpr(bbIn->next);
4427 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004428 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004429
4430 // Copy verbatim any IR preamble preceding the first IMark
4431 i = 0;
4432 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4433 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4434 i++;
4435 }
4436
sewardj1c0ce7a2009-07-01 08:10:49 +00004437 // Get the first statement, and initial cia from it
4438 tl_assert(bbIn->stmts_used > 0);
4439 tl_assert(i < bbIn->stmts_used);
4440 st = bbIn->stmts[i];
4441 tl_assert(Ist_IMark == st->tag);
4442 cia = st->Ist.IMark.addr;
4443 st = NULL;
4444
sewardjb4112022007-11-09 22:49:28 +00004445 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004446 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004447 tl_assert(st);
4448 tl_assert(isFlatIRStmt(st));
4449 switch (st->tag) {
4450 case Ist_NoOp:
4451 case Ist_AbiHint:
4452 case Ist_Put:
4453 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004454 case Ist_Exit:
4455 /* None of these can contain any memory references. */
4456 break;
4457
sewardj1c0ce7a2009-07-01 08:10:49 +00004458 case Ist_IMark:
4459 /* no mem refs, but note the insn address. */
4460 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004461 /* Don't instrument the dynamic linker. It generates a
4462 lot of races which we just expensively suppress, so
4463 it's pointless.
4464
4465 Avoid flooding is_in_dynamic_linker_shared_object with
4466 requests by only checking at transitions between 4K
4467 pages. */
4468 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4469 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4470 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4471 inLDSO = is_in_dynamic_linker_shared_object(cia);
4472 } else {
4473 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4474 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004475 break;
4476
sewardjb4112022007-11-09 22:49:28 +00004477 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004478 switch (st->Ist.MBE.event) {
4479 case Imbe_Fence:
4480 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004481 default:
4482 goto unhandled;
4483 }
sewardjb4112022007-11-09 22:49:28 +00004484 break;
4485
sewardj1c0ce7a2009-07-01 08:10:49 +00004486 case Ist_CAS: {
4487 /* Atomic read-modify-write cycle. Just pretend it's a
4488 read. */
4489 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004490 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4491 if (isDCAS) {
4492 tl_assert(cas->expdHi);
4493 tl_assert(cas->dataHi);
4494 } else {
4495 tl_assert(!cas->expdHi);
4496 tl_assert(!cas->dataHi);
4497 }
4498 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004499 if (!inLDSO) {
4500 instrument_mem_access(
4501 bbOut,
4502 cas->addr,
4503 (isDCAS ? 2 : 1)
4504 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4505 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004506 sizeofIRType(hWordTy), goff_sp,
4507 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004508 );
4509 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004510 break;
4511 }
4512
sewardjdb5907d2009-11-26 17:20:21 +00004513 case Ist_LLSC: {
4514 /* We pretend store-conditionals don't exist, viz, ignore
4515 them. Whereas load-linked's are treated the same as
4516 normal loads. */
4517 IRType dataTy;
4518 if (st->Ist.LLSC.storedata == NULL) {
4519 /* LL */
4520 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004521 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004522 instrument_mem_access(
4523 bbOut,
4524 st->Ist.LLSC.addr,
4525 sizeofIRType(dataTy),
4526 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004527 sizeofIRType(hWordTy), goff_sp,
4528 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004529 );
4530 }
sewardjdb5907d2009-11-26 17:20:21 +00004531 } else {
4532 /* SC */
4533 /*ignore */
4534 }
4535 break;
4536 }
4537
4538 case Ist_Store:
sewardjdb5907d2009-11-26 17:20:21 +00004539 if (!inLDSO) {
4540 instrument_mem_access(
4541 bbOut,
4542 st->Ist.Store.addr,
4543 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4544 True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004545 sizeofIRType(hWordTy), goff_sp,
4546 NULL/*no-guard*/
sewardjdb5907d2009-11-26 17:20:21 +00004547 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004548 }
njnb83caf22009-05-25 01:47:56 +00004549 break;
sewardjb4112022007-11-09 22:49:28 +00004550
sewardjcafe5052013-01-17 14:24:35 +00004551 case Ist_StoreG: {
4552 IRStoreG* sg = st->Ist.StoreG.details;
4553 IRExpr* data = sg->data;
4554 IRExpr* addr = sg->addr;
4555 IRType type = typeOfIRExpr(bbIn->tyenv, data);
4556 tl_assert(type != Ity_INVALID);
4557 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4558 True/*isStore*/,
4559 sizeofIRType(hWordTy),
4560 goff_sp, sg->guard );
4561 break;
4562 }
4563
4564 case Ist_LoadG: {
4565 IRLoadG* lg = st->Ist.LoadG.details;
4566 IRType type = Ity_INVALID; /* loaded type */
4567 IRType typeWide = Ity_INVALID; /* after implicit widening */
4568 IRExpr* addr = lg->addr;
4569 typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4570 tl_assert(type != Ity_INVALID);
4571 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4572 False/*!isStore*/,
4573 sizeofIRType(hWordTy),
4574 goff_sp, lg->guard );
4575 break;
4576 }
4577
sewardjb4112022007-11-09 22:49:28 +00004578 case Ist_WrTmp: {
4579 IRExpr* data = st->Ist.WrTmp.data;
4580 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004581 if (!inLDSO) {
4582 instrument_mem_access(
4583 bbOut,
4584 data->Iex.Load.addr,
4585 sizeofIRType(data->Iex.Load.ty),
4586 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004587 sizeofIRType(hWordTy), goff_sp,
4588 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004589 );
4590 }
sewardjb4112022007-11-09 22:49:28 +00004591 }
4592 break;
4593 }
4594
4595 case Ist_Dirty: {
4596 Int dataSize;
4597 IRDirty* d = st->Ist.Dirty.details;
4598 if (d->mFx != Ifx_None) {
4599 /* This dirty helper accesses memory. Collect the
4600 details. */
4601 tl_assert(d->mAddr != NULL);
4602 tl_assert(d->mSize != 0);
4603 dataSize = d->mSize;
4604 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004605 if (!inLDSO) {
4606 instrument_mem_access(
4607 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004608 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004609 );
4610 }
sewardjb4112022007-11-09 22:49:28 +00004611 }
4612 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004613 if (!inLDSO) {
4614 instrument_mem_access(
4615 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004616 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004617 );
4618 }
sewardjb4112022007-11-09 22:49:28 +00004619 }
4620 } else {
4621 tl_assert(d->mAddr == NULL);
4622 tl_assert(d->mSize == 0);
4623 }
4624 break;
4625 }
4626
4627 default:
sewardjf98e1c02008-10-25 16:22:41 +00004628 unhandled:
4629 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004630 tl_assert(0);
4631
4632 } /* switch (st->tag) */
4633
4634 addStmtToIRSB( bbOut, st );
4635 } /* iterate over bbIn->stmts */
4636
4637 return bbOut;
4638}
4639
sewardjffce8152011-06-24 10:09:41 +00004640#undef binop
4641#undef mkexpr
4642#undef mkU32
4643#undef mkU64
4644#undef assign
4645
sewardjb4112022007-11-09 22:49:28 +00004646
4647/*----------------------------------------------------------------*/
4648/*--- Client requests ---*/
4649/*----------------------------------------------------------------*/
4650
4651/* Sheesh. Yet another goddam finite map. */
4652static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4653
4654static void map_pthread_t_to_Thread_INIT ( void ) {
4655 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004656 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4657 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004658 tl_assert(map_pthread_t_to_Thread != NULL);
4659 }
4660}
4661
4662
/* Handle a client request aimed at Helgrind.  args[0] is the request
   code, args[1..] its arguments; tid is the requesting thread.
   Returns False if the request is not ours; otherwise handles it,
   stores any reply value in *ret (default 0) and returns True.
   Unknown 'H''G' requests are fatal (tl_assert2 at the bottom). */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* Like CLEAN_MEMORY, but args[1] is any address inside a heap
         block; the whole containing block is cleaned.  Replies with
         the block size, or (UWord)-1 if no block contains args[1]. */
      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
         VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
         VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                     (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
         break;
      }

      /* The client's pthread wrappers detected a usage error; args
         are function name, error code, error string. */
      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
         VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (UWord*)&thr_q, (UWord)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
            VG_(printf)(".................... quitter Thread* = %p\n", 
                        thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1], 
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* A wrapped-but-unimplemented pthread API was used; report it
         to the user as a Misc error.  args[1] names the macro. */
      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* HChar* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         /* length check keeps the sprintf below within buf */
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
4979
4980
4981/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004982/*--- Setup ---*/
4983/*----------------------------------------------------------------*/
4984
florian19f91bb2012-11-10 22:29:54 +00004985static Bool hg_process_cmd_line_option ( const HChar* arg )
sewardjb4112022007-11-09 22:49:28 +00004986{
florian19f91bb2012-11-10 22:29:54 +00004987 const HChar* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00004988
njn83df0b62009-02-25 01:01:05 +00004989 if VG_BOOL_CLO(arg, "--track-lockorders",
4990 HG_(clo_track_lockorders)) {}
4991 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
4992 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00004993
4994 else if VG_XACT_CLO(arg, "--history-level=none",
4995 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00004996 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00004997 HG_(clo_history_level), 1);
4998 else if VG_XACT_CLO(arg, "--history-level=full",
4999 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00005000
sewardjf585e482009-08-16 22:52:29 +00005001 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00005002 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00005003 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00005004 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00005005
sewardj11e352f2007-11-30 11:11:02 +00005006 /* "stuvwx" --> stuvwx (binary) */
njn83df0b62009-02-25 01:01:05 +00005007 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00005008 Int j;
sewardjb4112022007-11-09 22:49:28 +00005009
njn83df0b62009-02-25 01:01:05 +00005010 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00005011 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00005012 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00005013 return False;
5014 }
sewardj11e352f2007-11-30 11:11:02 +00005015 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00005016 if ('0' == tmp_str[j]) { /* do nothing */ }
5017 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00005018 else {
sewardj11e352f2007-11-30 11:11:02 +00005019 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00005020 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00005021 return False;
5022 }
5023 }
sewardjf98e1c02008-10-25 16:22:41 +00005024 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00005025 }
5026
sewardj622fe492011-03-11 21:06:59 +00005027 else if VG_BOOL_CLO(arg, "--free-is-write",
5028 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00005029
5030 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5031 HG_(clo_vts_pruning), 0);
5032 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5033 HG_(clo_vts_pruning), 1);
5034 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5035 HG_(clo_vts_pruning), 2);
5036
5037 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5038 HG_(clo_check_stack_refs)) {}
5039
sewardjb4112022007-11-09 22:49:28 +00005040 else
5041 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5042
5043 return True;
5044}
5045
/* Print the user-visible Helgrind options for --help. */
static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
   );
}
5060
/* Print the debugging-only Helgrind options for --help-debug. */
static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:  is never done (may cause big space leaks in Helgrind)\n"
"       auto:   done just often enough to keep space usage under control\n"
"       always: done after every VTS GC (mostly just a big time waster)\n"
   );
}
5082
sewardjb4112022007-11-09 22:49:28 +00005083static void hg_fini ( Int exitcode )
5084{
sewardj2d9e8742009-08-07 15:46:56 +00005085 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
5086 VG_(message)(Vg_UserMsg,
5087 "For counts of detected and suppressed errors, "
5088 "rerun with: -v\n");
5089 }
5090
5091 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
5092 && HG_(clo_history_level) >= 2) {
5093 VG_(umsg)(
5094 "Use --history-level=approx or =none to gain increased speed, at\n" );
5095 VG_(umsg)(
5096 "the cost of reduced accuracy of conflicting-access information\n");
5097 }
5098
sewardjb4112022007-11-09 22:49:28 +00005099 if (SHOW_DATA_STRUCTURES)
5100 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00005101 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00005102 all__sanity_check("SK_(fini)");
5103
sewardj2d9e8742009-08-07 15:46:56 +00005104 if (VG_(clo_stats)) {
sewardjb4112022007-11-09 22:49:28 +00005105
5106 if (1) {
5107 VG_(printf)("\n");
sewardjb4112022007-11-09 22:49:28 +00005108 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
sewardjc1fb9d22011-02-28 09:03:44 +00005109 if (HG_(clo_track_lockorders)) {
5110 VG_(printf)("\n");
5111 HG_(ppWSUstats)( univ_laog, "univ_laog" );
5112 }
sewardjb4112022007-11-09 22:49:28 +00005113 }
5114
sewardjf98e1c02008-10-25 16:22:41 +00005115 //zz VG_(printf)("\n");
5116 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
5117 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
5118 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
5119 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
5120 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
5121 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
5122 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
5123 //zz stats__hbefore_stk_hwm);
5124 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
5125 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
sewardjb4112022007-11-09 22:49:28 +00005126
5127 VG_(printf)("\n");
barta0b6b2c2008-07-07 06:49:24 +00005128 VG_(printf)(" locksets: %'8d unique lock sets\n",
sewardjb4112022007-11-09 22:49:28 +00005129 (Int)HG_(cardinalityWSU)( univ_lsets ));
sewardjc1fb9d22011-02-28 09:03:44 +00005130 if (HG_(clo_track_lockorders)) {
5131 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
5132 (Int)HG_(cardinalityWSU)( univ_laog ));
5133 }
sewardjb4112022007-11-09 22:49:28 +00005134
sewardjd52392d2008-11-08 20:36:26 +00005135 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
5136 // stats__ga_LL_adds,
5137 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
sewardjb4112022007-11-09 22:49:28 +00005138
sewardjf98e1c02008-10-25 16:22:41 +00005139 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
5140 HG_(stats__LockN_to_P_queries),
5141 HG_(stats__LockN_to_P_get_map_size)() );
sewardjb4112022007-11-09 22:49:28 +00005142
sewardjf98e1c02008-10-25 16:22:41 +00005143 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
5144 HG_(stats__string_table_queries),
5145 HG_(stats__string_table_get_map_size)() );
sewardjc1fb9d22011-02-28 09:03:44 +00005146 if (HG_(clo_track_lockorders)) {
5147 VG_(printf)(" LAOG: %'8d map size\n",
5148 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
5149 VG_(printf)(" LAOG exposition: %'8d map size\n",
5150 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
5151 }
5152
barta0b6b2c2008-07-07 06:49:24 +00005153 VG_(printf)(" locks: %'8lu acquires, "
5154 "%'lu releases\n",
sewardjb4112022007-11-09 22:49:28 +00005155 stats__lockN_acquires,
5156 stats__lockN_releases
5157 );
barta0b6b2c2008-07-07 06:49:24 +00005158 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
sewardjb4112022007-11-09 22:49:28 +00005159
5160 VG_(printf)("\n");
sewardjf98e1c02008-10-25 16:22:41 +00005161 libhb_shutdown(True);
sewardjb4112022007-11-09 22:49:28 +00005162 }
5163}
5164
sewardjf98e1c02008-10-25 16:22:41 +00005165/* FIXME: move these somewhere sane */
5166
5167static
5168void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5169{
5170 Thread* thr;
5171 ThreadId tid;
5172 UWord nActual;
5173 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005174 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005175 tl_assert(thr);
5176 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5177 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
5178 NULL, NULL, 0 );
5179 tl_assert(nActual <= nRequest);
5180 for (; nActual < nRequest; nActual++)
5181 frames[nActual] = 0;
5182}
5183
5184static
sewardj23f12002009-07-24 08:45:08 +00005185ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00005186{
5187 Thread* thr;
5188 ThreadId tid;
5189 ExeContext* ec;
5190 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00005191 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00005192 tl_assert(thr);
5193 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00005194 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00005195 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00005196 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00005197}
5198
5199
sewardjc1fb9d22011-02-28 09:03:44 +00005200static void hg_post_clo_init ( void )
sewardjb4112022007-11-09 22:49:28 +00005201{
sewardjf98e1c02008-10-25 16:22:41 +00005202 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00005203
sewardjc1fb9d22011-02-28 09:03:44 +00005204 /////////////////////////////////////////////
5205 hbthr_root = libhb_init( for_libhb__get_stacktrace,
5206 for_libhb__get_EC );
5207 /////////////////////////////////////////////
5208
5209
5210 if (HG_(clo_track_lockorders))
5211 laog__init();
5212
5213 initialise_data_structures(hbthr_root);
5214}
5215
/* Pre-command-line-option initialisation: register Helgrind with the
   Valgrind core — tool details, the basic tool functions, the error
   management and suppression callbacks, the malloc replacement
   functions, and all the memory/thread event trackers the tool needs
   in order to observe the client program.  The order of these
   registration calls follows the core's tool-interface conventions. */
static void hg_pre_clo_init ( void )
{
   /* Tool identity, shown in Valgrind's startup banner. */
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2012, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   /* Core entry points: post-CLO init, instrumentation, shutdown. */
   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   /* Error reporting and suppression-matching machinery. */
   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   /* Tool-specific command-line options and client requests. */
   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   /* Intercept the client's heap operations so Helgrind can track
      malloc'd block lifetimes (see the hg_cli__* wrappers). */
   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   /* Memory-state trackers: new memory becoming accessible. */
   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   /* Memory-state trackers: memory becoming inaccessible. */
   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   /* Pre/post access hooks used for race detection on reads/writes
      done on the client's behalf by the core (e.g. syscall args). */
   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   /* Low-level thread lifecycle events. */
   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}
5314
5315VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5316
5317/*--------------------------------------------------------------------*/
5318/*--- end hg_main.c ---*/
5319/*--------------------------------------------------------------------*/