
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when the client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/

// FIXME: what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME: put a referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME: accesses to NoAccess areas: change state to Excl?

// FIXME: report errors for accesses of NoAccess memory?

// FIXME: pthread_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for the root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin doubly-linked list of Locks */
/* We need a doubly-linked list in order to handle del_LockN
   correctly and efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }

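/* An illustrative, non-compiled sketch (hence the "#if 0") of how
   the structures above are accessed elsewhere in this file: the
   admin lists are walked via their intrusive links, and map_locks
   is queried through the WordFM API.  The guest address used here
   is invented for the example. */
#if 0
static void example__walk_primary_structures ( void )
{
   Thread* t;
   Lock*   lk = NULL;
   /* every Thread ever created is reachable from admin_threads */
   for (t = admin_threads; t; t = t->admin) { /* inspect t */ }
   /* every Lock ever created is reachable from admin_locks */
   for (lk = admin_locks; lk; lk = lk->admin_next) { /* inspect lk */ }
   /* guest lock address -> Lock* lookups go through map_locks */
   if (VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)0x8000 )) {
      /* lk is now the Lock* recorded for guest address 0x8000 */
   }
}
#endif
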
/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert it into the admin_locks doubly-linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to doubly-linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes it from the admin_locks doubly-linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from doubly-linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add a reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}

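/* A non-compiled sketch of the intended lifecycle of the helpers
   above, for orientation only; the real call sites are the evhH__*
   handlers later in this file, and the guest address is invented. */
#if 0
static void example__lock_lifecycle ( Thread* thr )
{
   Lock* lk = mk_LockN( LK_nonRec, (Addr)0x8000 ); /* unheld, ownerless */
   lockN_acquire_writer( lk, thr );  /* 'thr' now w-holds lk */
   lockN_release( lk, thr );         /* lk reverts to the unheld state */
   del_LockN( lk );                  /* unlinks lk and frees it */
}
#endif
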
/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN
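
/* A hypothetical example of driving the printer above; the intent is
   that SHOW_DATA_STRUCTURES gates a dump like this at the end of a
   run.  Non-compiled sketch only. */
#if 0
   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "end of run" );
#endif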


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}

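/* A non-compiled sketch of how the map_locks helpers above
   cooperate; the lock kind, guest address and tid are invented.
   This mirrors what the lock-acquire and lock-destroy handlers
   later in this file actually do. */
#if 0
static void example__map_locks_usage ( ThreadId tid )
{
   Addr  ga = (Addr)0x8000;
   /* First sight of a lock at 'ga' creates and registers a Lock;
      subsequent calls return the same Lock*. */
   Lock* lk = map_locks_lookup_or_create( LK_nonRec, ga, tid );
   tl_assert( lk == map_locks_maybe_lookup( ga ) );
   /* When the client destroys the lock, the mapping goes away. */
   map_locks_delete( ga );
   tl_assert( NULL == map_locks_maybe_lookup( ga ) );
}
#endif
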

/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}

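/* A non-compiled sketch of how the shadow-memory wrappers above are
   meant to be driven by the event handlers that follow; the address
   and lengths are invented.  A realloc-style move would additionally
   use shadow_mem_scopy_range, which is why that wrapper exists. */
#if 0
static void example__shadow_mem_usage ( Thread* thr )
{
   Addr a = (Addr)0x8000;
   shadow_mem_make_New( thr, a, 4096 );        /* e.g. after an mmap */
   shadow_mem_cread_range( thr, a, 64 );       /* client read of 64 bytes */
   shadow_mem_cwrite_range( thr, a, 64 );      /* client write of 64 bytes */
   shadow_mem_make_NoAccess_NoFX( thr, a, 4096 ); /* e.g. on munmap */
}
#endif
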
984/*----------------------------------------------------------------*/
985/*--- Event handlers (evh__* functions) ---*/
986/*--- plus helpers (evhH__* functions) ---*/
987/*----------------------------------------------------------------*/
988
989/*--------- Event handler helpers (evhH__* functions) ---------*/
990
991/* Create a new segment for 'thr', making it depend (.prev) on its
992 existing segment, bind together the SegmentID and Segment, and
993 return both of them. Also update 'thr' so it references the new
994 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +0000995//zz static
996//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
997//zz /*OUT*/Segment** new_segP,
998//zz Thread* thr )
999//zz {
1000//zz Segment* cur_seg;
1001//zz tl_assert(new_segP);
1002//zz tl_assert(new_segidP);
1003//zz tl_assert(HG_(is_sane_Thread)(thr));
1004//zz cur_seg = map_segments_lookup( thr->csegid );
1005//zz tl_assert(cur_seg);
1006//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1007//zz at their owner thread. */
1008//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1009//zz *new_segidP = alloc_SegmentID();
1010//zz map_segments_add( *new_segidP, *new_segP );
1011//zz thr->csegid = *new_segidP;
1012//zz }
sewardjb4112022007-11-09 22:49:28 +00001013
1014
1015/* The lock at 'lock_ga' has acquired a writer. Make all necessary
1016 updates, and also do all possible error checks. */
1017static
1018void evhH__post_thread_w_acquires_lock ( Thread* thr,
1019 LockKind lkk, Addr lock_ga )
1020{
1021 Lock* lk;
1022
1023 /* Basically what we need to do is call lockN_acquire_writer.
1024 However, that will barf if any 'invalid' lock states would
1025 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001026 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001027 routine.
1028
1029 Because this routine is only called after successful lock
1030 acquisition, we should not be asked to move the lock into any
1031 invalid states. Requests to do so are bugs in libpthread, since
1032 that should have rejected any such requests. */
1033
sewardjf98e1c02008-10-25 16:22:41 +00001034 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001035 /* Try to find the lock. If we can't, then create a new one with
1036 kind 'lkk'. */
1037 lk = map_locks_lookup_or_create(
1038 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001039 tl_assert( HG_(is_sane_LockN)(lk) );
1040
1041 /* check libhb level entities exist */
1042 tl_assert(thr->hbthr);
1043 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001044
1045 if (lk->heldBy == NULL) {
1046 /* the lock isn't held. Simple. */
1047 tl_assert(!lk->heldW);
1048 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001049 /* acquire a dependency from the lock's VCs */
1050 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001051 goto noerror;
1052 }
1053
1054 /* So the lock is already held. If held as a r-lock then
1055 libpthread must be buggy. */
1056 tl_assert(lk->heldBy);
1057 if (!lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001058 HG_(record_error_Misc)(
1059 thr, "Bug in libpthread: write lock "
1060 "granted on rwlock which is currently rd-held");
sewardjb4112022007-11-09 22:49:28 +00001061 goto error;
1062 }
1063
1064 /* So the lock is held in w-mode. If it's held by some other
1065 thread, then libpthread must be buggy. */
sewardj896f6f92008-08-19 08:38:52 +00001066 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
sewardjb4112022007-11-09 22:49:28 +00001067
sewardj896f6f92008-08-19 08:38:52 +00001068 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
sewardjf98e1c02008-10-25 16:22:41 +00001069 HG_(record_error_Misc)(
1070 thr, "Bug in libpthread: write lock "
1071 "granted on mutex/rwlock which is currently "
1072 "wr-held by a different thread");
sewardjb4112022007-11-09 22:49:28 +00001073 goto error;
1074 }
1075
1076 /* So the lock is already held in w-mode by 'thr'. That means this
1077 is an attempt to lock it recursively, which is only allowable
1078 for LK_mbRec kinded locks. Since this routine is called only
1079 once the lock has been acquired, this must also be a libpthread
1080 bug. */
1081 if (lk->kind != LK_mbRec) {
sewardjf98e1c02008-10-25 16:22:41 +00001082 HG_(record_error_Misc)(
1083 thr, "Bug in libpthread: recursive write lock "
1084 "granted on mutex/wrlock which does not "
1085 "support recursion");
sewardjb4112022007-11-09 22:49:28 +00001086 goto error;
1087 }
1088
1089 /* So we are recursively re-locking a lock we already w-hold. */
1090 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001091 /* acquire a dependency from the lock's VC. Probably pointless,
1092 but also harmless. */
1093 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001094 goto noerror;
1095
1096 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001097 if (HG_(clo_track_lockorders)) {
1098 /* check lock order acquisition graph, and update. This has to
1099 happen before the lock is added to the thread's locksetA/W. */
1100 laog__pre_thread_acquires_lock( thr, lk );
1101 }
sewardjb4112022007-11-09 22:49:28 +00001102 /* update the thread's held-locks set */
1103 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1104 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
1105 /* fall through */
1106
1107 error:
sewardjf98e1c02008-10-25 16:22:41 +00001108 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001109}
1110
1111
1112/* The lock at 'lock_ga' has acquired a reader. Make all necessary
1113 updates, and also do all possible error checks. */
1114static
1115void evhH__post_thread_r_acquires_lock ( Thread* thr,
1116 LockKind lkk, Addr lock_ga )
1117{
1118 Lock* lk;
1119
1120 /* Basically what we need to do is call lockN_acquire_reader.
1121 However, that will barf if any 'invalid' lock states would
1122 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001123 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001124 routine.
1125
1126 Because this routine is only called after successful lock
1127 acquisition, we should not be asked to move the lock into any
1128 invalid states. Requests to do so are bugs in libpthread, since
1129 that should have rejected any such requests. */
1130
sewardjf98e1c02008-10-25 16:22:41 +00001131 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001132 /* Try to find the lock. If we can't, then create a new one with
1133 kind 'lkk'. Only a reader-writer lock can be read-locked,
1134 hence the first assertion. */
1135 tl_assert(lkk == LK_rdwr);
1136 lk = map_locks_lookup_or_create(
1137 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001138 tl_assert( HG_(is_sane_LockN)(lk) );
1139
1140 /* check libhb level entities exist */
1141 tl_assert(thr->hbthr);
1142 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001143
1144 if (lk->heldBy == NULL) {
1145 /* the lock isn't held. Simple. */
1146 tl_assert(!lk->heldW);
1147 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001148 /* acquire a dependency from the lock's VC */
1149 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001150 goto noerror;
1151 }
1152
1153 /* So the lock is already held. If held as a w-lock then
1154 libpthread must be buggy. */
1155 tl_assert(lk->heldBy);
1156 if (lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001157 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1158 "granted on rwlock which is "
1159 "currently wr-held");
sewardjb4112022007-11-09 22:49:28 +00001160 goto error;
1161 }
1162
1163 /* Easy enough. In short anybody can get a read-lock on a rwlock
1164 provided it is either unlocked or already in rd-held. */
1165 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001166 /* acquire a dependency from the lock's VC. Probably pointless,
1167 but also harmless. */
1168 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001169 goto noerror;
1170
1171 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001172 if (HG_(clo_track_lockorders)) {
1173 /* check lock order acquisition graph, and update. This has to
1174 happen before the lock is added to the thread's locksetA/W. */
1175 laog__pre_thread_acquires_lock( thr, lk );
1176 }
sewardjb4112022007-11-09 22:49:28 +00001177 /* update the thread's held-locks set */
1178 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1179 /* but don't update thr->locksetW, since lk is only rd-held */
1180 /* fall through */
1181
1182 error:
sewardjf98e1c02008-10-25 16:22:41 +00001183 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001184}
1185
1186
1187/* The lock at 'lock_ga' is just about to be unlocked. Make all
1188 necessary updates, and also do all possible error checks. */
1189static
1190void evhH__pre_thread_releases_lock ( Thread* thr,
1191 Addr lock_ga, Bool isRDWR )
1192{
1193 Lock* lock;
1194 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001195 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001196
1197 /* This routine is called prior to a lock release, before
1198 libpthread has had a chance to validate the call. Hence we need
1199 to detect and reject any attempts to move the lock into an
1200 invalid state. Such attempts are bugs in the client.
1201
1202 isRDWR is True if we know from the wrapper context that lock_ga
1203 should refer to a reader-writer lock, and is False if [ditto]
1204 lock_ga should refer to a standard mutex. */
1205
sewardjf98e1c02008-10-25 16:22:41 +00001206 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001207 lock = map_locks_maybe_lookup( lock_ga );
1208
1209 if (!lock) {
1210 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1211 the client is trying to unlock it. So complain, then ignore
1212 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001213 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001214 return;
1215 }
1216
1217 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001218 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001219
1220 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001221 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1222 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001223 }
1224 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001225 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1226 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001227 }
1228
1229 if (!lock->heldBy) {
1230 /* The lock is not held. This indicates a serious bug in the
1231 client. */
1232 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001233 HG_(record_error_UnlockUnlocked)( thr, lock );
sewardjb4112022007-11-09 22:49:28 +00001234 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1235 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1236 goto error;
1237 }
1238
sewardjf98e1c02008-10-25 16:22:41 +00001239 /* test just above dominates */
1240 tl_assert(lock->heldBy);
1241 was_heldW = lock->heldW;
1242
sewardjb4112022007-11-09 22:49:28 +00001243 /* The lock is held. Is this thread one of the holders? If not,
1244 report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock. This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail. So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock. So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero. This means we don't hold the lock any more. But
         if it's a rwlock held in r-mode, someone else could still
         hold it. Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock. We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case. It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.) Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody. Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
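
/* A minimal sketch of hypothetical client code that reaches the
   UnlockForeign path above (names are illustrative only):

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      void* unlocker ( void* v ) {
         pthread_mutex_unlock(&mx); // never locked by this thread;
         return NULL;               // if another thread holds mx,
      }                             // this is an UnlockForeign error
*/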


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread? This is
   absolutely performance critical. We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'. Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault. Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code. Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge. It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
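
/* Sketch of the assumed calling discipline, for clarity (this is a
   restatement of the comment above, not new machinery):

      evh__start_client_code(tid)    // cache filled from map_threads
        ... memory access events ... // each uses the fast path,
                                     // get_current_Thread_in_C_C()
      evh__stop_client_code(tid)     // cache cleared to NULL

   so the slow map_threads_lookup path only runs outside client code,
   e.g. during initialisation. */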

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests. If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm. What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this. If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs. If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb. So force them to NoAccess, so that all
   // VTS references in the affected area are dropped. Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}
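
/* A hypothetical client pattern motivating the above (a sketch, not
   from any particular program):

      for (i = 0; i < 1000000; i++) {
         void* p = mmap(NULL, 1 << 20, PROT_READ|PROT_WRITE,
                        MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
         // ... touch p from several threads ...
         munmap(p, 1 << 20);
      }

   With ASLR, each iteration may map at a fresh address, so without
   the forced NoAccess above, stale VTS references would pile up. */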

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress. That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works). In which case there has already been a prior
      sync event. So in any case, just let the thread exit. On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
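
/* Hypothetical client code that draws the "Exiting thread still
   holds N locks" complaint above (illustrative only):

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      void* worker ( void* v ) {
         pthread_mutex_lock(&mx);
         return NULL;   // thread exits with mx still held
      }
*/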

/* This is called immediately after fork, for the child only. 'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
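
/* The POSIX behaviour this mirrors, as a hypothetical client sketch:

      // main thread has created workers t1 and t2
      pid_t pid = fork();
      if (pid == 0) {
         // In the child, only the forking thread survives; t1 and t2
         // have no counterparts here, so their map_threads slots
         // must be scrubbed, as done above.
      }
*/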

static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread* thr_s;
   Thread* thr_q;
   Thr*    hbthr_s;
   Thr*    hbthr_q;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Ideally the last arg of _so_send here would be False, since the
      sending thread doesn't actually exist any more and we don't want
      _so_send to try taking stack snapshots of it. Note, though, that
      the call below currently does a strong send. */
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped. Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls. (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.) See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks. No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it. The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it. Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
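
/* The jointwice situation referred to above, as hypothetical client
   code:

      pthread_t t;
      pthread_create(&t, NULL, fn, NULL);
      pthread_join(t, NULL);   // ok: first join
      pthread_join(t, NULL);   // bug: should fail with ESRCH; if
                               // libpthread doesn't fail it, the
                               // wrapper in hg_intercepts.c filters
                               // the duplicate notification
*/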

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_read_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place. Actually we should check the whole string, not just
   // the start address, but that's too much trouble. At least
   // checking the first byte is better than nothing. See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   len = VG_(strlen)( (Char*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid: both branches currently do the same
   // thing, so the is_inited distinction makes no difference here.
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free. This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_NoFX( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread* thr = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}


/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked. Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument" );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing). Duh. Deadlock coming up; but at least
         produce an error message. */
      HChar* errstr = "Attempt to re-lock a "
                      "non-recursive lock I already hold";
      HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
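
/* The re-lock case above, as a hypothetical client sketch:

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER; // non-recursive
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // same thread: deadlock; the message
                                 // above is emitted just beforehand
*/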

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge. Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

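/* Assumed event sequence for straightforward client use (a sketch,
   grounded only in the handlers below):

      pthread_spin_init(&sl, 0);   // _PRE: lock unknown, no-op
                                   // _POST: creates an LK_nonRec lock
      pthread_spin_lock(&sl);      // routed to the mutex LOCK events
      pthread_spin_unlock(&sl);    // _PRE: held, so release and send
                                   // _POST: lock already known, no-op
*/
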
static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it. Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge. Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held. So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery. If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}


/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking). When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO. This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero. Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;

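/* The dynamic (CV,MX) binding rule, as a hypothetical client sketch:

      pthread_cond_wait(&cv, &mx1); // first wait: binds cv to mx1
      ...
      pthread_cond_wait(&cv, &mx2); // while waiters on (cv,mx1)
                                    // remain, this draws "cond is
                                    // associated with a different
                                    // mutex" (see _WAIT_PRE below)
*/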

/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
      tl_assert(map_cond_to_CVInfo != NULL);
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so     = so;
      cvi->mx_ga  = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static void map_cond_to_CVInfo_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_CVInfo_INIT();
   if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'. As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO. This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm. POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired." For that reason, print "dubious" if the lock isn't
   // held by any thread. Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about. In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL. Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error. We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr,
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
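
/* The "dubious" case above, as a hypothetical client sketch:

      // signalling thread, with the associated mutex NOT held:
      ready = 1;                  // races against the woken waiter's
      pthread_cond_signal(&cv);   // read of 'ready'; nothing orders
                                  // this store before that read
*/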

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments. There are various ways to be
      a bozo. Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}

static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully. Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!libhb_so_everSent(cvi->so)) {
      /* Hmm. How can a wait on 'cond' succeed if nobody signalled
         it? If this happened it would surely be a bug in the threads
         library. Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded without prior "
                                   "pthread_cond_signal");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}

static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events. The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_CVInfo_delete( cond );
}


/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured to us by the wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured to us by the wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock. Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,wr}lock with a "
              "pthread_mutex_t* argument" );
   }
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured to us by the wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs. When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it. This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do. But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S. T3 cannot complete its waits without both T1 and T2
   posting. The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them. This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about. We should. */

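/* The T1/T2/T3 scenario above, as a hypothetical client sketch
   ('x', 'y' and 'use' are illustrative placeholders):

      sem_t s; // initialised with sem_init(&s, 0, 0)
      void* t1 ( void* v ) { x = 1; sem_post(&s); return NULL; }
      void* t2 ( void* v ) { y = 2; sem_post(&s); return NULL; }
      void* t3 ( void* v ) {
         sem_wait(&s); sem_wait(&s); // pops both SOs, so t3 now
         use(x); use(y);             // depends on t1 and t2; no race
         return NULL;                // is reported on x or y
      }
*/
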
/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
      tl_assert(map_sem_to_SO_stack != NULL);
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd. No stack for this semaphore. */
      return NULL;
   }
}

static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack. This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}

static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack. This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

sewardjf98e1c02008-10-25 16:22:41 +00002633 /* If we don't do this check, the following while loop runs us out
2634 of memory for stupid initial values of 'value'. */
2635 if (value > 10000) {
2636 HG_(record_error_Misc)(
2637 thr, "sem_init: initial value exceeds 10000; using 10000" );
2638 value = 10000;
2639 }
sewardj11e352f2007-11-30 11:11:02 +00002640
sewardjf98e1c02008-10-25 16:22:41 +00002641 /* Now create 'valid' new SOs for the thread, do a strong send to
2642 each of them, and push them all on the stack. */
2643 for (; value > 0; value--) {
2644 Thr* hbthr = thr->hbthr;
2645 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002646
sewardjf98e1c02008-10-25 16:22:41 +00002647 so = libhb_so_alloc();
2648 libhb_so_send( hbthr, so, True/*strong send*/ );
2649 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002650 }
2651}
2652
2653static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002654{
sewardjf98e1c02008-10-25 16:22:41 +00002655 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2656 it (iow, write our VC into it, then tick ours), and push the SO
2657 on a stack of SOs associated with 'sem'. This is later used
2658 by other thread(s) which successfully exit from a sem_wait on
2659 the same sem; by doing a strong recv from SOs popped off the
2660 stack, they acquire dependencies on the posting thread
2661 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002662
sewardjf98e1c02008-10-25 16:22:41 +00002663 Thread* thr;
2664 SO* so;
2665 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002666
2667 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002668 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002669 (Int)tid, (void*)sem );
2670
2671 thr = map_threads_maybe_lookup( tid );
2672 tl_assert(thr); /* cannot fail - Thread* must already exist */
2673
2674 // error-if: sem is bogus
2675
sewardjf98e1c02008-10-25 16:22:41 +00002676 hbthr = thr->hbthr;
2677 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002678
sewardjf98e1c02008-10-25 16:22:41 +00002679 so = libhb_so_alloc();
2680 libhb_so_send( hbthr, so, True/*strong send*/ );
2681 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002682}
2683
sewardj11e352f2007-11-30 11:11:02 +00002684static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002685{
sewardjf98e1c02008-10-25 16:22:41 +00002686 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2687 the 'sem' from this semaphore's SO-stack, and do a strong recv
2688 from it. This creates a dependency back to one of the post-ers
2689 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002690
sewardjf98e1c02008-10-25 16:22:41 +00002691 Thread* thr;
2692 SO* so;
2693 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002694
2695 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002696 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002697 (Int)tid, (void*)sem );
2698
2699 thr = map_threads_maybe_lookup( tid );
2700 tl_assert(thr); /* cannot fail - Thread* must already exist */
2701
2702 // error-if: sem is bogus
2703
sewardjf98e1c02008-10-25 16:22:41 +00002704 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002705
sewardjf98e1c02008-10-25 16:22:41 +00002706 if (so) {
2707 hbthr = thr->hbthr;
2708 tl_assert(hbthr);
2709
2710 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2711 libhb_so_dealloc(so);
2712 } else {
2713 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2714 If this happened it would surely be a bug in the threads
2715 library. */
2716 HG_(record_error_Misc)(
2717 thr, "Bug in libpthread: sem_wait succeeded on"
2718 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002719 }
2720}
2721
2722
sewardj9f569b72008-11-13 13:33:09 +00002723/* -------------------------------------------------------- */
2724/* -------------- events to do with barriers -------------- */
2725/* -------------------------------------------------------- */
2726
2727typedef
2728 struct {
2729 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002730 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002731 UWord size; /* declared size */
2732 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2733 }
2734 Bar;
2735
2736static Bar* new_Bar ( void ) {
2737 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2738 tl_assert(bar);
2739 /* all fields are zero */
2740 tl_assert(bar->initted == False);
2741 return bar;
2742}
2743
2744static void delete_Bar ( Bar* bar ) {
2745 tl_assert(bar);
2746 if (bar->waiting)
2747 VG_(deleteXA)(bar->waiting);
2748 HG_(free)(bar);
2749}
2750
2751/* A mapping which stores auxiliary data for barriers. */
2752
2753/* pthread_barrier_t* -> Bar* */
2754static WordFM* map_barrier_to_Bar = NULL;
2755
2756static void map_barrier_to_Bar_INIT ( void ) {
2757 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2758 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2759 "hg.mbtBI.1", HG_(free), NULL );
2760 tl_assert(map_barrier_to_Bar != NULL);
2761 }
2762}
2763
2764static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2765 UWord key, val;
2766 map_barrier_to_Bar_INIT();
2767 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2768 tl_assert(key == (UWord)barrier);
2769 return (Bar*)val;
2770 } else {
2771 Bar* bar = new_Bar();
2772 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2773 return bar;
2774 }
2775}
2776
2777static void map_barrier_to_Bar_delete ( void* barrier ) {
2778 UWord keyW, valW;
2779 map_barrier_to_Bar_INIT();
2780 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2781 Bar* bar = (Bar*)valW;
2782 tl_assert(keyW == (UWord)barrier);
2783 delete_Bar(bar);
2784 }
2785}
2786
2787
2788static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2789 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002790 UWord count,
2791 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002792{
2793 Thread* thr;
2794 Bar* bar;
2795
2796 if (SHOW_EVENTS >= 1)
2797 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002798 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2799 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002800
2801 thr = map_threads_maybe_lookup( tid );
2802 tl_assert(thr); /* cannot fail - Thread* must already exist */
2803
2804 if (count == 0) {
2805 HG_(record_error_Misc)(
2806 thr, "pthread_barrier_init: 'count' argument is zero"
2807 );
2808 }
2809
sewardj406bac82010-03-03 23:03:40 +00002810 if (resizable != 0 && resizable != 1) {
2811 HG_(record_error_Misc)(
2812 thr, "pthread_barrier_init: invalid 'resizable' argument"
2813 );
2814 }
2815
sewardj9f569b72008-11-13 13:33:09 +00002816 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2817 tl_assert(bar);
2818
2819 if (bar->initted) {
2820 HG_(record_error_Misc)(
2821 thr, "pthread_barrier_init: barrier is already initialised"
2822 );
2823 }
2824
2825 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2826 tl_assert(bar->initted);
2827 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002828 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002829 );
2830 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2831 }
2832 if (!bar->waiting) {
2833 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2834 sizeof(Thread*) );
2835 }
2836
2837 tl_assert(bar->waiting);
2838 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002839 bar->initted = True;
2840 bar->resizable = resizable == 1 ? True : False;
2841 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002842}
2843
2844
2845static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2846 void* barrier )
2847{
sewardj553655c2008-11-14 19:41:19 +00002848 Thread* thr;
2849 Bar* bar;
2850
sewardj9f569b72008-11-13 13:33:09 +00002851 /* Deal with destroy events. The only purpose is to free storage
2852 associated with the barrier, so as to avoid any possible
2853 resource leaks. */
2854 if (SHOW_EVENTS >= 1)
2855 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2856 "(tid=%d, barrier=%p)\n",
2857 (Int)tid, (void*)barrier );
2858
sewardj553655c2008-11-14 19:41:19 +00002859 thr = map_threads_maybe_lookup( tid );
2860 tl_assert(thr); /* cannot fail - Thread* must already exist */
2861
2862 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2863 tl_assert(bar);
2864
2865 if (!bar->initted) {
2866 HG_(record_error_Misc)(
2867 thr, "pthread_barrier_destroy: barrier was never initialised"
2868 );
2869 }
2870
2871 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2872 HG_(record_error_Misc)(
2873 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2874 );
2875 }
2876
sewardj9f569b72008-11-13 13:33:09 +00002877 /* Maybe we shouldn't do this; just let it persist, so that when it
2878 is reinitialised we don't need to do any dynamic memory
2879 allocation? The downside is a potentially unlimited space leak,
2880 if the client creates (in turn) a large number of barriers all
2881 at different locations. Note that if we do later move to the
2882 don't-delete-it scheme, we need to mark the barrier as
2883 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002884 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002885 map_barrier_to_Bar_delete( barrier );
2886}
2887
2888
sewardj406bac82010-03-03 23:03:40 +00002889/* All the threads have arrived. Now do the Interesting Bit. Get a
2890 new synchronisation object and do a weak send to it from all the
2891 participating threads. This makes its vector clocks be the join of
2892 all the individual threads' vector clocks. Then do a strong
2893 receive from it back to all threads, so that their VCs are a copy
2894 of it (hence are all equal to the join of their original VCs.) */
2895static void do_barrier_cross_sync_and_empty ( Bar* bar )
2896{
2897 /* XXX check bar->waiting has no duplicates */
2898 UWord i;
2899 SO* so = libhb_so_alloc();
2900
2901 tl_assert(bar->waiting);
2902 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2903
2904 /* compute the join ... */
2905 for (i = 0; i < bar->size; i++) {
2906 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2907 Thr* hbthr = t->hbthr;
2908 libhb_so_send( hbthr, so, False/*weak send*/ );
2909 }
2910 /* ... and distribute to all threads */
2911 for (i = 0; i < bar->size; i++) {
2912 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2913 Thr* hbthr = t->hbthr;
2914 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2915 }
2916
2917 /* finally, we must empty out the waiting vector */
2918 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2919
2920 /* and we don't need this any more. Perhaps a stack-allocated
2921 SO would be better? */
2922 libhb_so_dealloc(so);
2923}
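
/* Worked example (illustrative only): suppose three threads arrive
   at the barrier with vector clocks T1=[3,0,0], T2=[0,5,0] and
   T3=[0,0,2].  The three weak sends accumulate the join [3,5,2] in
   the SO; the three strong recvs then hand that join back, so every
   thread leaves with a VC that dominates [3,5,2].  Hence every
   pre-barrier access happens-before every post-barrier access, which
   is exactly the required barrier semantics. */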
2924
2925
sewardj9f569b72008-11-13 13:33:09 +00002926static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2927 void* barrier )
2928{
sewardj1c466b72008-11-19 11:52:14 +00002929 /* This function gets called after a client thread calls
2930 pthread_barrier_wait but before it arrives at the real
2931 pthread_barrier_wait.
2932
2933 Why is the following correct? It's a bit subtle.
2934
2935 If this is not the last thread arriving at the barrier, we simply
2936 note its presence and return. Because valgrind (at least as of
2937 Nov 08) is single threaded, we are guaranteed safe from any race
2938 conditions when in this function -- no other client threads are
2939 running.
2940
2941 If this is the last thread, then we are again the only running
2942 thread. All the other threads will have either arrived at the
2943 real pthread_barrier_wait or are on their way to it, but in any
2944 case are guaranteed not to be able to move past it, because this
2945 thread is currently in this function and so has not yet arrived
2946 at the real pthread_barrier_wait. That means that:
2947
2948 1. While we are in this function, none of the other threads
2949 waiting at the barrier can move past it.
2950
2951 2. When this function returns (and simulated execution resumes),
2952 this thread and all other waiting threads will be able to move
2953 past the real barrier.
2954
2955 Because of this, it is now safe to update the vector clocks of
2956 all threads, to represent the fact that they all arrived at the
2957 barrier and have all moved on. There is no danger of any
2958 complications to do with some threads leaving the barrier and
2959 racing back round to the front, whilst others are still leaving
2960 (which is the primary source of complication in correct handling/
2961 implementation of barriers). That can't happen because we update
2962 our data structures here so as to indicate that the threads have
2963 passed the barrier, even though, as per (2) above, they are
2964 guaranteed not to pass the barrier until we return.
2965
2966 This relies crucially on Valgrind being single threaded. If that
2967 changes, this will need to be reconsidered.
2968 */
sewardj9f569b72008-11-13 13:33:09 +00002969 Thread* thr;
2970 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00002971 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00002972
2973 if (SHOW_EVENTS >= 1)
2974 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2975 "(tid=%d, barrier=%p)\n",
2976 (Int)tid, (void*)barrier );
2977
2978 thr = map_threads_maybe_lookup( tid );
2979 tl_assert(thr); /* cannot fail - Thread* must already exist */
2980
2981 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2982 tl_assert(bar);
2983
2984 if (!bar->initted) {
2985 HG_(record_error_Misc)(
2986 thr, "pthread_barrier_wait: barrier is uninitialised"
2987 );
2988 return; /* client is broken .. avoid assertions below */
2989 }
2990
2991 /* guaranteed by _INIT_PRE above */
2992 tl_assert(bar->size > 0);
2993 tl_assert(bar->waiting);
2994
2995 VG_(addToXA)( bar->waiting, &thr );
2996
2997 /* guaranteed by this function */
2998 present = VG_(sizeXA)(bar->waiting);
2999 tl_assert(present > 0 && present <= bar->size);
3000
3001 if (present < bar->size)
3002 return;
3003
sewardj406bac82010-03-03 23:03:40 +00003004 do_barrier_cross_sync_and_empty(bar);
3005}
sewardj9f569b72008-11-13 13:33:09 +00003006
sewardj9f569b72008-11-13 13:33:09 +00003007
sewardj406bac82010-03-03 23:03:40 +00003008static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3009 void* barrier,
3010 UWord newcount )
3011{
3012 Thread* thr;
3013 Bar* bar;
3014 UWord present;
3015
3016 if (SHOW_EVENTS >= 1)
3017 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3018 "(tid=%d, barrier=%p, newcount=%lu)\n",
3019 (Int)tid, (void*)barrier, newcount );
3020
3021 thr = map_threads_maybe_lookup( tid );
3022 tl_assert(thr); /* cannot fail - Thread* must already exist */
3023
3024 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3025 tl_assert(bar);
3026
3027 if (!bar->initted) {
3028 HG_(record_error_Misc)(
3029 thr, "pthread_barrier_resize: barrier is uninitialised"
3030 );
3031 return; /* client is broken .. avoid assertions below */
3032 }
3033
3034 if (!bar->resizable) {
3035 HG_(record_error_Misc)(
3036 thr, "pthread_barrier_resize: barrier is may not be resized"
3037 );
3038 return; /* client is broken .. avoid assertions below */
3039 }
3040
3041 if (newcount == 0) {
3042 HG_(record_error_Misc)(
3043 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3044 );
3045 return; /* client is broken .. avoid assertions below */
3046 }
3047
3048 /* guaranteed by _INIT_PRE above */
3049 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003050 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003051 /* Guaranteed by this fn */
3052 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003053
sewardj406bac82010-03-03 23:03:40 +00003054 if (newcount >= bar->size) {
3055 /* Increasing the capacity. There's no possibility of threads
3056 moving on from the barrier in this situation, so just note
3057 the fact and do nothing more. */
3058 bar->size = newcount;
3059 } else {
3060 /* Decreasing the capacity. If we decrease it to be equal or
3061 below the number of waiting threads, they will now move past
3062 the barrier, so need to mess with dep edges in the same way
3063 as if the barrier had filled up normally. */
3064 present = VG_(sizeXA)(bar->waiting);
3065 tl_assert(present >= 0 && present <= bar->size);
3066 if (newcount <= present) {
3067 bar->size = present; /* keep the cross_sync call happy */
3068 do_barrier_cross_sync_and_empty(bar);
3069 }
3070 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003071 }
sewardj9f569b72008-11-13 13:33:09 +00003072}
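
/* Example (illustrative only): a barrier initialised with count 4,
   at which 3 threads currently wait.  A resize to newcount 3 gives
   newcount <= present, so the waiters must now be released:
   bar->size is temporarily set to present (3) to satisfy the
   assertion in do_barrier_cross_sync_and_empty, the cross-sync is
   performed, and only then is bar->size set to the new capacity.  A
   resize to 5, by contrast, just records the new size, since
   enlarging a barrier can never release anybody. */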
3073
3074
sewardjed2e72e2009-08-14 11:08:24 +00003075/* ----------------------------------------------------- */
3076/* ----- events to do with user-specified HB edges ----- */
3077/* ----------------------------------------------------- */
3078
3079/* A mapping from arbitrary UWord tag to the SO associated with it.
3080 The UWord tags are meaningless to us, interpreted only by the
3081 user. */
3082
3083
3084
3085/* UWord -> SO* */
3086static WordFM* map_usertag_to_SO = NULL;
3087
3088static void map_usertag_to_SO_INIT ( void ) {
3089 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3090 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3091 "hg.mutS.1", HG_(free), NULL );
3092 tl_assert(map_usertag_to_SO != NULL);
3093 }
3094}
3095
3096static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3097 UWord key, val;
3098 map_usertag_to_SO_INIT();
3099 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3100 tl_assert(key == (UWord)usertag);
3101 return (SO*)val;
3102 } else {
3103 SO* so = libhb_so_alloc();
3104 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3105 return so;
3106 }
3107}
3108
sewardj6015d0e2011-03-11 19:10:48 +00003109static void map_usertag_to_SO_delete ( UWord usertag ) {
3110 UWord keyW, valW;
3111 map_usertag_to_SO_INIT();
3112 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3113 SO* so = (SO*)valW;
3114 tl_assert(keyW == usertag);
3115 tl_assert(so);
3116 libhb_so_dealloc(so);
3117 }
3118}
sewardjed2e72e2009-08-14 11:08:24 +00003119
3120
3121static
3122void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3123{
3124 /* TID is just about to notionally send a message on a notional
3125 abstract synchronisation object whose identity is given by
3126 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003127 bound, and do a 'weak send' on the SO. This joins the vector
3128 clocks from this thread into any vector clocks already present
3129 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003130 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003131 thereby acquiring a dependency on all the events that have
3132 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003133 Thread* thr;
3134 SO* so;
3135
3136 if (SHOW_EVENTS >= 1)
3137 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3138 (Int)tid, usertag );
3139
3140 thr = map_threads_maybe_lookup( tid );
3141 tl_assert(thr); /* cannot fail - Thread* must already exist */
3142
3143 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3144 tl_assert(so);
3145
sewardj8c50d3c2011-03-11 18:38:12 +00003146 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003147}
3148
3149static
3150void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3151{
3152 /* TID has just notionally received a message from a notional
3153 abstract synchronisation object whose identity is given by
3154 USERTAG. Bind USERTAG to a real SO if it is not already so
3155 bound. If the SO has at some point in the past been 'sent' on,
3156 do a 'strong receive' on it, thereby acquiring a dependency on
3157 the sender. */
3158 Thread* thr;
3159 SO* so;
3160
3161 if (SHOW_EVENTS >= 1)
3162 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3163 (Int)tid, usertag );
3164
3165 thr = map_threads_maybe_lookup( tid );
3166 tl_assert(thr); /* cannot fail - Thread* must already exist */
3167
3168 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3169 tl_assert(so);
3170
3171 /* Acquire a dependency on it. If the SO has never so far been
3172 sent on, then libhb_so_recv will do nothing. So we're safe
3173 regardless of SO's history. */
3174 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3175}
3176
sewardj6015d0e2011-03-11 19:10:48 +00003177static
3178void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3179{
3180 /* TID declares that any happens-before edges notionally stored in
3181 USERTAG can be deleted. If (as would normally be the case) a
3182 SO is associated with USERTAG, then the association is removed
3183 and all resources associated with SO are freed. Importantly,
3184 that frees up any VTSs stored in SO. */
3185 if (SHOW_EVENTS >= 1)
3186 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3187 (Int)tid, usertag );
3188
3189 map_usertag_to_SO_delete( usertag );
3190}
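
/* Client-side view (an illustrative sketch, assuming the usual
   helgrind.h annotation macros, which route to the three events
   above):

      #include "helgrind.h"

      int payload;

      void producer ( void ) {
         payload = 42;
         ANNOTATE_HAPPENS_BEFORE(&payload);  // weak send on the SO
      }

      void consumer ( void ) {
         ANNOTATE_HAPPENS_AFTER(&payload);   // strong recv from it
         use(payload);                       // not flagged as a race
      }

      void teardown ( void ) {
         ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&payload); // frees the SO
      }

   The usertag is simply the address &payload here; any UWord that
   both sides agree on would serve equally well. */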
3191
sewardjed2e72e2009-08-14 11:08:24 +00003192
sewardjb4112022007-11-09 22:49:28 +00003193/*--------------------------------------------------------------*/
3194/*--- Lock acquisition order monitoring ---*/
3195/*--------------------------------------------------------------*/
3196
3197/* FIXME: here are some optimisations still to do in
3198 laog__pre_thread_acquires_lock.
3199
3200 The graph is structured so that if L1 --*--> L2 then L1 must be
3201 acquired before L2.
3202
3203 The common case is that some thread T holds (eg) L1 L2 and L3 and
3204 is repeatedly acquiring and releasing Ln, and there is no ordering
3205 error in what it is doing. Hence it repeatly:
3206
3207 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3208 produces the answer No (because there is no error).
3209
3210 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3211 (because they already got added the first time T acquired Ln).
3212
3213 Hence cache these two events:
3214
3215 (1) Cache result of the query from last time. Invalidate the cache
3216 any time any edges are added to or deleted from laog.
3217
3218 (2) Cache these add-edge requests and ignore them if said edges
3219 have already been added to laog. Invalidate the cache any time
3220 any edges are deleted from laog.
3221*/
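
/* A minimal sketch (not implemented; all names hypothetical) of what
   the query cache described above might look like:

      static Lock*     cache_qry_lk   = NULL; // last lock queried
      static WordSetID cache_qry_held = 0;    // lockset queried against
      static Lock*     cache_qry_res  = NULL; // and the DFS answer

      static Bool laog__cached_query ( Lock* lk, WordSetID held,
                                       Lock** res ) {
         if (lk == cache_qry_lk && held == cache_qry_held) {
            *res = cache_qry_res;  // hit: the DFS can be skipped
            return True;
         }
         return False;
      }

   WordSetIDs are interned, so comparing them with == is a valid
   equality test.  Per the rules above, the query cache would be
   flushed whenever laog__add_edge or laog__del_edge changes the
   graph, and the add-edge cache whenever edges are deleted. */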
3222
3223typedef
3224 struct {
3225 WordSetID inns; /* in univ_laog */
3226 WordSetID outs; /* in univ_laog */
3227 }
3228 LAOGLinks;
3229
3230/* lock order acquisition graph */
3231static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3232
3233/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3234 where that edge was created, so that we can show the user later if
3235 we need to. */
3236typedef
3237 struct {
3238 Addr src_ga; /* Lock guest addresses for */
3239 Addr dst_ga; /* src/dst of the edge */
3240 ExeContext* src_ec; /* And corresponding places where that */
3241 ExeContext* dst_ec; /* ordering was established */
3242 }
3243 LAOGLinkExposition;
3244
sewardj250ec2e2008-02-15 22:02:30 +00003245static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003246 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3247 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3248 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3249 if (llx1->src_ga < llx2->src_ga) return -1;
3250 if (llx1->src_ga > llx2->src_ga) return 1;
3251 if (llx1->dst_ga < llx2->dst_ga) return -1;
3252 if (llx1->dst_ga > llx2->dst_ga) return 1;
3253 return 0;
3254}
3255
3256static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3257/* end EXPOSITION ONLY */
3258
3259
sewardja65db102009-01-26 10:45:16 +00003260__attribute__((noinline))
3261static void laog__init ( void )
3262{
3263 tl_assert(!laog);
3264 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003265 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003266
3267 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3268 HG_(free), NULL/*unboxedcmp*/ );
3269
3270 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3271 cmp_LAOGLinkExposition );
3272 tl_assert(laog);
3273 tl_assert(laog_exposition);
3274}
3275
sewardjb4112022007-11-09 22:49:28 +00003276static void laog__show ( Char* who ) {
3277 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003278 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003279 Lock* me;
3280 LAOGLinks* links;
3281 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003282 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003283 me = NULL;
3284 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003285 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003286 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003287 tl_assert(me);
3288 tl_assert(links);
3289 VG_(printf)(" node %p:\n", me);
3290 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3291 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003292 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003293 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3294 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003295 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003296 me = NULL;
3297 links = NULL;
3298 }
sewardj896f6f92008-08-19 08:38:52 +00003299 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003300 VG_(printf)("}\n");
3301}
3302
3303__attribute__((noinline))
3304static void laog__add_edge ( Lock* src, Lock* dst ) {
3305 Word keyW;
3306 LAOGLinks* links;
3307 Bool presentF, presentR;
3308 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3309
3310 /* Take the opportunity to sanity check the graph. Record in
3311 presentF if there is already a src->dst mapping in this node's
3312 forwards links, and presentR if there is already a src->dst
3313 mapping in this node's backwards links. They should agree!
3314 Also, we need to know whether the edge was already present so as
3315 to decide whether or not to update the link details mapping. We
3316 can compute presentF and presentR essentially for free, so may
3317 as well do this always. */
3318 presentF = presentR = False;
3319
3320 /* Update the out edges for src */
3321 keyW = 0;
3322 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003323 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003324 WordSetID outs_new;
3325 tl_assert(links);
3326 tl_assert(keyW == (Word)src);
3327 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3328 presentF = outs_new == links->outs;
3329 links->outs = outs_new;
3330 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003331 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003332 links->inns = HG_(emptyWS)( univ_laog );
3333 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003334 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003335 }
3336 /* Update the in edges for dst */
3337 keyW = 0;
3338 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003339 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003340 WordSetID inns_new;
3341 tl_assert(links);
3342 tl_assert(keyW == (Word)dst);
3343 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3344 presentR = inns_new == links->inns;
3345 links->inns = inns_new;
3346 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003347 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003348 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3349 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003350 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003351 }
3352
3353 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3354
3355 if (!presentF && src->acquired_at && dst->acquired_at) {
3356 LAOGLinkExposition expo;
3357 /* If this edge is entering the graph, and we have acquired_at
3358 information for both src and dst, record those acquisition
3359 points. Hence, if there is later a violation of this
3360 ordering, we can show the user the two places in which the
3361 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003362 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003363 src->guestaddr, dst->guestaddr);
3364 expo.src_ga = src->guestaddr;
3365 expo.dst_ga = dst->guestaddr;
3366 expo.src_ec = NULL;
3367 expo.dst_ec = NULL;
3368 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003369 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003370 /* we already have it; do nothing */
3371 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003372 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3373 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003374 expo2->src_ga = src->guestaddr;
3375 expo2->dst_ga = dst->guestaddr;
3376 expo2->src_ec = src->acquired_at;
3377 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003378 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003379 }
3380 }
3381}
3382
3383__attribute__((noinline))
3384static void laog__del_edge ( Lock* src, Lock* dst ) {
3385 Word keyW;
3386 LAOGLinks* links;
3387 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3388 /* Update the out edges for src */
3389 keyW = 0;
3390 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003391 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003392 tl_assert(links);
3393 tl_assert(keyW == (Word)src);
3394 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3395 }
3396 /* Update the in edges for dst */
3397 keyW = 0;
3398 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003399 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003400 tl_assert(links);
3401 tl_assert(keyW == (Word)dst);
3402 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3403 }
3404}
3405
3406__attribute__((noinline))
3407static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3408 Word keyW;
3409 LAOGLinks* links;
3410 keyW = 0;
3411 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003412 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003413 tl_assert(links);
3414 tl_assert(keyW == (Word)lk);
3415 return links->outs;
3416 } else {
3417 return HG_(emptyWS)( univ_laog );
3418 }
3419}
3420
3421__attribute__((noinline))
3422static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3423 Word keyW;
3424 LAOGLinks* links;
3425 keyW = 0;
3426 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003427 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003428 tl_assert(links);
3429 tl_assert(keyW == (Word)lk);
3430 return links->inns;
3431 } else {
3432 return HG_(emptyWS)( univ_laog );
3433 }
3434}
3435
3436__attribute__((noinline))
3437static void laog__sanity_check ( Char* who ) {
3438 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003439 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003440 Lock* me;
3441 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003442 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003443 me = NULL;
3444 links = NULL;
3445 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003446 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003447 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003448 tl_assert(me);
3449 tl_assert(links);
3450 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3451 for (i = 0; i < ws_size; i++) {
3452 if ( ! HG_(elemWS)( univ_laog,
3453 laog__succs( (Lock*)ws_words[i] ),
3454 (Word)me ))
3455 goto bad;
3456 }
3457 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3458 for (i = 0; i < ws_size; i++) {
3459 if ( ! HG_(elemWS)( univ_laog,
3460 laog__preds( (Lock*)ws_words[i] ),
3461 (Word)me ))
3462 goto bad;
3463 }
3464 me = NULL;
3465 links = NULL;
3466 }
sewardj896f6f92008-08-19 08:38:52 +00003467 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003468 return;
3469
3470 bad:
3471 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3472 laog__show(who);
3473 tl_assert(0);
3474}
3475
3476/* If there is a path in laog from 'src' to any of the elements in
3477 'dst', return an arbitrarily chosen element of 'dst' reachable from
3478 'src'. If no path exists from 'src' to any element in 'dst', return
3479 NULL. */
3480__attribute__((noinline))
3481static
3482Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3483{
3484 Lock* ret;
3485 Word i, ssz;
3486 XArray* stack; /* of Lock* */
3487 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3488 Lock* here;
3489 WordSetID succs;
3490 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003491 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003492 //laog__sanity_check();
3493
3494 /* If the destination set is empty, we can never get there from
3495 'src' :-), so don't bother to try */
3496 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3497 return NULL;
3498
3499 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003500 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3501 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003502
3503 (void) VG_(addToXA)( stack, &src );
3504
3505 while (True) {
3506
3507 ssz = VG_(sizeXA)( stack );
3508
3509 if (ssz == 0) { ret = NULL; break; }
3510
3511 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3512 VG_(dropTailXA)( stack, 1 );
3513
3514 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3515
sewardj896f6f92008-08-19 08:38:52 +00003516 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003517 continue;
3518
sewardj896f6f92008-08-19 08:38:52 +00003519 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003520
3521 succs = laog__succs( here );
3522 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3523 for (i = 0; i < succs_size; i++)
3524 (void) VG_(addToXA)( stack, &succs_words[i] );
3525 }
3526
sewardj896f6f92008-08-19 08:38:52 +00003527 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003528 VG_(deleteXA)( stack );
3529 return ret;
3530}
3531
3532
3533/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3534 between 'lk' and the locks already held by 'thr' and issue a
3535 complaint if so. Also, update the ordering graph appropriately.
3536*/
3537__attribute__((noinline))
3538static void laog__pre_thread_acquires_lock (
3539 Thread* thr, /* NB: BEFORE lock is added */
3540 Lock* lk
3541 )
3542{
sewardj250ec2e2008-02-15 22:02:30 +00003543 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003544 Word ls_size, i;
3545 Lock* other;
3546
3547 /* It may be that 'thr' already holds 'lk' and is recursively
3548 relocking it. In this case we just ignore the call. */
3549 /* NB: univ_lsets really is correct here */
3550 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3551 return;
3552
sewardjb4112022007-11-09 22:49:28 +00003553 /* First, the check. Complain if there is any path in laog from lk
3554 to any of the locks already held by thr, since if any such path
3555 existed, it would mean that previously lk was acquired before
3556 (rather than after, as we are doing here) at least one of those
3557 locks.
3558 */
3559 other = laog__do_dfs_from_to(lk, thr->locksetA);
3560 if (other) {
3561 LAOGLinkExposition key, *found;
3562 /* So we managed to find a path lk --*--> other in the graph,
3563 which implies that 'lk' should have been acquired before
3564 'other' but is in fact being acquired afterwards. We present
3565 the lk/other arguments to record_error_LockOrder in the order
3566 in which they should have been acquired. */
3567 /* Go look in the laog_exposition mapping, to find the allocation
3568 points for this edge, so we can show the user. */
3569 key.src_ga = lk->guestaddr;
3570 key.dst_ga = other->guestaddr;
3571 key.src_ec = NULL;
3572 key.dst_ec = NULL;
3573 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003574 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003575 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003576 tl_assert(found != &key);
3577 tl_assert(found->src_ga == key.src_ga);
3578 tl_assert(found->dst_ga == key.dst_ga);
3579 tl_assert(found->src_ec);
3580 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003581 HG_(record_error_LockOrder)(
3582 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003583 found->src_ec, found->dst_ec, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003584 } else {
3585 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003586 HG_(record_error_LockOrder)(
3587 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003588 NULL, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003589 }
3590 }
3591
3592 /* Second, add to laog the pairs
3593 (old, lk) | old <- locks already held by thr
3594 Since both old and lk are currently held by thr, their acquired_at
3595 fields must be non-NULL.
3596 */
3597 tl_assert(lk->acquired_at);
3598 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3599 for (i = 0; i < ls_size; i++) {
3600 Lock* old = (Lock*)ls_words[i];
3601 tl_assert(old->acquired_at);
3602 laog__add_edge( old, lk );
3603 }
3604
3605 /* Why "except_Locks" ? We're here because a lock is being
3606 acquired by a thread, and we're in an inconsistent state here.
3607 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3608 When called in this inconsistent state, locks__sanity_check duly
3609 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003610 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003611 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3612}
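
/* Worked example (illustrative only): thread T does

      pthread_mutex_lock(&L1);
      pthread_mutex_lock(&L2);   // adds edge L1 --> L2 to laog
      pthread_mutex_unlock(&L2);
      pthread_mutex_unlock(&L1);
      pthread_mutex_lock(&L2);
      pthread_mutex_lock(&L1);   // the check fires here

   At the final acquisition, laog__do_dfs_from_to(L1, {L2}) finds the
   path L1 --*--> L2, i.e. L1 was previously ordered before L2 but is
   now being taken after it, so HG_(record_error_LockOrder) is called
   with the two acquisition points saved in laog_exposition. */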
3613
3614
3615/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3616
3617__attribute__((noinline))
3618static void laog__handle_one_lock_deletion ( Lock* lk )
3619{
3620 WordSetID preds, succs;
3621 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003622 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003623
3624 preds = laog__preds( lk );
3625 succs = laog__succs( lk );
3626
3627 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3628 for (i = 0; i < preds_size; i++)
3629 laog__del_edge( (Lock*)preds_words[i], lk );
3630
3631 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3632 for (j = 0; j < succs_size; j++)
3633 laog__del_edge( lk, (Lock*)succs_words[j] );
3634
3635 for (i = 0; i < preds_size; i++) {
3636 for (j = 0; j < succs_size; j++) {
3637 if (preds_words[i] != succs_words[j]) {
3638 /* This can pass unlocked locks to laog__add_edge, since
3639 we're deleting stuff. So their acquired_at fields may
3640 be NULL. */
3641 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3642 }
3643 }
3644 }
3645}
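
/* Example (illustrative only): if laog contains A --> B --> C and
   lock B is destroyed, the loops above first delete A --> B and
   B --> C, then add A --> C for each (pred, succ) pair.  This keeps
   the ordering fact "A before C", which was previously implied only
   transitively through B. */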
3646
sewardj1cbc12f2008-11-10 16:16:46 +00003647//__attribute__((noinline))
3648//static void laog__handle_lock_deletions (
3649// WordSetID /* in univ_laog */ locksToDelete
3650// )
3651//{
3652// Word i, ws_size;
3653// UWord* ws_words;
3654//
sewardj1cbc12f2008-11-10 16:16:46 +00003655//
3656// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3657// for (i = 0; i < ws_size; i++)
3658// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3659//
3660// if (HG_(clo_sanity_flags) & SCE_LAOG)
3661// all__sanity_check("laog__handle_lock_deletions-post");
3662//}
sewardjb4112022007-11-09 22:49:28 +00003663
3664
3665/*--------------------------------------------------------------*/
3666/*--- Malloc/free replacements ---*/
3667/*--------------------------------------------------------------*/
3668
3669typedef
3670 struct {
3671 void* next; /* required by m_hashtable */
3672 Addr payload; /* ptr to actual block */
3673 SizeT szB; /* size requested */
3674 ExeContext* where; /* where it was allocated */
3675 Thread* thr; /* allocating thread */
3676 }
3677 MallocMeta;
3678
3679/* A hash table of MallocMetas, used to track malloc'd blocks
3680 (obviously). */
3681static VgHashTable hg_mallocmeta_table = NULL;
3682
3683
3684static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003685 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003686 tl_assert(md);
3687 return md;
3688}
3689static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003690 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003691}
3692
3693
3694/* Allocate a client block and set up the metadata for it. */
3695
3696static
3697void* handle_alloc ( ThreadId tid,
3698 SizeT szB, SizeT alignB, Bool is_zeroed )
3699{
3700 Addr p;
3701 MallocMeta* md;
3702
3703 tl_assert( ((SSizeT)szB) >= 0 );
3704 p = (Addr)VG_(cli_malloc)(alignB, szB);
3705 if (!p) {
3706 return NULL;
3707 }
3708 if (is_zeroed)
3709 VG_(memset)((void*)p, 0, szB);
3710
3711 /* Note that map_threads_lookup must succeed (its assert cannot fire), since
3712 memory can only be allocated by currently alive threads, hence
3713 they must have an entry in map_threads. */
3714 md = new_MallocMeta();
3715 md->payload = p;
3716 md->szB = szB;
3717 md->where = VG_(record_ExeContext)( tid, 0 );
3718 md->thr = map_threads_lookup( tid );
3719
3720 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3721
3722 /* Tell the lower level memory wranglers. */
3723 evh__new_mem_heap( p, szB, is_zeroed );
3724
3725 return (void*)p;
3726}
3727
3728/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3729 Cast to a signed type to catch any unexpectedly negative args.
3730 We're assuming here that the size asked for is not greater than
3731 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3732 platforms). */
3733static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3734 if (((SSizeT)n) < 0) return NULL;
3735 return handle_alloc ( tid, n, VG_(clo_alignment),
3736 /*is_zeroed*/False );
3737}
3738static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3739 if (((SSizeT)n) < 0) return NULL;
3740 return handle_alloc ( tid, n, VG_(clo_alignment),
3741 /*is_zeroed*/False );
3742}
3743static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3744 if (((SSizeT)n) < 0) return NULL;
3745 return handle_alloc ( tid, n, VG_(clo_alignment),
3746 /*is_zeroed*/False );
3747}
3748static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3749 if (((SSizeT)n) < 0) return NULL;
3750 return handle_alloc ( tid, n, align,
3751 /*is_zeroed*/False );
3752}
3753static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3754 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3755 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3756 /*is_zeroed*/True );
3757}
3758
3759
3760/* Free a client block, including getting rid of the relevant
3761 metadata. */
3762
3763static void handle_free ( ThreadId tid, void* p )
3764{
3765 MallocMeta *md, *old_md;
3766 SizeT szB;
3767
3768 /* First see if we can find the metadata for 'p'. */
3769 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3770 if (!md)
3771 return; /* apparently freeing a bogus address. Oh well. */
3772
3773 tl_assert(md->payload == (Addr)p);
3774 szB = md->szB;
3775
3776 /* Nuke the metadata block */
3777 old_md = (MallocMeta*)
3778 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3779 tl_assert(old_md); /* it must be present - we just found it */
3780 tl_assert(old_md == md);
3781 tl_assert(old_md->payload == (Addr)p);
3782
3783 VG_(cli_free)((void*)old_md->payload);
3784 delete_MallocMeta(old_md);
3785
3786 /* Tell the lower level memory wranglers. */
3787 evh__die_mem_heap( (Addr)p, szB );
3788}
3789
3790static void hg_cli__free ( ThreadId tid, void* p ) {
3791 handle_free(tid, p);
3792}
3793static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3794 handle_free(tid, p);
3795}
3796static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3797 handle_free(tid, p);
3798}
3799
3800
3801static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3802{
3803 MallocMeta *md, *md_new, *md_tmp;
3804 SizeT i;
3805
3806 Addr payload = (Addr)payloadV;
3807
3808 if (((SSizeT)new_size) < 0) return NULL;
3809
3810 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3811 if (!md)
3812 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3813
3814 tl_assert(md->payload == payload);
3815
3816 if (md->szB == new_size) {
3817 /* size unchanged */
3818 md->where = VG_(record_ExeContext)(tid, 0);
3819 return payloadV;
3820 }
3821
3822 if (md->szB > new_size) {
3823 /* new size is smaller */
3824 md->szB = new_size;
3825 md->where = VG_(record_ExeContext)(tid, 0);
3826 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3827 return payloadV;
3828 }
3829
3830 /* else */ {
3831 /* new size is bigger */
3832 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3833
3834 /* First half kept and copied, second half new */
3835 // FIXME: shouldn't we use a copier which implements the
3836 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003837 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003838 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003839 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003840 /* FIXME: can anything funny happen here? specifically, if the
3841 old range contained a lock, then die_mem_heap will complain.
3842 Is that the correct behaviour? Not sure. */
3843 evh__die_mem_heap( payload, md->szB );
3844
3845 /* Copy from old to new */
3846 for (i = 0; i < md->szB; i++)
3847 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3848
3849 /* Because the metadata hash table is indexed by payload address,
3850 we have to get rid of the old hash table entry and make a new
3851 one. We can't just modify the existing metadata in place,
3852 because then it would (almost certainly) be in the wrong hash
3853 chain. */
3854 md_new = new_MallocMeta();
3855 *md_new = *md;
3856
3857 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3858 tl_assert(md_tmp);
3859 tl_assert(md_tmp == md);
3860
3861 VG_(cli_free)((void*)md->payload);
3862 delete_MallocMeta(md);
3863
3864 /* Update fields */
3865 md_new->where = VG_(record_ExeContext)( tid, 0 );
3866 md_new->szB = new_size;
3867 md_new->payload = p_new;
3868 md_new->thr = map_threads_lookup( tid );
3869
3870 /* and add */
3871 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3872
3873 return (void*)p_new;
3874 }
3875}
3876
njn8b140de2009-02-17 04:31:18 +00003877static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3878{
3879 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3880
3881 // There may be slop, but pretend there isn't because only the asked-for
3882 // area will have been shadowed properly.
3883 return ( md ? md->szB : 0 );
3884}
3885
sewardjb4112022007-11-09 22:49:28 +00003886
sewardj095d61e2010-03-11 13:43:18 +00003887/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003888 Slow linear search. With a bit of hash table help if 'data_addr'
3889 is either the start of a block or up to 15 word-sized steps along
3890 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003891
3892static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3893{
sewardjc8028ad2010-05-05 09:34:42 +00003894 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3895 right at it. */
3896 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3897 return True;
3898 /* else normal interval rules apply */
3899 if (LIKELY(a < mm->payload)) return False;
3900 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3901 return True;
sewardj095d61e2010-03-11 13:43:18 +00003902}
3903
sewardjc8028ad2010-05-05 09:34:42 +00003904Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003905 /*OUT*/Addr* payload,
3906 /*OUT*/SizeT* szB,
3907 Addr data_addr )
3908{
3909 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003910 Int i;
3911 const Int n_fast_check_words = 16;
3912
3913 /* First, do a few fast searches on the basis that data_addr might
3914 be exactly the start of a block or up to 15 words inside. This
3915 can happen commonly via the creq
3916 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3917 for (i = 0; i < n_fast_check_words; i++) {
3918 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3919 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3920 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3921 goto found;
3922 }
3923
sewardj095d61e2010-03-11 13:43:18 +00003924 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003925 some such, it's hard to see how to do better. We have to check
3926 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003927 VG_(HT_ResetIter)(hg_mallocmeta_table);
3928 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003929 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3930 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003931 }
sewardjc8028ad2010-05-05 09:34:42 +00003932
3933 /* Not found. Bah. */
3934 return False;
3935 /*NOTREACHED*/
3936
3937 found:
3938 tl_assert(mm);
3939 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3940 if (where) *where = mm->where;
3941 if (payload) *payload = mm->payload;
3942 if (szB) *szB = mm->szB;
3943 return True;
sewardj095d61e2010-03-11 13:43:18 +00003944}
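
/* Example (illustrative only): for a block with payload 0x1000 and
   szB 200, a query for data_addr 0x1020 on a 64-bit host probes the
   hash table at 0x1020, 0x1018, 0x1010, 0x1008 and 0x1000, hits the
   block on the fifth probe, and so the slow full-table scan is never
   reached. */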
3945
3946
sewardjb4112022007-11-09 22:49:28 +00003947/*--------------------------------------------------------------*/
3948/*--- Instrumentation ---*/
3949/*--------------------------------------------------------------*/
3950
sewardjffce8152011-06-24 10:09:41 +00003951#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
3952#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
3953#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
3954#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
3955#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
3956
3957static void instrument_mem_access ( IRSB* sbOut,
sewardjb4112022007-11-09 22:49:28 +00003958 IRExpr* addr,
3959 Int szB,
3960 Bool isStore,
sewardjffce8152011-06-24 10:09:41 +00003961 Int hWordTy_szB,
3962 Int goff_sp )
sewardjb4112022007-11-09 22:49:28 +00003963{
3964 IRType tyAddr = Ity_INVALID;
3965 HChar* hName = NULL;
3966 void* hAddr = NULL;
3967 Int regparms = 0;
3968 IRExpr** argv = NULL;
3969 IRDirty* di = NULL;
3970
sewardjffce8152011-06-24 10:09:41 +00003971 // THRESH is the size of the window above SP (well,
3972 // mostly above) that we assume implies a stack reference.
3973 const Int THRESH = 4096 * 4; // somewhat arbitrary
3974 const Int rz_szB = VG_STACK_REDZONE_SZB;
3975
sewardjb4112022007-11-09 22:49:28 +00003976 tl_assert(isIRAtom(addr));
3977 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3978
sewardjffce8152011-06-24 10:09:41 +00003979 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
sewardjb4112022007-11-09 22:49:28 +00003980 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3981
3982 /* So the effective address is in 'addr' now. */
3983 regparms = 1; // unless stated otherwise
3984 if (isStore) {
3985 switch (szB) {
3986 case 1:
sewardj23f12002009-07-24 08:45:08 +00003987 hName = "evh__mem_help_cwrite_1";
3988 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00003989 argv = mkIRExprVec_1( addr );
3990 break;
3991 case 2:
sewardj23f12002009-07-24 08:45:08 +00003992 hName = "evh__mem_help_cwrite_2";
3993 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00003994 argv = mkIRExprVec_1( addr );
3995 break;
3996 case 4:
sewardj23f12002009-07-24 08:45:08 +00003997 hName = "evh__mem_help_cwrite_4";
3998 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00003999 argv = mkIRExprVec_1( addr );
4000 break;
4001 case 8:
sewardj23f12002009-07-24 08:45:08 +00004002 hName = "evh__mem_help_cwrite_8";
4003 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004004 argv = mkIRExprVec_1( addr );
4005 break;
4006 default:
4007 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4008 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004009 hName = "evh__mem_help_cwrite_N";
4010 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004011 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4012 break;
4013 }
4014 } else {
4015 switch (szB) {
4016 case 1:
sewardj23f12002009-07-24 08:45:08 +00004017 hName = "evh__mem_help_cread_1";
4018 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004019 argv = mkIRExprVec_1( addr );
4020 break;
4021 case 2:
sewardj23f12002009-07-24 08:45:08 +00004022 hName = "evh__mem_help_cread_2";
4023 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004024 argv = mkIRExprVec_1( addr );
4025 break;
4026 case 4:
sewardj23f12002009-07-24 08:45:08 +00004027 hName = "evh__mem_help_cread_4";
4028 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004029 argv = mkIRExprVec_1( addr );
4030 break;
4031 case 8:
sewardj23f12002009-07-24 08:45:08 +00004032 hName = "evh__mem_help_cread_8";
4033 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004034 argv = mkIRExprVec_1( addr );
4035 break;
4036 default:
4037 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4038 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004039 hName = "evh__mem_help_cread_N";
4040 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004041 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4042 break;
4043 }
4044 }
4045
sewardjffce8152011-06-24 10:09:41 +00004046 /* Create the helper. */
sewardjb4112022007-11-09 22:49:28 +00004047 tl_assert(hName);
4048 tl_assert(hAddr);
4049 tl_assert(argv);
4050 di = unsafeIRDirty_0_N( regparms,
4051 hName, VG_(fnptr_to_fnentry)( hAddr ),
4052 argv );
sewardjffce8152011-06-24 10:09:41 +00004053
4054 if (! HG_(clo_check_stack_refs)) {
4055 /* We're ignoring memory references which are (obviously) to the
4056 stack. In fact just skip stack refs that are within 4 pages
4057 of SP (SP - the redzone, really), as that's simple, easy, and
4058 filters out most stack references. */
4059 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4060 some arbitrary N. If that is true then addr is outside the
4061 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4062 pages) then we can say addr is within a few pages of SP and
4063 so can't possibly be a heap access, and so can be skipped.
4064
4065 Note that the condition simplifies to
4066 (addr - SP + RZ) >u N
4067 which generates better code in the x86/amd64 backends, but
4068 unfortunately it does not simplify to
4069 (addr - SP) >u (N - RZ)
4070 (would be beneficial because N - RZ is a constant) because
4071 wraparound arithmetic messes up the comparison. eg.
4072 20 >u 10 == True,
4073 but (20 - 15) >u (10 - 15) == 5 >u (2^32 - 5) == False (in 32 bits).
4074 */
4075 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4076 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4077
4078 /* "addr - SP" */
4079 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4080 addStmtToIRSB(
4081 sbOut,
4082 assign(addr_minus_sp,
4083 tyAddr == Ity_I32
4084 ? binop(Iop_Sub32, addr, mkexpr(sp))
4085 : binop(Iop_Sub64, addr, mkexpr(sp)))
4086 );
4087
4088 /* "addr - SP + RZ" */
4089 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4090 addStmtToIRSB(
4091 sbOut,
4092 assign(diff,
4093 tyAddr == Ity_I32
4094 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4095 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4096 );
4097
4098 IRTemp guard = newIRTemp(sbOut->tyenv, Ity_I1);
4099 addStmtToIRSB(
4100 sbOut,
4101 assign(guard,
4102 tyAddr == Ity_I32
4103 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4104 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4105 );
4106 di->guard = mkexpr(guard);
4107 }
4108
4109 /* Add the helper. */
4110 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
sewardjb4112022007-11-09 22:49:28 +00004111}
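#if 0   /* Illustrative sketch only -- not part of the tool.  A standalone
           demonstration of the wraparound hazard described in the comment
           above, using its own numbers (diff = 20, N = 10, RZ = 15): the
           correct guard and the naive "simplified" guard disagree because
           N - RZ wraps around in unsigned arithmetic. */
#include <stdio.h>
int main ( void )
{
   unsigned int rz = 15, n = 10;
   unsigned int addr_minus_sp = 5;                  /* addr - SP */
   unsigned int diff = addr_minus_sp + rz;          /* addr - SP + RZ == 20 */
   printf("correct: %d\n", diff > n);               /* 20 >u 10        -> 1 */
   printf("naive:   %d\n", addr_minus_sp > n - rz); /* 5 >u 0xFFFFFFFB -> 0 */
   return 0;
}
#endif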
4112
4113
sewardja0eee322009-07-31 08:46:35 +00004114/* Figure out if GA is a guest code address in the dynamic linker, and
4115 if so return True. Otherwise (and in case of any doubt) return
4116 False. (Errs on the safe side: False is the safe value.) */
4117static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4118{
4119 DebugInfo* dinfo;
4120 const UChar* soname;
4121 if (0) return False;
4122
sewardje3f1e592009-07-31 09:41:29 +00004123 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004124 if (!dinfo) return False;
4125
sewardje3f1e592009-07-31 09:41:29 +00004126 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004127 tl_assert(soname);
4128 if (0) VG_(printf)("%s\n", soname);
4129
4130# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004131 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004132 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4133 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4134 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4135 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4136# elif defined(VGO_darwin)
4137 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4138# else
4139# error "Unsupported OS"
4140# endif
4141 return False;
4142}
4143
sewardjb4112022007-11-09 22:49:28 +00004144static
4145IRSB* hg_instrument ( VgCallbackClosure* closure,
4146 IRSB* bbIn,
4147 VexGuestLayout* layout,
4148 VexGuestExtents* vge,
4149 IRType gWordTy, IRType hWordTy )
4150{
sewardj1c0ce7a2009-07-01 08:10:49 +00004151 Int i;
4152 IRSB* bbOut;
4153 Addr64 cia; /* address of current insn */
4154 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004155 Bool inLDSO = False;
4156 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004157
sewardjffce8152011-06-24 10:09:41 +00004158 const Int goff_sp = layout->offset_SP;
4159
sewardjb4112022007-11-09 22:49:28 +00004160 if (gWordTy != hWordTy) {
4161 /* We don't currently support this case. */
4162 VG_(tool_panic)("host/guest word size mismatch");
4163 }
4164
sewardja0eee322009-07-31 08:46:35 +00004165 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4166 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4167 }
4168
sewardjb4112022007-11-09 22:49:28 +00004169 /* Set up BB */
4170 bbOut = emptyIRSB();
4171 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4172 bbOut->next = deepCopyIRExpr(bbIn->next);
4173 bbOut->jumpkind = bbIn->jumpkind;
4174
4175 // Copy verbatim any IR preamble preceding the first IMark
4176 i = 0;
4177 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4178 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4179 i++;
4180 }
4181
sewardj1c0ce7a2009-07-01 08:10:49 +00004182 // Get the first statement, and initial cia from it
4183 tl_assert(bbIn->stmts_used > 0);
4184 tl_assert(i < bbIn->stmts_used);
4185 st = bbIn->stmts[i];
4186 tl_assert(Ist_IMark == st->tag);
4187 cia = st->Ist.IMark.addr;
4188 st = NULL;
4189
sewardjb4112022007-11-09 22:49:28 +00004190 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004191 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004192 tl_assert(st);
4193 tl_assert(isFlatIRStmt(st));
4194 switch (st->tag) {
4195 case Ist_NoOp:
4196 case Ist_AbiHint:
4197 case Ist_Put:
4198 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004199 case Ist_Exit:
4200 /* None of these can contain any memory references. */
4201 break;
4202
sewardj1c0ce7a2009-07-01 08:10:49 +00004203 case Ist_IMark:
4204 /* no mem refs, but note the insn address. */
4205 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004206 /* Don't instrument the dynamic linker. It generates a
4207 lot of races which we just expensively suppress, so
4208 it's pointless.
4209
4210 Avoid flooding is_in_dynamic_linker_shared_object with
4211 requests by only checking at transitions between 4K
4212 pages. */
4213 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4214 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4215 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4216 inLDSO = is_in_dynamic_linker_shared_object(cia);
4217 } else {
4218 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4219 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004220 break;
4221
sewardjb4112022007-11-09 22:49:28 +00004222 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004223 switch (st->Ist.MBE.event) {
4224 case Imbe_Fence:
4225 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004226 default:
4227 goto unhandled;
4228 }
sewardjb4112022007-11-09 22:49:28 +00004229 break;
4230
sewardj1c0ce7a2009-07-01 08:10:49 +00004231 case Ist_CAS: {
4232 /* Atomic read-modify-write cycle. Just pretend it's a
4233 read. */
4234 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004235 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4236 if (isDCAS) {
4237 tl_assert(cas->expdHi);
4238 tl_assert(cas->dataHi);
4239 } else {
4240 tl_assert(!cas->expdHi);
4241 tl_assert(!cas->dataHi);
4242 }
4243 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004244 if (!inLDSO) {
4245 instrument_mem_access(
4246 bbOut,
4247 cas->addr,
4248 (isDCAS ? 2 : 1)
4249 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4250 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004251 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004252 );
4253 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004254 break;
4255 }
4256
sewardjdb5907d2009-11-26 17:20:21 +00004257 case Ist_LLSC: {
4258 /* We pretend store-conditionals don't exist, viz, ignore
4259 them. Whereas load-linked's are treated the same as
4260 normal loads. */
4261 IRType dataTy;
4262 if (st->Ist.LLSC.storedata == NULL) {
4263 /* LL */
4264 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004265 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004266 instrument_mem_access(
4267 bbOut,
4268 st->Ist.LLSC.addr,
4269 sizeofIRType(dataTy),
4270 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004271 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004272 );
4273 }
sewardjdb5907d2009-11-26 17:20:21 +00004274 } else {
4275 /* SC */
4276 /*ignore */
4277 }
4278 break;
4279 }
4280
4281 case Ist_Store:
4282 /* It seems we pretend that store-conditionals don't
4283 exist, viz, just ignore them ... */
4284 if (!inLDSO) {
4285 instrument_mem_access(
4286 bbOut,
4287 st->Ist.Store.addr,
4288 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4289 True/*isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004290 sizeofIRType(hWordTy), goff_sp
sewardjdb5907d2009-11-26 17:20:21 +00004291 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004292 }
njnb83caf22009-05-25 01:47:56 +00004293 break;
sewardjb4112022007-11-09 22:49:28 +00004294
4295 case Ist_WrTmp: {
sewardj1c0ce7a2009-07-01 08:10:49 +00004296 /* ... whereas here we don't care whether a load is a
4297 vanilla one or a load-linked. */
sewardjb4112022007-11-09 22:49:28 +00004298 IRExpr* data = st->Ist.WrTmp.data;
4299 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004300 if (!inLDSO) {
4301 instrument_mem_access(
4302 bbOut,
4303 data->Iex.Load.addr,
4304 sizeofIRType(data->Iex.Load.ty),
4305 False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004306 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004307 );
4308 }
sewardjb4112022007-11-09 22:49:28 +00004309 }
4310 break;
4311 }
4312
4313 case Ist_Dirty: {
4314 Int dataSize;
4315 IRDirty* d = st->Ist.Dirty.details;
4316 if (d->mFx != Ifx_None) {
4317 /* This dirty helper accesses memory. Collect the
4318 details. */
4319 tl_assert(d->mAddr != NULL);
4320 tl_assert(d->mSize != 0);
4321 dataSize = d->mSize;
4322 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004323 if (!inLDSO) {
4324 instrument_mem_access(
4325 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004326 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004327 );
4328 }
sewardjb4112022007-11-09 22:49:28 +00004329 }
4330 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004331 if (!inLDSO) {
4332 instrument_mem_access(
4333 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjffce8152011-06-24 10:09:41 +00004334 sizeofIRType(hWordTy), goff_sp
sewardja0eee322009-07-31 08:46:35 +00004335 );
4336 }
sewardjb4112022007-11-09 22:49:28 +00004337 }
4338 } else {
4339 tl_assert(d->mAddr == NULL);
4340 tl_assert(d->mSize == 0);
4341 }
4342 break;
4343 }
4344
4345 default:
sewardjf98e1c02008-10-25 16:22:41 +00004346 unhandled:
4347 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004348 tl_assert(0);
4349
4350 } /* switch (st->tag) */
4351
4352 addStmtToIRSB( bbOut, st );
4353 } /* iterate over bbIn->stmts */
4354
4355 return bbOut;
4356}
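/* For orientation, a rough before/after sketch of what hg_instrument
   does to a single 32-bit guest load.  The IR text below is
   approximate: exact temporaries, the SP offset and the constants
   depend on the guest architecture and VEX version.

      before:  t2 = LDle:I32(t1)

      after, with --check-stack-refs=no:
               t5 = GET:I32(<goff_sp>)          -- fetch guest SP
               t6 = Sub32(t1,t5)                -- addr - SP
               t7 = Add32(t6,<RZ>)              -- addr - SP + RZ
               t8 = CmpLT32U(<THRESH>,t7)       -- the guard
               if (t8) DIRTY ::: evh__mem_help_cread_4(t1)
               t2 = LDle:I32(t1)

   With --check-stack-refs=yes the DIRTY call is emitted unguarded. */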
4357
sewardjffce8152011-06-24 10:09:41 +00004358#undef binop
4359#undef mkexpr
4360#undef mkU32
4361#undef mkU64
4362#undef assign
4363
sewardjb4112022007-11-09 22:49:28 +00004364
4365/*----------------------------------------------------------------*/
4366/*--- Client requests ---*/
4367/*----------------------------------------------------------------*/
4368
4369/* Sheesh. Yet another goddam finite map. */
4370static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4371
4372static void map_pthread_t_to_Thread_INIT ( void ) {
4373 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004374 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4375 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004376 tl_assert(map_pthread_t_to_Thread != NULL);
4377 }
4378}
4379
4380
4381static
4382Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4383{
4384 if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
4385 return False;
4386
4387 /* Anything that gets past the above check is one of ours, so we
4388 should be able to handle it. */
4389
4390 /* default, meaningless return value, unless otherwise set */
4391 *ret = 0;
4392
4393 switch (args[0]) {
4394
4395 /* --- --- User-visible client requests --- --- */
4396
4397 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004398 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004399 args[1], args[2]);
4400 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004401 are any held locks etc in the area. Calling evh__die_mem
4402 and then evh__new_mem is a bit inefficient; probably just
4403 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004404 if (args[2] > 0) { /* length */
4405 evh__die_mem(args[1], args[2]);
4406 /* and then set it to New */
4407 evh__new_mem(args[1], args[2]);
4408 }
4409 break;
4410
sewardjc8028ad2010-05-05 09:34:42 +00004411 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4412 Addr payload = 0;
4413 SizeT pszB = 0;
4414 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4415 args[1]);
4416 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4417 if (pszB > 0) {
4418 evh__die_mem(payload, pszB);
4419 evh__new_mem(payload, pszB);
4420 }
4421 *ret = pszB;
4422 } else {
4423 *ret = (UWord)-1;
4424 }
4425 break;
4426 }
4427
sewardj406bac82010-03-03 23:03:40 +00004428 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4429 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4430 args[1], args[2]);
4431 if (args[2] > 0) { /* length */
4432 evh__untrack_mem(args[1], args[2]);
4433 }
4434 break;
4435
4436 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4437 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4438 args[1], args[2]);
4439 if (args[2] > 0) { /* length */
4440 evh__new_mem(args[1], args[2]);
4441 }
4442 break;
4443
sewardjb4112022007-11-09 22:49:28 +00004444 /* --- --- Client requests for Helgrind's use only --- --- */
4445
4446 /* Some thread is telling us its pthread_t value. Record the
4447 binding between that and the associated Thread*, so we can
4448 later find the Thread* again when notified of a join by the
4449 thread. */
4450 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4451 Thread* my_thr = NULL;
4452 if (0)
4453 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4454 (void*)args[1]);
4455 map_pthread_t_to_Thread_INIT();
4456 my_thr = map_threads_maybe_lookup( tid );
4457 /* This assertion should hold because the map_threads (tid to
4458 Thread*) binding should have been made at the point of
4459 low-level creation of this thread, which should have
4460 happened prior to us getting this client request for it.
4461 That's because this client request is sent from
4462 client-world from the 'thread_wrapper' function, which
4463 only runs once the thread has been low-level created. */
4464 tl_assert(my_thr != NULL);
4465 /* So now we know that (pthread_t)args[1] is associated with
4466 (Thread*)my_thr. Note that down. */
4467 if (0)
4468 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4469 (void*)args[1], (void*)my_thr );
sewardj896f6f92008-08-19 08:38:52 +00004470 VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004471 break;
4472 }
4473
4474 case _VG_USERREQ__HG_PTH_API_ERROR: {
4475 Thread* my_thr = NULL;
4476 map_pthread_t_to_Thread_INIT();
4477 my_thr = map_threads_maybe_lookup( tid );
4478 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004479 HG_(record_error_PthAPIerror)(
4480 my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004481 break;
4482 }
4483
4484 /* This thread (tid) has completed a join with the quitting
4485 thread whose pthread_t is in args[1]. */
4486 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4487 Thread* thr_q = NULL; /* quitter Thread* */
4488 Bool found = False;
4489 if (0)
4490 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4491 (void*)args[1]);
4492 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004493 found = VG_(lookupFM)( map_pthread_t_to_Thread,
sewardjb5f29642007-11-16 12:02:43 +00004494 NULL, (Word*)&thr_q, (Word)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004495 /* Can this fail? It would mean that our pthread_join
4496 wrapper observed a successful join on args[1] yet that
4497 thread never existed (or at least, it never lodged an
4498 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4499 sounds like a bug in the threads library. */
4500 // FIXME: get rid of this assertion; handle properly
4501 tl_assert(found);
4502 if (found) {
4503 if (0)
4504 VG_(printf)(".................... quitter Thread* = %p\n",
4505 thr_q);
4506 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4507 }
4508 break;
4509 }
4510
4511 /* EXPOSITION only: by intercepting lock init events we can show
4512 the user where the lock was initialised, rather than only
4513 being able to show where it was first locked. Intercepting
4514 lock initialisations is not necessary for the basic operation
4515 of the race checker. */
4516 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
4517 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
4518 break;
4519
4520 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
4521 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
4522 break;
4523
4524 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
4525 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
4526 break;
4527
4528 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
4529 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
4530 break;
4531
4532 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
4533 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
4534 break;
4535
4536 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
4537 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
4538 break;
4539
4540 /* This thread is about to do pthread_cond_signal on the
4541 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
4542 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
4543 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
4544 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
4545 break;
4546
4547 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
4548 Returns a flag indicating whether or not the mutex is believed to be
4549 valid for this operation. */
4550 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
4551 Bool mutex_is_valid
4552 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
4553 (void*)args[2] );
4554 *ret = mutex_is_valid ? 1 : 0;
4555 break;
4556 }
4557
sewardjf98e1c02008-10-25 16:22:41 +00004558 /* cond=arg[1] */
4559 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
4560 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
4561 break;
4562
sewardjb4112022007-11-09 22:49:28 +00004563 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
4564 mutex=arg[2] */
4565 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
4566 evh__HG_PTHREAD_COND_WAIT_POST( tid,
4567 (void*)args[1], (void*)args[2] );
4568 break;
4569
4570 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
4571 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
4572 break;
4573
4574 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
4575 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
4576 break;
4577
sewardj789c3c52008-02-25 12:10:07 +00004578 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00004579 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00004580 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
4581 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00004582 break;
4583
4584 /* rwlock=arg[1], isW=arg[2] */
4585 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
4586 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
4587 break;
4588
4589 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
4590 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
4591 break;
4592
4593 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
4594 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
4595 break;
4596
sewardj11e352f2007-11-30 11:11:02 +00004597 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
4598 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00004599 break;
4600
sewardj11e352f2007-11-30 11:11:02 +00004601 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
4602 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004603 break;
4604
sewardj11e352f2007-11-30 11:11:02 +00004605 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
4606 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
4607 break;
4608
4609 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
4610 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004611 break;
4612
sewardj9f569b72008-11-13 13:33:09 +00004613 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00004614 /* pth_bar_t*, ulong count, ulong resizable */
4615 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
4616 args[2], args[3] );
4617 break;
4618
4619 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
4620 /* pth_bar_t*, ulong newcount */
4621 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
4622 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00004623 break;
4624
4625 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
4626 /* pth_bar_t* */
4627 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
4628 break;
4629
4630 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
4631 /* pth_bar_t* */
4632 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
4633 break;
sewardjb4112022007-11-09 22:49:28 +00004634
sewardj5a644da2009-08-11 10:35:58 +00004635 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
4636 /* pth_spinlock_t* */
4637 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
4638 break;
4639
4640 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
4641 /* pth_spinlock_t* */
4642 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
4643 break;
4644
4645 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
4646 /* pth_spinlock_t*, Word */
4647 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
4648 break;
4649
4650 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
4651 /* pth_spinlock_t* */
4652 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
4653 break;
4654
4655 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
4656 /* pth_spinlock_t* */
4657 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
4658 break;
4659
sewardjed2e72e2009-08-14 11:08:24 +00004660 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
4661 /* char* who */
4662 HChar* who = (HChar*)args[1];
4663 HChar buf[50 + 50];
4664 Thread* thr = map_threads_maybe_lookup( tid );
4665 tl_assert( thr ); /* I must be mapped */
4666 tl_assert( who );
4667 tl_assert( VG_(strlen)(who) <= 50 );
4668 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
4669 /* record_error_Misc strdup's buf, so this is safe: */
4670 HG_(record_error_Misc)( thr, buf );
4671 break;
4672 }
4673
4674 case _VG_USERREQ__HG_USERSO_SEND_PRE:
4675 /* UWord arbitrary-SO-tag */
4676 evh__HG_USERSO_SEND_PRE( tid, args[1] );
4677 break;
4678
4679 case _VG_USERREQ__HG_USERSO_RECV_POST:
4680 /* UWord arbitrary-SO-tag */
4681 evh__HG_USERSO_RECV_POST( tid, args[1] );
4682 break;
4683
sewardj6015d0e2011-03-11 19:10:48 +00004684 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
4685 /* UWord arbitrary-SO-tag */
4686 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
4687 break;
4688
sewardjb4112022007-11-09 22:49:28 +00004689 default:
4690 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00004691 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
4692 args[0]);
sewardjb4112022007-11-09 22:49:28 +00004693 }
4694
4695 return True;
4696}
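#if 0   /* Illustrative sketch only -- client-side code, not part of the
           tool.  Shows how the USERSO_SEND_PRE / USERSO_RECV_POST requests
           handled above are reached from a client program, assuming the
           ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER macros from the
           public helgrind.h header.  The SO tag is an arbitrary word; here
           the address of the flag itself is used. */
#include "helgrind.h"
static int payload;
static volatile int ready;

static void producer ( void )
{
   payload = 42;
   ANNOTATE_HAPPENS_BEFORE(&ready);  /* _VG_USERREQ__HG_USERSO_SEND_PRE */
   ready = 1;
}

static void consumer ( void )
{
   while (!ready) { }
   ANNOTATE_HAPPENS_AFTER(&ready);   /* _VG_USERREQ__HG_USERSO_RECV_POST */
   /* reads of 'payload' are now ordered after the producer's write */
}
#endif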
4697
4698
4699/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004700/*--- Setup ---*/
4701/*----------------------------------------------------------------*/
4702
4703static Bool hg_process_cmd_line_option ( Char* arg )
4704{
njn83df0b62009-02-25 01:01:05 +00004705 Char* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00004706
njn83df0b62009-02-25 01:01:05 +00004707 if VG_BOOL_CLO(arg, "--track-lockorders",
4708 HG_(clo_track_lockorders)) {}
4709 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
4710 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00004711
4712 else if VG_XACT_CLO(arg, "--history-level=none",
4713 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00004714 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00004715 HG_(clo_history_level), 1);
4716 else if VG_XACT_CLO(arg, "--history-level=full",
4717 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00004718
sewardjf585e482009-08-16 22:52:29 +00004719 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00004720 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00004721 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00004722 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00004723
sewardj11e352f2007-11-30 11:11:02 +00004724 /* "stuvwx" --> stuvwx (binary) */
njn83df0b62009-02-25 01:01:05 +00004725 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00004726 Int j;
sewardjb4112022007-11-09 22:49:28 +00004727
njn83df0b62009-02-25 01:01:05 +00004728 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00004729 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00004730 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00004731 return False;
4732 }
sewardj11e352f2007-11-30 11:11:02 +00004733 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00004734 if ('0' == tmp_str[j]) { /* do nothing */ }
4735 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00004736 else {
sewardj11e352f2007-11-30 11:11:02 +00004737 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00004738 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00004739 return False;
4740 }
4741 }
sewardjf98e1c02008-10-25 16:22:41 +00004742 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00004743 }
4744
sewardj622fe492011-03-11 21:06:59 +00004745 else if VG_BOOL_CLO(arg, "--free-is-write",
4746 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00004747
4748 else if VG_XACT_CLO(arg, "--vts-pruning=never",
4749 HG_(clo_vts_pruning), 0);
4750 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
4751 HG_(clo_vts_pruning), 1);
4752 else if VG_XACT_CLO(arg, "--vts-pruning=always",
4753 HG_(clo_vts_pruning), 2);
4754
4755 else if VG_BOOL_CLO(arg, "--check-stack-refs",
4756 HG_(clo_check_stack_refs)) {}
4757
sewardjb4112022007-11-09 22:49:28 +00004758 else
4759 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4760
4761 return True;
4762}
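/* Example invocations exercising the options parsed above (shell
   commands, shown for reference only):

      valgrind --tool=helgrind --history-level=approx ./a.out
      valgrind --tool=helgrind --check-stack-refs=no ./a.out
      valgrind --tool=helgrind --conflict-cache-size=2000000 ./a.out
      valgrind --tool=helgrind --hg-sanity-flags=000010 ./a.out

   Note that --hg-sanity-flags takes exactly six binary digits, and
   --conflict-cache-size is range-checked to 10*1000 .. 30*1000*1000. */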
4763
4764static void hg_print_usage ( void )
4765{
4766 VG_(printf)(
sewardj622fe492011-03-11 21:06:59 +00004767" --free-is-write=no|yes treat heap frees as writes [no]\n"
sewardj849b0ed2008-12-21 10:43:10 +00004768" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00004769" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00004770" full: show both stack traces for a data race (can be very slow)\n"
4771" approx: full trace for one thread, approx for the other (faster)\n"
4772" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00004773" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjffce8152011-06-24 10:09:41 +00004774" --check-stack-refs=no|yes race-check reads and writes on the\n"
4775" main stack and thread stacks? [yes]\n"
sewardjb4112022007-11-09 22:49:28 +00004776 );
sewardjb4112022007-11-09 22:49:28 +00004777}
4778
4779static void hg_print_debug_usage ( void )
4780{
sewardjb4112022007-11-09 22:49:28 +00004781 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
4782 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00004783 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00004784 " at events (X = 0|1) [000000]\n");
4785 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00004786 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00004787 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00004788 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
4789 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00004790 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00004791 VG_(printf)(" 000010 at lock/unlock events\n");
4792 VG_(printf)(" 000001 at thread create/join events\n");
sewardjffce8152011-06-24 10:09:41 +00004793 VG_(printf)(
4794" --vts-pruning=never|auto|always [auto]\n"
4795" never: is never done (may cause big space leaks in Helgrind)\n"
4796" auto: done just often enough to keep space usage under control\n"
4797" always: done after every VTS GC (mostly just a big time waster)\n"
4798 );
sewardjb4112022007-11-09 22:49:28 +00004799}
4800
sewardjb4112022007-11-09 22:49:28 +00004801static void hg_fini ( Int exitcode )
4802{
sewardj2d9e8742009-08-07 15:46:56 +00004803 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4804 VG_(message)(Vg_UserMsg,
4805 "For counts of detected and suppressed errors, "
4806 "rerun with: -v\n");
4807 }
4808
4809 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
4810 && HG_(clo_history_level) >= 2) {
4811 VG_(umsg)(
4812 "Use --history-level=approx or =none to gain increased speed, at\n" );
4813 VG_(umsg)(
4814 "the cost of reduced accuracy of conflicting-access information\n");
4815 }
4816
sewardjb4112022007-11-09 22:49:28 +00004817 if (SHOW_DATA_STRUCTURES)
4818 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00004819 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00004820 all__sanity_check("SK_(fini)");
4821
sewardj2d9e8742009-08-07 15:46:56 +00004822 if (VG_(clo_stats)) {
sewardjb4112022007-11-09 22:49:28 +00004823
4824 if (1) {
4825 VG_(printf)("\n");
sewardjb4112022007-11-09 22:49:28 +00004826 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
sewardjc1fb9d22011-02-28 09:03:44 +00004827 if (HG_(clo_track_lockorders)) {
4828 VG_(printf)("\n");
4829 HG_(ppWSUstats)( univ_laog, "univ_laog" );
4830 }
sewardjb4112022007-11-09 22:49:28 +00004831 }
4832
sewardjf98e1c02008-10-25 16:22:41 +00004833 //zz VG_(printf)("\n");
4834 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
4835 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
4836 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
4837 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
4838 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
4839 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
4840 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
4841 //zz stats__hbefore_stk_hwm);
4842 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
4843 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
sewardjb4112022007-11-09 22:49:28 +00004844
4845 VG_(printf)("\n");
barta0b6b2c2008-07-07 06:49:24 +00004846 VG_(printf)(" locksets: %'8d unique lock sets\n",
sewardjb4112022007-11-09 22:49:28 +00004847 (Int)HG_(cardinalityWSU)( univ_lsets ));
sewardjc1fb9d22011-02-28 09:03:44 +00004848 if (HG_(clo_track_lockorders)) {
4849 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
4850 (Int)HG_(cardinalityWSU)( univ_laog ));
4851 }
sewardjb4112022007-11-09 22:49:28 +00004852
sewardjd52392d2008-11-08 20:36:26 +00004853 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
4854 // stats__ga_LL_adds,
4855 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
sewardjb4112022007-11-09 22:49:28 +00004856
sewardjf98e1c02008-10-25 16:22:41 +00004857 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
4858 HG_(stats__LockN_to_P_queries),
4859 HG_(stats__LockN_to_P_get_map_size)() );
sewardjb4112022007-11-09 22:49:28 +00004860
sewardjf98e1c02008-10-25 16:22:41 +00004861 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
4862 HG_(stats__string_table_queries),
4863 HG_(stats__string_table_get_map_size)() );
sewardjc1fb9d22011-02-28 09:03:44 +00004864 if (HG_(clo_track_lockorders)) {
4865 VG_(printf)(" LAOG: %'8d map size\n",
4866 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
4867 VG_(printf)(" LAOG exposition: %'8d map size\n",
4868 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
4869 }
4870
barta0b6b2c2008-07-07 06:49:24 +00004871 VG_(printf)(" locks: %'8lu acquires, "
4872 "%'lu releases\n",
sewardjb4112022007-11-09 22:49:28 +00004873 stats__lockN_acquires,
4874 stats__lockN_releases
4875 );
barta0b6b2c2008-07-07 06:49:24 +00004876 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
sewardjb4112022007-11-09 22:49:28 +00004877
4878 VG_(printf)("\n");
sewardjf98e1c02008-10-25 16:22:41 +00004879 libhb_shutdown(True);
sewardjb4112022007-11-09 22:49:28 +00004880 }
4881}
4882
sewardjf98e1c02008-10-25 16:22:41 +00004883/* FIXME: move these somewhere sane */
4884
4885static
4886void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4887{
4888 Thread* thr;
4889 ThreadId tid;
4890 UWord nActual;
4891 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00004892 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00004893 tl_assert(thr);
4894 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4895 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4896 NULL, NULL, 0 );
4897 tl_assert(nActual <= nRequest);
4898 for (; nActual < nRequest; nActual++)
4899 frames[nActual] = 0;
4900}
4901
4902static
sewardj23f12002009-07-24 08:45:08 +00004903ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00004904{
4905 Thread* thr;
4906 ThreadId tid;
4907 ExeContext* ec;
4908 tl_assert(hbt);
sewardj60626642011-03-10 15:14:37 +00004909 thr = libhb_get_Thr_hgthread( hbt );
sewardjf98e1c02008-10-25 16:22:41 +00004910 tl_assert(thr);
4911 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00004912 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00004913 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00004914 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00004915}
4916
4917
sewardjc1fb9d22011-02-28 09:03:44 +00004918static void hg_post_clo_init ( void )
sewardjb4112022007-11-09 22:49:28 +00004919{
sewardjf98e1c02008-10-25 16:22:41 +00004920 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00004921
sewardjc1fb9d22011-02-28 09:03:44 +00004922 /////////////////////////////////////////////
4923 hbthr_root = libhb_init( for_libhb__get_stacktrace,
4924 for_libhb__get_EC );
4925 /////////////////////////////////////////////
4926
4927
4928 if (HG_(clo_track_lockorders))
4929 laog__init();
4930
4931 initialise_data_structures(hbthr_root);
4932}
4933
4934static void hg_pre_clo_init ( void )
4935{
sewardjb4112022007-11-09 22:49:28 +00004936 VG_(details_name) ("Helgrind");
4937 VG_(details_version) (NULL);
4938 VG_(details_description) ("a thread error detector");
4939 VG_(details_copyright_author)(
sewardj9eecbbb2010-05-03 21:37:12 +00004940 "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00004941 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj9c08c0f2011-03-10 15:01:14 +00004942 VG_(details_avg_translation_sizeB) ( 320 );
sewardjb4112022007-11-09 22:49:28 +00004943
4944 VG_(basic_tool_funcs) (hg_post_clo_init,
4945 hg_instrument,
4946 hg_fini);
4947
4948 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00004949 VG_(needs_tool_errors) (HG_(eq_Error),
sewardj24118492009-07-15 14:50:02 +00004950 HG_(before_pp_Error),
sewardjf98e1c02008-10-25 16:22:41 +00004951 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00004952 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00004953 HG_(update_extra),
4954 HG_(recognised_suppression),
4955 HG_(read_extra_suppression_info),
4956 HG_(error_matches_suppression),
4957 HG_(get_error_name),
sewardj588adef2009-08-15 22:41:51 +00004958 HG_(get_extra_suppression_info));
sewardjb4112022007-11-09 22:49:28 +00004959
sewardj24118492009-07-15 14:50:02 +00004960 VG_(needs_xml_output) ();
4961
sewardjb4112022007-11-09 22:49:28 +00004962 VG_(needs_command_line_options)(hg_process_cmd_line_option,
4963 hg_print_usage,
4964 hg_print_debug_usage);
4965 VG_(needs_client_requests) (hg_handle_client_request);
4966
4967 // FIXME?
4968 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
4969 // hg_expensive_sanity_check);
4970
4971 VG_(needs_malloc_replacement) (hg_cli__malloc,
4972 hg_cli____builtin_new,
4973 hg_cli____builtin_vec_new,
4974 hg_cli__memalign,
4975 hg_cli__calloc,
4976 hg_cli__free,
4977 hg_cli____builtin_delete,
4978 hg_cli____builtin_vec_delete,
4979 hg_cli__realloc,
njn8b140de2009-02-17 04:31:18 +00004980 hg_cli_malloc_usable_size,
sewardjb4112022007-11-09 22:49:28 +00004981 HG_CLI__MALLOC_REDZONE_SZB );
4982
sewardj849b0ed2008-12-21 10:43:10 +00004983 /* 21 Dec 08: disabled this; it mostly causes H to start more
4984 slowly and use significantly more memory, without very often
4985 providing useful results. The user can request to load this
4986 information manually with --read-var-info=yes. */
4987 if (0) VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00004988
4989 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00004990 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
4991 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00004992 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
sewardj1f77fec2010-04-12 19:51:04 +00004993 VG_(track_new_mem_stack) ( evh__new_mem_stack );
sewardjb4112022007-11-09 22:49:28 +00004994
4995 // FIXME: surely this isn't thread-aware
sewardj23f12002009-07-24 08:45:08 +00004996 VG_(track_copy_mem_remap) ( evh__copy_mem );
sewardjb4112022007-11-09 22:49:28 +00004997
4998 VG_(track_change_mem_mprotect) ( evh__set_perms );
4999
5000 VG_(track_die_mem_stack_signal)( evh__die_mem );
sewardjfd35d492011-03-17 19:39:55 +00005001 VG_(track_die_mem_brk) ( evh__die_mem_munmap );
5002 VG_(track_die_mem_munmap) ( evh__die_mem_munmap );
sewardjb4112022007-11-09 22:49:28 +00005003 VG_(track_die_mem_stack) ( evh__die_mem );
5004
5005 // FIXME: what is this for?
5006 VG_(track_ban_mem_stack) (NULL);
5007
5008 VG_(track_pre_mem_read) ( evh__pre_mem_read );
5009 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
5010 VG_(track_pre_mem_write) ( evh__pre_mem_write );
5011 VG_(track_post_mem_write) (NULL);
5012
5013 /////////////////
5014
5015 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
5016 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
5017
5018 VG_(track_start_client_code)( evh__start_client_code );
5019 VG_(track_stop_client_code)( evh__stop_client_code );
5020
sewardjb4112022007-11-09 22:49:28 +00005021 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
5022 as described in comments at the top of pub_tool_hashtable.h, are
5023 met. Blargh. */
5024 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
5025 tl_assert( sizeof(UWord) == sizeof(Addr) );
5026 hg_mallocmeta_table
5027 = VG_(HT_construct)( "hg_malloc_metadata_table" );
5028
sewardj61bc2c52011-02-09 10:34:00 +00005029 // add a callback to clean up on (threaded) fork.
5030 VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
sewardjb4112022007-11-09 22:49:28 +00005031}
5032
5033VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5034
5035/*--------------------------------------------------------------------*/
5036/*--- end hg_main.c ---*/
5037/*--------------------------------------------------------------------*/