
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
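
/* For illustration, a minimal sketch of the cast pattern in question
   (it appears verbatim in the map_locks functions below).  A Lock* is
   stored in the map as a Word, so reading it back goes through a
   (Word*)&lk cast on the out-parameter, which is what falls foul of
   strict-aliasing at -O2:

      Lock* lk = NULL;
      if (VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga )) {
         // found: lk now points at the Lock keyed by 'ga'
      }
*/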

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0
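
/* A typical use, mirroring the evh__* handlers further down (a usage
   sketch only):

      if (SHOW_EVENTS >= 2)
         VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
*/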


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle del_LockN. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
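
/* For reference, the two unlink cases handled above ('next' pointers
   run left to right from the list head):

      head:    admin_locks -> [lk] <-> [B] <-> ...
               admin_locks = B;  B->admin_prev = NULL;

      middle:  ... <-> [A] <-> [lk] <-> [B] <-> ...
               A->admin_next = B;  B->admin_prev = A;
               (B may be absent, in which case only A is fixed up)
*/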

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
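
/* A client-side sketch of the LK_mbRec path above (illustrative only,
   not tool code).  With a recursive mutex, the second lock call below
   re-enters lockN_acquire_writer while the lock is already w-held by
   the same thread:

      pthread_mutexattr_t at;
      pthread_mutex_t     mx;
      pthread_mutexattr_init( &at );
      pthread_mutexattr_settype( &at, PTHREAD_MUTEX_RECURSIVE );
      pthread_mutex_init( &mx, &at );
      pthread_mutex_lock( &mx );   // heldBy = { thr }
      pthread_mutex_lock( &mx );   // heldBy = { thr, thr }
*/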

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}


/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
         owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
            and can be mapped to a valid Segment(seg)
            and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
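
/* A sketch (not the actual handler) of how this is intended to be
   used when the client grows a block via realloc: copy the shadow
   state of the old payload, then mark the excess as New.  The names
   below are illustrative only:

      if (new_szB > old_szB) {
         shadow_mem_scopy_range( thr, payload_old, payload_new, old_szB );
         shadow_mem_make_New( thr, payload_new + old_szB,
                              new_szB - old_szB );
      }
*/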

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}

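/* A client-side sketch of the UnlockForeign case above (illustrative
   only, not tool code): thread B unlocks a mutex that thread A holds,
   so the release arrives here with n == 0 and realOwner == A:

      // thread A:                  // thread B:
      pthread_mutex_lock( &mx );
                                    pthread_mutex_unlock( &mx );
*/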

/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001399 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001400 all__sanity_check("evh__new_mem_w_perms-post");
1401}
1402
1403static
1404void evh__set_perms ( Addr a, SizeT len,
1405 Bool rr, Bool ww, Bool xx ) {
1406 if (SHOW_EVENTS >= 1)
1407 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1408 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1409 /* Hmm. What should we do here, that actually makes any sense?
1410 Let's say: if neither readable nor writable, then declare it
1411 NoAccess, else leave it alone. */
1412 if (!(rr || ww))
1413 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001414 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001415 all__sanity_check("evh__set_perms-post");
1416}
1417
1418static
1419void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001420 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001421 if (SHOW_EVENTS >= 2)
1422 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1423 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001424 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001425 all__sanity_check("evh__die_mem-post");
1426}
1427
1428static
sewardj406bac82010-03-03 23:03:40 +00001429void evh__untrack_mem ( Addr a, SizeT len ) {
1430 // whereas it doesn't ignore this
1431 if (SHOW_EVENTS >= 2)
1432 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1433 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1434 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1435 all__sanity_check("evh__untrack_mem-post");
1436}
1437
1438static
sewardj23f12002009-07-24 08:45:08 +00001439void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1440 if (SHOW_EVENTS >= 2)
1441 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1442 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1443 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1444 all__sanity_check("evh__copy_mem-post");
1445}
1446
1447static
sewardjb4112022007-11-09 22:49:28 +00001448void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1449{
1450 if (SHOW_EVENTS >= 1)
1451 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1452 (Int)parent, (Int)child );
1453
1454 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001455 Thread* thr_p;
1456 Thread* thr_c;
1457 Thr* hbthr_p;
1458 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001459
sewardjf98e1c02008-10-25 16:22:41 +00001460 tl_assert(HG_(is_sane_ThreadId)(parent));
1461 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001462 tl_assert(parent != child);
1463
1464 thr_p = map_threads_maybe_lookup( parent );
1465 thr_c = map_threads_maybe_lookup( child );
1466
1467 tl_assert(thr_p != NULL);
1468 tl_assert(thr_c == NULL);
1469
sewardjf98e1c02008-10-25 16:22:41 +00001470 hbthr_p = thr_p->hbthr;
1471 tl_assert(hbthr_p != NULL);
sewardj60626642011-03-10 15:14:37 +00001472 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001473
sewardjf98e1c02008-10-25 16:22:41 +00001474 hbthr_c = libhb_create ( hbthr_p );
1475
1476 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001477 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001478 thr_c = mk_Thread( hbthr_c );
sewardj60626642011-03-10 15:14:37 +00001479 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1480 libhb_set_Thr_hgthread(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001481
1482 /* and bind it in the thread-map table */
1483 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001484 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1485 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001486
1487 /* Record where the parent is so we can later refer to this in
1488 error messages.
1489
1490 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1491 The stack snapshot is taken immediately after the parent has
1492 returned from its sys_clone call. Unfortunately there is no
1493 unwind info for the insn following "syscall" - reading the
1494 glibc sources confirms this. So we ask for a snapshot to be
1495 taken as if RIP was 3 bytes earlier, in a place where there
1496 is unwind info. Sigh.
1497 */
1498 { Word first_ip_delta = 0;
1499# if defined(VGP_amd64_linux)
1500 first_ip_delta = -3;
1501# endif
1502 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1503 }
sewardjb4112022007-11-09 22:49:28 +00001504 }
1505
sewardjf98e1c02008-10-25 16:22:41 +00001506 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001507 all__sanity_check("evh__pre_thread_create-post");
1508}
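
/* Illustrative sketch (ours): the libhb_create() call above hands
   the child a copy of the parent's vector clock, which is why
   parent-side initialisation done before pthread_create() is not
   reported as racing with the child's reads:

       static int cfg;                 shared; no lock anywhere
       static void* child_fn ( void* v ) {
          return (void*)(long)cfg;     child reads cfg
       }
       ...
       cfg = 42;                       parent writes cfg
       pthread_t t;
       pthread_create(&t, NULL, child_fn, NULL);
                                       the create edge orders the
                                       write before the read */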
1509
1510static
1511void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1512{
1513 Int nHeld;
1514 Thread* thr_q;
1515 if (SHOW_EVENTS >= 1)
1516 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1517 (Int)quit_tid );
1518
1519 /* quit_tid has disappeared without joining to any other thread.
1520 Therefore there is no synchronisation event associated with its
1521 exit and so we have to pretty much treat it as if it was still
1522 alive but mysteriously making no progress. That is because, if
1523 we don't know when it really exited, then we can never say there
1524 is a point in time when we're sure the thread really has
1525 finished, and so we need to consider the possibility that it
1526 lingers indefinitely and continues to interact with other
1527 threads. */
1528 /* However, it might have rendezvous'd with a thread that called
1529 pthread_join with this one as arg, prior to this point (that's
1530 how NPTL works). In which case there has already been a prior
1531 sync event. So in any case, just let the thread exit. On NPTL,
1532 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001533 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001534 thr_q = map_threads_maybe_lookup( quit_tid );
1535 tl_assert(thr_q != NULL);
1536
1537 /* Complain if this thread holds any locks. */
1538 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1539 tl_assert(nHeld >= 0);
1540 if (nHeld > 0) {
1541 HChar buf[80];
1542 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1543 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001544 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001545 }
1546
sewardj23f12002009-07-24 08:45:08 +00001547 /* Not much to do here:
1548 - tell libhb the thread is gone
1549 - clear the map_threads entry, in order that the Valgrind core
1550 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001551 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1552 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001553 tl_assert(thr_q->hbthr);
1554 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001555 tl_assert(thr_q->coretid == quit_tid);
1556 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001557 map_threads_delete( quit_tid );
1558
sewardjf98e1c02008-10-25 16:22:41 +00001559 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001560 all__sanity_check("evh__pre_thread_ll_exit-post");
1561}
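
/* Illustrative sketch (ours) of the "no sync event" situation
   described above: a detached thread's exit yields no
   happens-before edge, however much real time passes:

       static int x;
       static void* fn ( void* v ) { x = 1; return NULL; }
       ...
       pthread_t t;
       pthread_create(&t, NULL, fn, NULL);
       pthread_detach(t);
       sleep(5);              no join => no edge; the thread must
       int y = x;             still be treated as live, so this
                              read races with the child's store */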
1562
sewardj61bc2c52011-02-09 10:34:00 +00001563/* This is called immediately after fork, for the child only. 'tid'
1564 is the only surviving thread (as per POSIX rules on fork() in
1565 threaded programs), so we have to clean up map_threads to remove
1566 entries for any other threads. */
1567static
1568void evh__atfork_child ( ThreadId tid )
1569{
1570 UInt i;
1571 Thread* thr;
1572 /* Slot 0 should never be used. */
1573 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1574 tl_assert(!thr);
1575 /* Clean up all other slots except 'tid'. */
1576 for (i = 1; i < VG_N_THREADS; i++) {
1577 if (i == tid)
1578 continue;
1579 thr = map_threads_maybe_lookup(i);
1580 if (!thr)
1581 continue;
1582 /* Cleanup actions (next 5 lines) copied from end of
1583 evh__pre_thread_ll_exit; keep in sync. */
1584 tl_assert(thr->hbthr);
1585 libhb_async_exit(thr->hbthr);
1586 tl_assert(thr->coretid == i);
1587 thr->coretid = VG_INVALID_THREADID;
1588 map_threads_delete(i);
1589 }
1590}
1591
sewardjf98e1c02008-10-25 16:22:41 +00001592
sewardjb4112022007-11-09 22:49:28 +00001593static
1594void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1595{
sewardjb4112022007-11-09 22:49:28 +00001596 Thread* thr_s;
1597 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001598 Thr* hbthr_s;
1599 Thr* hbthr_q;
1600 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001601
1602 if (SHOW_EVENTS >= 1)
1603 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1604 (Int)stay_tid, quit_thr );
1605
sewardjf98e1c02008-10-25 16:22:41 +00001606 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001607
1608 thr_s = map_threads_maybe_lookup( stay_tid );
1609 thr_q = quit_thr;
1610 tl_assert(thr_s != NULL);
1611 tl_assert(thr_q != NULL);
1612 tl_assert(thr_s != thr_q);
1613
sewardjf98e1c02008-10-25 16:22:41 +00001614 hbthr_s = thr_s->hbthr;
1615 hbthr_q = thr_q->hbthr;
1616 tl_assert(hbthr_s != hbthr_q);
sewardj60626642011-03-10 15:14:37 +00001617 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1618 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001619
sewardjf98e1c02008-10-25 16:22:41 +00001620 /* Allocate a temporary synchronisation object and use it to send
1621 an imaginary message from the quitter to the stayer, the purpose
1622 being to generate a dependence from the quitter to the
1623 stayer. */
1624 so = libhb_so_alloc();
1625 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001626   /* Do a strong send: the stayer must acquire a dependency on
1627      everything the quitter did, even though the quitting thread
1628      no longer actually exists. */
sewardjf98e1c02008-10-25 16:22:41 +00001629 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1630 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1631 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001632
sewardjf98e1c02008-10-25 16:22:41 +00001633 /* evh__pre_thread_ll_exit issues an error message if the exiting
1634 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001635
1636 /* This holds because, at least when using NPTL as the thread
1637 library, we should be notified the low level thread exit before
1638 we hear of any join event on it. The low level exit
1639 notification feeds through into evh__pre_thread_ll_exit,
1640 which should clear the map_threads entry for it. Hence we
1641 expect there to be no map_threads entry at this point. */
1642 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1643 == VG_INVALID_THREADID);
1644
sewardjf98e1c02008-10-25 16:22:41 +00001645 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001646 all__sanity_check("evh__post_thread_join-post");
1647}
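
/* Illustrative sketch (ours; 'result' and 'fn' are placeholder
   names) of the idiom the send/recv pair above makes race-free:

       static int result;
       static void* fn ( void* v ) { result = 99; return NULL; }
       ...
       pthread_t t;
       pthread_create(&t, NULL, fn, NULL);
       pthread_join(t, NULL);    the quitter 'sent'; the stayer
                                 'recv's here
       int r = result;           ordered after the child's write */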
1648
1649static
1650void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1651 Addr a, SizeT size) {
1652 if (SHOW_EVENTS >= 2
1653 || (SHOW_EVENTS >= 1 && size != 1))
1654 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1655 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001656 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001657 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001658 all__sanity_check("evh__pre_mem_read-post");
1659}
1660
1661static
1662void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1663 Char* s, Addr a ) {
1664 Int len;
1665 if (SHOW_EVENTS >= 1)
1666      VG_(printf)("evh__pre_mem_read_asciiz(ctid=%d, \"%s\", %p)\n",
1667 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001668 // Don't segfault if the string starts in an obviously stupid
1669 // place. Actually we should check the whole string, not just
1670 // the start address, but that's too much trouble. At least
1671 // checking the first byte is better than nothing. See #255009.
1672 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1673 return;
sewardjb4112022007-11-09 22:49:28 +00001674 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001675 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001676 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001677 all__sanity_check("evh__pre_mem_read_asciiz-post");
1678}
1679
1680static
1681void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1682 Addr a, SizeT size ) {
1683 if (SHOW_EVENTS >= 1)
1684 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1685 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001686 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001687 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001688 all__sanity_check("evh__pre_mem_write-post");
1689}
1690
1691static
1692void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1693 if (SHOW_EVENTS >= 1)
1694 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1695 (void*)a, len, (Int)is_inited );
1696   // FIXME: this is kinda stupid -- both branches below are identical
1697 if (is_inited) {
1698 shadow_mem_make_New(get_current_Thread(), a, len);
1699 } else {
1700 shadow_mem_make_New(get_current_Thread(), a, len);
1701 }
sewardjf98e1c02008-10-25 16:22:41 +00001702 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001703      all__sanity_check("evh__new_mem_heap-post");
1704}
1705
1706static
1707void evh__die_mem_heap ( Addr a, SizeT len ) {
1708 if (SHOW_EVENTS >= 1)
1709 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1710 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001711 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001712      all__sanity_check("evh__die_mem_heap-post");
1713}
1714
sewardj23f12002009-07-24 08:45:08 +00001715/* --- Event handlers called from generated code --- */
1716
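/* A rough picture (ours, simplified): for each guest memory access
   the instrumenter plants a call to the matching helper immediately
   before the access, so a 4-byte client load of address 'a' is
   preceded by the equivalent of

       evh__mem_help_cread_4(a);

   These helpers therefore sit on the tool's hottest path, which is
   why they read current_Thread directly rather than doing a
   ThreadId lookup. */
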
sewardjb4112022007-11-09 22:49:28 +00001717static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001718void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001719 Thread* thr = get_current_Thread_in_C_C();
1720 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001721 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001722}
sewardjf98e1c02008-10-25 16:22:41 +00001723
sewardjb4112022007-11-09 22:49:28 +00001724static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001725void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001726 Thread* thr = get_current_Thread_in_C_C();
1727 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001728 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001729}
sewardjf98e1c02008-10-25 16:22:41 +00001730
sewardjb4112022007-11-09 22:49:28 +00001731static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001732void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001733 Thread* thr = get_current_Thread_in_C_C();
1734 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001735 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001736}
sewardjf98e1c02008-10-25 16:22:41 +00001737
sewardjb4112022007-11-09 22:49:28 +00001738static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001739void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001740 Thread* thr = get_current_Thread_in_C_C();
1741 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001742 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001743}
sewardjf98e1c02008-10-25 16:22:41 +00001744
sewardjb4112022007-11-09 22:49:28 +00001745static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001746void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001747 Thread* thr = get_current_Thread_in_C_C();
1748 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001749 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001750}
1751
1752static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001753void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001754 Thread* thr = get_current_Thread_in_C_C();
1755 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001756 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001757}
sewardjf98e1c02008-10-25 16:22:41 +00001758
sewardjb4112022007-11-09 22:49:28 +00001759static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001760void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001761 Thread* thr = get_current_Thread_in_C_C();
1762 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001763 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001764}
sewardjf98e1c02008-10-25 16:22:41 +00001765
sewardjb4112022007-11-09 22:49:28 +00001766static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001767void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001768 Thread* thr = get_current_Thread_in_C_C();
1769 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001770 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001771}
sewardjf98e1c02008-10-25 16:22:41 +00001772
sewardjb4112022007-11-09 22:49:28 +00001773static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001774void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001775 Thread* thr = get_current_Thread_in_C_C();
1776 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001777 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001778}
sewardjf98e1c02008-10-25 16:22:41 +00001779
sewardjb4112022007-11-09 22:49:28 +00001780static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001781void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001782 Thread* thr = get_current_Thread_in_C_C();
1783 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001784 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001785}
1786
sewardjb4112022007-11-09 22:49:28 +00001787
sewardj9f569b72008-11-13 13:33:09 +00001788/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001789/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001790/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001791
1792/* EXPOSITION only: by intercepting lock init events we can show the
1793 user where the lock was initialised, rather than only being able to
1794 show where it was first locked. Intercepting lock initialisations
1795 is not necessary for the basic operation of the race checker. */
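
/* Illustrative sketch (ours) of what the intercept buys:

       pthread_mutex_t mx;
       pthread_mutex_init(&mx, NULL);   reports can now cite this
                                        init site, ...
       pthread_mutex_lock(&mx);         ... not merely the first
                                        lock site */
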
1796static
1797void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1798 void* mutex, Word mbRec )
1799{
1800 if (SHOW_EVENTS >= 1)
1801 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1802 (Int)tid, mbRec, (void*)mutex );
1803 tl_assert(mbRec == 0 || mbRec == 1);
1804 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1805 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001806 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001807 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1808}
1809
1810static
1811void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1812{
1813 Thread* thr;
1814 Lock* lk;
1815 if (SHOW_EVENTS >= 1)
1816 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1817 (Int)tid, (void*)mutex );
1818
1819 thr = map_threads_maybe_lookup( tid );
1820 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001821 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001822
1823 lk = map_locks_maybe_lookup( (Addr)mutex );
1824
1825 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001826 HG_(record_error_Misc)(
1827 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001828 }
1829
1830 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001831 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001832 tl_assert( lk->guestaddr == (Addr)mutex );
1833 if (lk->heldBy) {
1834 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001835 HG_(record_error_Misc)(
1836 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001837 /* remove lock from locksets of all owning threads */
1838 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001839 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001840 lk->heldBy = NULL;
1841 lk->heldW = False;
1842 lk->acquired_at = NULL;
1843 }
1844 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001845 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001846
1847 if (HG_(clo_track_lockorders))
1848 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001849 map_locks_delete( lk->guestaddr );
1850 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001851 }
1852
sewardjf98e1c02008-10-25 16:22:41 +00001853 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001854 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1855}
1856
1857static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1858 void* mutex, Word isTryLock )
1859{
1860 /* Just check the mutex is sane; nothing else to do. */
1861 // 'mutex' may be invalid - not checked by wrapper
1862 Thread* thr;
1863 Lock* lk;
1864 if (SHOW_EVENTS >= 1)
1865 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1866 (Int)tid, (void*)mutex );
1867
1868 tl_assert(isTryLock == 0 || isTryLock == 1);
1869 thr = map_threads_maybe_lookup( tid );
1870 tl_assert(thr); /* cannot fail - Thread* must already exist */
1871
1872 lk = map_locks_maybe_lookup( (Addr)mutex );
1873
1874 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001875 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1876 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001877 }
1878
1879 if ( lk
1880 && isTryLock == 0
1881 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1882 && lk->heldBy
1883 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001884 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001885 /* uh, it's a non-recursive lock and we already w-hold it, and
1886 this is a real lock operation (not a speculative "tryLock"
1887 kind of thing). Duh. Deadlock coming up; but at least
1888 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00001889 HChar* errstr = "Attempt to re-lock a "
1890 "non-recursive lock I already hold";
1891 HChar* auxstr = "Lock was previously acquired";
1892 if (lk->acquired_at) {
1893 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
1894 } else {
1895 HG_(record_error_Misc)( thr, errstr );
1896 }
sewardjb4112022007-11-09 22:49:28 +00001897 }
1898}
1899
1900static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1901{
1902 // only called if the real library call succeeded - so mutex is sane
1903 Thread* thr;
1904 if (SHOW_EVENTS >= 1)
1905 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1906 (Int)tid, (void*)mutex );
1907
1908 thr = map_threads_maybe_lookup( tid );
1909 tl_assert(thr); /* cannot fail - Thread* must already exist */
1910
1911 evhH__post_thread_w_acquires_lock(
1912 thr,
1913 LK_mbRec, /* if not known, create new lock with this LockKind */
1914 (Addr)mutex
1915 );
1916}
1917
1918static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1919{
1920 // 'mutex' may be invalid - not checked by wrapper
1921 Thread* thr;
1922 if (SHOW_EVENTS >= 1)
1923 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1924 (Int)tid, (void*)mutex );
1925
1926 thr = map_threads_maybe_lookup( tid );
1927 tl_assert(thr); /* cannot fail - Thread* must already exist */
1928
1929 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1930}
1931
1932static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1933{
1934 // only called if the real library call succeeded - so mutex is sane
1935 Thread* thr;
1936 if (SHOW_EVENTS >= 1)
1937 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1938 (Int)tid, (void*)mutex );
1939 thr = map_threads_maybe_lookup( tid );
1940 tl_assert(thr); /* cannot fail - Thread* must already exist */
1941
1942 // anything we should do here?
1943}
1944
1945
sewardj5a644da2009-08-11 10:35:58 +00001946/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00001947/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00001948/* ------------------------------------------------------- */
1949
1950/* All a bit of a kludge. Pretend we're really dealing with ordinary
1951 pthread_mutex_t's instead, for the most part. */
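
/* Presumed mapping (ours) from client calls to these handlers:

       pthread_spinlock_t sl;
       pthread_spin_init(&sl, 0);     INIT_OR_UNLOCK_PRE + _POST
       pthread_spin_lock(&sl);        SPIN_LOCK_PRE + _POST
       pthread_spin_unlock(&sl);      INIT_OR_UNLOCK_PRE + _POST
       pthread_spin_destroy(&sl);     SPIN_DESTROY_PRE

   Init and unlock share one handler pair because, as the PRE
   routine below notes, the wrapper cannot tell the two apart. */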
1952
1953static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
1954 void* slock )
1955{
1956 Thread* thr;
1957 Lock* lk;
1958 /* In glibc's kludgey world, we're either initialising or unlocking
1959 it. Since this is the pre-routine, if it is locked, unlock it
1960 and take a dependence edge. Otherwise, do nothing. */
1961
1962 if (SHOW_EVENTS >= 1)
1963 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
1964 "(ctid=%d, slock=%p)\n",
1965 (Int)tid, (void*)slock );
1966
1967 thr = map_threads_maybe_lookup( tid );
1968   /* cannot fail - Thread* must already exist */
1969 tl_assert( HG_(is_sane_Thread)(thr) );
1970
1971 lk = map_locks_maybe_lookup( (Addr)slock );
1972 if (lk && lk->heldBy) {
1973 /* it's held. So do the normal pre-unlock actions, as copied
1974 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
1975 duplicates the map_locks_maybe_lookup. */
1976 evhH__pre_thread_releases_lock( thr, (Addr)slock,
1977 False/*!isRDWR*/ );
1978 }
1979}
1980
1981static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
1982 void* slock )
1983{
1984 Lock* lk;
1985 /* More kludgery. If the lock has never been seen before, do
1986 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
1987 nothing. */
1988
1989 if (SHOW_EVENTS >= 1)
1990 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
1991 "(ctid=%d, slock=%p)\n",
1992 (Int)tid, (void*)slock );
1993
1994 lk = map_locks_maybe_lookup( (Addr)slock );
1995 if (!lk) {
1996 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
1997 }
1998}
1999
2000static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2001 void* slock, Word isTryLock )
2002{
2003 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2004}
2005
2006static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2007 void* slock )
2008{
2009 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2010}
2011
2012static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2013 void* slock )
2014{
2015 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2016}
2017
2018
sewardj9f569b72008-11-13 13:33:09 +00002019/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002020/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002021/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002022
sewardj02114542009-07-28 20:52:36 +00002023/* A mapping from CV to (the SO associated with it, plus some
2024 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002025 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2026 wait on it completes, we do a 'recv' from the SO. This is believed
2027 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002028 signallings/broadcasts.
2029*/
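
/* Illustrative sketch (ours) of the edge this gives us:

       signaller:                      waiter:
       pthread_mutex_lock(&mx);        pthread_mutex_lock(&mx);
       data = 1;                       while (!ready)
       ready = 1;                         pthread_cond_wait(&cv,&mx);
       pthread_cond_signal(&cv);       r = data;   ordered; no race
       pthread_mutex_unlock(&mx);      pthread_mutex_unlock(&mx);

   The signal does a 'send' on the CV's SO and the completed wait a
   'recv', so the signaller's writes happen-before the waiter's
   reads. */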
2030
sewardj02114542009-07-28 20:52:36 +00002031/* .so is the SO for this CV.
2032 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002033
sewardj02114542009-07-28 20:52:36 +00002034 POSIX says effectively that the first pthread_cond_{timed}wait call
2035 causes a dynamic binding between the CV and the mutex, and that
2036 lasts until such time as the waiter count falls to zero. Hence
2037 need to keep track of the number of waiters in order to do
2038 consistency tracking. */
2039typedef
2040 struct {
2041 SO* so; /* libhb-allocated SO */
2042 void* mx_ga; /* addr of associated mutex, if any */
2043 UWord nWaiters; /* # threads waiting on the CV */
2044 }
2045 CVInfo;
2046
2047
2048/* pthread_cond_t* -> CVInfo* */
2049static WordFM* map_cond_to_CVInfo = NULL;
2050
2051static void map_cond_to_CVInfo_INIT ( void ) {
2052 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2053 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2054 "hg.mctCI.1", HG_(free), NULL );
2055 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002056 }
2057}
2058
sewardj02114542009-07-28 20:52:36 +00002059static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002060 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002061 map_cond_to_CVInfo_INIT();
2062 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002063 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002064 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002065 } else {
sewardj02114542009-07-28 20:52:36 +00002066 SO* so = libhb_so_alloc();
2067 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2068 cvi->so = so;
2069 cvi->mx_ga = 0;
2070 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2071 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002072 }
2073}
2074
sewardj02114542009-07-28 20:52:36 +00002075static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002076 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002077 map_cond_to_CVInfo_INIT();
2078 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2079 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002080 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002081 tl_assert(cvi);
2082 tl_assert(cvi->so);
2083 libhb_so_dealloc(cvi->so);
2084 cvi->mx_ga = 0;
2085 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002086 }
2087}
2088
2089static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2090{
sewardjf98e1c02008-10-25 16:22:41 +00002091 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2092 cond to a SO if it is not already so bound, and 'send' on the
2093 SO. This is later used by other thread(s) which successfully
2094 exit from a pthread_cond_wait on the same cv; then they 'recv'
2095 from the SO, thereby acquiring a dependency on this signalling
2096 event. */
sewardjb4112022007-11-09 22:49:28 +00002097 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002098 CVInfo* cvi;
2099 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002100
2101 if (SHOW_EVENTS >= 1)
2102 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2103 (Int)tid, (void*)cond );
2104
sewardjb4112022007-11-09 22:49:28 +00002105 thr = map_threads_maybe_lookup( tid );
2106 tl_assert(thr); /* cannot fail - Thread* must already exist */
2107
sewardj02114542009-07-28 20:52:36 +00002108 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2109 tl_assert(cvi);
2110 tl_assert(cvi->so);
2111
sewardjb4112022007-11-09 22:49:28 +00002112 // error-if: mutex is bogus
2113 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002114 // Hmm. POSIX doesn't actually say that it's an error to call
2115 // pthread_cond_signal with the associated mutex being unlocked.
2116 // Although it does say that it should be "if consistent scheduling
2117 // is desired."
2118 //
2119 // For the moment, disable these checks.
2120 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2121 //if (lk == NULL || cvi->mx_ga == 0) {
2122 // HG_(record_error_Misc)( thr,
2123 // "pthread_cond_{signal,broadcast}: "
2124 // "no or invalid mutex associated with cond");
2125 //}
2126 ///* note: lk could be NULL. Be careful. */
2127 //if (lk) {
2128 // if (lk->kind == LK_rdwr) {
2129 // HG_(record_error_Misc)(thr,
2130 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2131 // }
2132 // if (lk->heldBy == NULL) {
2133 // HG_(record_error_Misc)(thr,
2134 // "pthread_cond_{signal,broadcast}: "
2135 // "associated lock is not held by any thread");
2136 // }
2137 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2138 // HG_(record_error_Misc)(thr,
2139 // "pthread_cond_{signal,broadcast}: "
2140 // "associated lock is not held by calling thread");
2141 // }
2142 //}
sewardjb4112022007-11-09 22:49:28 +00002143
sewardj02114542009-07-28 20:52:36 +00002144 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002145}
2146
2147/* returns True if it reckons 'mutex' is valid and held by this
2148 thread, else False */
2149static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2150 void* cond, void* mutex )
2151{
2152 Thread* thr;
2153 Lock* lk;
2154 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002155 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002156
2157 if (SHOW_EVENTS >= 1)
2158 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2159 "(ctid=%d, cond=%p, mutex=%p)\n",
2160 (Int)tid, (void*)cond, (void*)mutex );
2161
sewardjb4112022007-11-09 22:49:28 +00002162 thr = map_threads_maybe_lookup( tid );
2163 tl_assert(thr); /* cannot fail - Thread* must already exist */
2164
2165 lk = map_locks_maybe_lookup( (Addr)mutex );
2166
2167 /* Check for stupid mutex arguments. There are various ways to be
2168 a bozo. Only complain once, though, even if more than one thing
2169 is wrong. */
2170 if (lk == NULL) {
2171 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002172 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002173 thr,
2174 "pthread_cond_{timed}wait called with invalid mutex" );
2175 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002176 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002177 if (lk->kind == LK_rdwr) {
2178 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002179 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002180 thr, "pthread_cond_{timed}wait called with mutex "
2181 "of type pthread_rwlock_t*" );
2182 } else
2183 if (lk->heldBy == NULL) {
2184 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002185 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002186 thr, "pthread_cond_{timed}wait called with un-held mutex");
2187 } else
2188 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002189 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002190 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002191 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002192 thr, "pthread_cond_{timed}wait called with mutex "
2193 "held by a different thread" );
2194 }
2195 }
2196
2197 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002198 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2199 tl_assert(cvi);
2200 tl_assert(cvi->so);
2201 if (cvi->nWaiters == 0) {
2202 /* form initial (CV,MX) binding */
2203 cvi->mx_ga = mutex;
2204 }
2205 else /* check existing (CV,MX) binding */
2206 if (cvi->mx_ga != mutex) {
2207 HG_(record_error_Misc)(
2208 thr, "pthread_cond_{timed}wait: cond is associated "
2209 "with a different mutex");
2210 }
2211 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002212
2213 return lk_valid;
2214}
2215
2216static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2217 void* cond, void* mutex )
2218{
sewardjf98e1c02008-10-25 16:22:41 +00002219 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2220 the SO for this cond, and 'recv' from it so as to acquire a
2221 dependency edge back to the signaller/broadcaster. */
2222 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002223 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002224
2225 if (SHOW_EVENTS >= 1)
2226 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2227 "(ctid=%d, cond=%p, mutex=%p)\n",
2228 (Int)tid, (void*)cond, (void*)mutex );
2229
sewardjb4112022007-11-09 22:49:28 +00002230 thr = map_threads_maybe_lookup( tid );
2231 tl_assert(thr); /* cannot fail - Thread* must already exist */
2232
2233 // error-if: cond is also associated with a different mutex
2234
sewardj02114542009-07-28 20:52:36 +00002235 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2236 tl_assert(cvi);
2237 tl_assert(cvi->so);
2238 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002239
sewardj02114542009-07-28 20:52:36 +00002240 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002241 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2242 it? If this happened it would surely be a bug in the threads
2243 library. Or one of those fabled "spurious wakeups". */
2244 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2245 "succeeded on"
2246 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002247 }
sewardjf98e1c02008-10-25 16:22:41 +00002248
2249 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002250 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2251
2252 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002253}
2254
2255static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2256 void* cond )
2257{
2258 /* Deal with destroy events. The only purpose is to free storage
2259 associated with the CV, so as to avoid any possible resource
2260 leaks. */
2261 if (SHOW_EVENTS >= 1)
2262 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2263 "(ctid=%d, cond=%p)\n",
2264 (Int)tid, (void*)cond );
2265
sewardj02114542009-07-28 20:52:36 +00002266 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002267}
2268
2269
sewardj9f569b72008-11-13 13:33:09 +00002270/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002271/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002272/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002273
2274/* EXPOSITION only */
2275static
2276void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2277{
2278 if (SHOW_EVENTS >= 1)
2279 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2280 (Int)tid, (void*)rwl );
2281 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002282 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002283 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2284}
2285
2286static
2287void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2288{
2289 Thread* thr;
2290 Lock* lk;
2291 if (SHOW_EVENTS >= 1)
2292 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2293 (Int)tid, (void*)rwl );
2294
2295 thr = map_threads_maybe_lookup( tid );
2296 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002297 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002298
2299 lk = map_locks_maybe_lookup( (Addr)rwl );
2300
2301 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002302 HG_(record_error_Misc)(
2303 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002304 }
2305
2306 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002307 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002308 tl_assert( lk->guestaddr == (Addr)rwl );
2309 if (lk->heldBy) {
2310 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002311 HG_(record_error_Misc)(
2312         thr, "pthread_rwlock_destroy of a locked rwlock" );
sewardjb4112022007-11-09 22:49:28 +00002313 /* remove lock from locksets of all owning threads */
2314 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002315 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002316 lk->heldBy = NULL;
2317 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002318 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002319 }
2320 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002321 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002322
2323 if (HG_(clo_track_lockorders))
2324 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002325 map_locks_delete( lk->guestaddr );
2326 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002327 }
2328
sewardjf98e1c02008-10-25 16:22:41 +00002329 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002330 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2331}
2332
2333static
sewardj789c3c52008-02-25 12:10:07 +00002334void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2335 void* rwl,
2336 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002337{
2338 /* Just check the rwl is sane; nothing else to do. */
2339 // 'rwl' may be invalid - not checked by wrapper
2340 Thread* thr;
2341 Lock* lk;
2342 if (SHOW_EVENTS >= 1)
2343 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2344 (Int)tid, (Int)isW, (void*)rwl );
2345
2346 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002347 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002348 thr = map_threads_maybe_lookup( tid );
2349 tl_assert(thr); /* cannot fail - Thread* must already exist */
2350
2351 lk = map_locks_maybe_lookup( (Addr)rwl );
2352 if ( lk
2353 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2354 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002355 HG_(record_error_Misc)(
2356 thr, "pthread_rwlock_{rd,rw}lock with a "
2357 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002358 }
2359}
2360
2361static
2362void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2363{
2364   // only called if the real library call succeeded - so the rwlock is sane
2365 Thread* thr;
2366 if (SHOW_EVENTS >= 1)
2367 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2368 (Int)tid, (Int)isW, (void*)rwl );
2369
2370 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2371 thr = map_threads_maybe_lookup( tid );
2372 tl_assert(thr); /* cannot fail - Thread* must already exist */
2373
2374 (isW ? evhH__post_thread_w_acquires_lock
2375 : evhH__post_thread_r_acquires_lock)(
2376 thr,
2377 LK_rdwr, /* if not known, create new lock with this LockKind */
2378 (Addr)rwl
2379 );
2380}
2381
2382static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2383{
2384 // 'rwl' may be invalid - not checked by wrapper
2385 Thread* thr;
2386 if (SHOW_EVENTS >= 1)
2387 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2388 (Int)tid, (void*)rwl );
2389
2390 thr = map_threads_maybe_lookup( tid );
2391 tl_assert(thr); /* cannot fail - Thread* must already exist */
2392
2393 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2394}
2395
2396static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2397{
2398   // only called if the real library call succeeded - so the rwlock is sane
2399 Thread* thr;
2400 if (SHOW_EVENTS >= 1)
2401 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2402 (Int)tid, (void*)rwl );
2403 thr = map_threads_maybe_lookup( tid );
2404 tl_assert(thr); /* cannot fail - Thread* must already exist */
2405
2406 // anything we should do here?
2407}
2408
2409
sewardj9f569b72008-11-13 13:33:09 +00002410/* ---------------------------------------------------------- */
2411/* -------------- events to do with semaphores -------------- */
2412/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002413
sewardj11e352f2007-11-30 11:11:02 +00002414/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002415 variables. */
2416
sewardjf98e1c02008-10-25 16:22:41 +00002417/* For each semaphore, we maintain a stack of SOs. When a 'post'
2418 operation is done on a semaphore (unlocking, essentially), a new SO
2419 is created for the posting thread, the posting thread does a strong
2420 send to it (which merely installs the posting thread's VC in the
2421 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002422
2423 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002424 semaphore, we pop a SO off the semaphore's stack (which should be
2425 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002426 dependencies between posters and waiters of the semaphore.
2427
sewardjf98e1c02008-10-25 16:22:41 +00002428 It may not be necessary to use a stack - perhaps a bag of SOs would
2429 do. But we do need to keep track of how many unused-up posts have
2430 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002431
sewardjf98e1c02008-10-25 16:22:41 +00002432 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002433 twice on S. T3 cannot complete its waits without both T1 and T2
2434 posting. The above mechanism will ensure that T3 acquires
2435 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002436
sewardjf98e1c02008-10-25 16:22:41 +00002437 When a semaphore is initialised with value N, we do as if we'd
2438 posted N times on the semaphore: basically create N SOs and do a
2439 strong send to all of then. This allows up to N waits on the
2440 semaphore to acquire a dependency on the initialisation point,
2441 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002442
2443 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2444 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002445*/
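
/* Illustrative sketch (ours; 'use' is a placeholder) of the
   T1/T2/T3 scenario described above:

       sem_t s;
       sem_init(&s, 0, 0);
       T1: x = 1; sem_post(&s);     strong send into a fresh SO,
                                    which is pushed on s's stack
       T2: y = 1; sem_post(&s);     ditto; a second SO is pushed
       T3: sem_wait(&s);            pops an SO, strong recv
       T3: sem_wait(&s);            pops the other, strong recv
       T3: use(x, y);               ordered after both posts */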
2446
sewardjf98e1c02008-10-25 16:22:41 +00002447/* sem_t* -> XArray* SO* */
2448static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002449
sewardjf98e1c02008-10-25 16:22:41 +00002450static void map_sem_to_SO_stack_INIT ( void ) {
2451 if (map_sem_to_SO_stack == NULL) {
2452 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2453 HG_(free), NULL );
2454 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002455 }
2456}
2457
sewardjf98e1c02008-10-25 16:22:41 +00002458static void push_SO_for_sem ( void* sem, SO* so ) {
2459 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002460 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002461 tl_assert(so);
2462 map_sem_to_SO_stack_INIT();
2463 if (VG_(lookupFM)( map_sem_to_SO_stack,
2464 &keyW, (UWord*)&xa, (UWord)sem )) {
2465 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002466 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002467 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002468 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002469 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2470 VG_(addToXA)( xa, &so );
2471 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002472 }
2473}
2474
sewardjf98e1c02008-10-25 16:22:41 +00002475static SO* mb_pop_SO_for_sem ( void* sem ) {
2476 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002477 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002478 SO* so;
2479 map_sem_to_SO_stack_INIT();
2480 if (VG_(lookupFM)( map_sem_to_SO_stack,
2481 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002482 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002483 Word sz;
2484 tl_assert(keyW == (UWord)sem);
2485 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002486 tl_assert(sz >= 0);
2487 if (sz == 0)
2488 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002489 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2490 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002491 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002492 return so;
sewardjb4112022007-11-09 22:49:28 +00002493 } else {
2494 /* hmm, that's odd. No stack for this semaphore. */
2495 return NULL;
2496 }
2497}
2498
sewardj11e352f2007-11-30 11:11:02 +00002499static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002500{
sewardjf98e1c02008-10-25 16:22:41 +00002501 UWord keyW, valW;
2502 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002503
sewardjb4112022007-11-09 22:49:28 +00002504 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002505 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002506 (Int)tid, (void*)sem );
2507
sewardjf98e1c02008-10-25 16:22:41 +00002508 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002509
sewardjf98e1c02008-10-25 16:22:41 +00002510 /* Empty out the semaphore's SO stack. This way of doing it is
2511 stupid, but at least it's easy. */
2512 while (1) {
2513 so = mb_pop_SO_for_sem( sem );
2514 if (!so) break;
2515 libhb_so_dealloc(so);
2516 }
2517
2518 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2519 XArray* xa = (XArray*)valW;
2520 tl_assert(keyW == (UWord)sem);
2521 tl_assert(xa);
2522 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2523 VG_(deleteXA)(xa);
2524 }
sewardjb4112022007-11-09 22:49:28 +00002525}
2526
sewardj11e352f2007-11-30 11:11:02 +00002527static
2528void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2529{
sewardjf98e1c02008-10-25 16:22:41 +00002530 SO* so;
2531 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002532
2533 if (SHOW_EVENTS >= 1)
2534 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2535 (Int)tid, (void*)sem, value );
2536
sewardjf98e1c02008-10-25 16:22:41 +00002537 thr = map_threads_maybe_lookup( tid );
2538 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002539
sewardjf98e1c02008-10-25 16:22:41 +00002540 /* Empty out the semaphore's SO stack. This way of doing it is
2541 stupid, but at least it's easy. */
2542 while (1) {
2543 so = mb_pop_SO_for_sem( sem );
2544 if (!so) break;
2545 libhb_so_dealloc(so);
2546 }
sewardj11e352f2007-11-30 11:11:02 +00002547
sewardjf98e1c02008-10-25 16:22:41 +00002548 /* If we don't do this check, the following while loop runs us out
2549 of memory for stupid initial values of 'value'. */
2550 if (value > 10000) {
2551 HG_(record_error_Misc)(
2552 thr, "sem_init: initial value exceeds 10000; using 10000" );
2553 value = 10000;
2554 }
sewardj11e352f2007-11-30 11:11:02 +00002555
sewardjf98e1c02008-10-25 16:22:41 +00002556    /* Now create 'value' new SOs for the thread, do a strong send to
2557 each of them, and push them all on the stack. */
2558 for (; value > 0; value--) {
2559 Thr* hbthr = thr->hbthr;
2560 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002561
sewardjf98e1c02008-10-25 16:22:41 +00002562 so = libhb_so_alloc();
2563 libhb_so_send( hbthr, so, True/*strong send*/ );
2564 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002565 }
2566}
2567
2568static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002569{
sewardjf98e1c02008-10-25 16:22:41 +00002570 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2571 it (iow, write our VC into it, then tick ours), and push the SO
2572    on a stack of SOs associated with 'sem'. This is later used
2573 by other thread(s) which successfully exit from a sem_wait on
2574    the same sem; by doing a strong recv from SOs popped off the
2575 stack, they acquire dependencies on the posting thread
2576 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002577
sewardjf98e1c02008-10-25 16:22:41 +00002578 Thread* thr;
2579 SO* so;
2580 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002581
2582 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002583 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002584 (Int)tid, (void*)sem );
2585
2586 thr = map_threads_maybe_lookup( tid );
2587 tl_assert(thr); /* cannot fail - Thread* must already exist */
2588
2589 // error-if: sem is bogus
2590
sewardjf98e1c02008-10-25 16:22:41 +00002591 hbthr = thr->hbthr;
2592 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002593
sewardjf98e1c02008-10-25 16:22:41 +00002594 so = libhb_so_alloc();
2595 libhb_so_send( hbthr, so, True/*strong send*/ );
2596 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002597}
2598
sewardj11e352f2007-11-30 11:11:02 +00002599static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002600{
sewardjf98e1c02008-10-25 16:22:41 +00002601 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2602 the 'sem' from this semaphore's SO-stack, and do a strong recv
2603 from it. This creates a dependency back to one of the post-ers
2604 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002605
sewardjf98e1c02008-10-25 16:22:41 +00002606 Thread* thr;
2607 SO* so;
2608 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002609
2610 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002611 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002612 (Int)tid, (void*)sem );
2613
2614 thr = map_threads_maybe_lookup( tid );
2615 tl_assert(thr); /* cannot fail - Thread* must already exist */
2616
2617 // error-if: sem is bogus
2618
sewardjf98e1c02008-10-25 16:22:41 +00002619 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002620
sewardjf98e1c02008-10-25 16:22:41 +00002621 if (so) {
2622 hbthr = thr->hbthr;
2623 tl_assert(hbthr);
2624
2625 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2626 libhb_so_dealloc(so);
2627 } else {
2628 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2629 If this happened it would surely be a bug in the threads
2630 library. */
2631 HG_(record_error_Misc)(
2632 thr, "Bug in libpthread: sem_wait succeeded on"
2633 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002634 }
2635}
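
/* Illustrative client-side sketch (not part of this file) of the
   happens-before edge the two handlers above create.  Assumes a POSIX
   semaphore 's' initialised with value 0 and a plain int 'shared':

      int   shared;
      sem_t s;                        // sem_init(&s, 0, 0)

      // Thread A                     // Thread B
      shared = 42;                    sem_wait(&s);  // strong recv from
      sem_post(&s);  // strong send   //  the SO popped off s's SO-stack
      //  on a fresh SO               assert(shared == 42);  // no race

   The post pushes an SO carrying A's vector clock; the wait pops that
   SO and joins it into B's clock, so B's read of 'shared' is ordered
   after A's write and no race is reported. */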
2636
2637
sewardj9f569b72008-11-13 13:33:09 +00002638/* -------------------------------------------------------- */
2639/* -------------- events to do with barriers -------------- */
2640/* -------------------------------------------------------- */
2641
2642typedef
2643 struct {
2644 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002645 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002646 UWord size; /* declared size */
2647 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2648 }
2649 Bar;
2650
2651static Bar* new_Bar ( void ) {
2652 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2653 tl_assert(bar);
2654 /* all fields are zero */
2655 tl_assert(bar->initted == False);
2656 return bar;
2657}
2658
2659static void delete_Bar ( Bar* bar ) {
2660 tl_assert(bar);
2661 if (bar->waiting)
2662 VG_(deleteXA)(bar->waiting);
2663 HG_(free)(bar);
2664}
2665
2666/* A mapping which stores auxiliary data for barriers. */
2667
2668/* pthread_barrier_t* -> Bar* */
2669static WordFM* map_barrier_to_Bar = NULL;
2670
2671static void map_barrier_to_Bar_INIT ( void ) {
2672 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2673 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2674 "hg.mbtBI.1", HG_(free), NULL );
2675 tl_assert(map_barrier_to_Bar != NULL);
2676 }
2677}
2678
2679static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2680 UWord key, val;
2681 map_barrier_to_Bar_INIT();
2682 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2683 tl_assert(key == (UWord)barrier);
2684 return (Bar*)val;
2685 } else {
2686 Bar* bar = new_Bar();
2687 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2688 return bar;
2689 }
2690}
2691
2692static void map_barrier_to_Bar_delete ( void* barrier ) {
2693 UWord keyW, valW;
2694 map_barrier_to_Bar_INIT();
2695 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2696 Bar* bar = (Bar*)valW;
2697 tl_assert(keyW == (UWord)barrier);
2698 delete_Bar(bar);
2699 }
2700}
2701
2702
2703static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2704 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002705 UWord count,
2706 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002707{
2708 Thread* thr;
2709 Bar* bar;
2710
2711 if (SHOW_EVENTS >= 1)
2712 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002713 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2714 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002715
2716 thr = map_threads_maybe_lookup( tid );
2717 tl_assert(thr); /* cannot fail - Thread* must already exist */
2718
2719 if (count == 0) {
2720 HG_(record_error_Misc)(
2721 thr, "pthread_barrier_init: 'count' argument is zero"
2722 );
2723 }
2724
sewardj406bac82010-03-03 23:03:40 +00002725 if (resizable != 0 && resizable != 1) {
2726 HG_(record_error_Misc)(
2727 thr, "pthread_barrier_init: invalid 'resizable' argument"
2728 );
2729 }
2730
sewardj9f569b72008-11-13 13:33:09 +00002731 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2732 tl_assert(bar);
2733
2734 if (bar->initted) {
2735 HG_(record_error_Misc)(
2736 thr, "pthread_barrier_init: barrier is already initialised"
2737 );
2738 }
2739
2740 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2741 tl_assert(bar->initted);
2742 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002743 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002744 );
2745 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2746 }
2747 if (!bar->waiting) {
2748 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2749 sizeof(Thread*) );
2750 }
2751
2752 tl_assert(bar->waiting);
2753 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002754 bar->initted = True;
2755 bar->resizable = resizable == 1 ? True : False;
2756 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002757}
2758
2759
2760static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2761 void* barrier )
2762{
sewardj553655c2008-11-14 19:41:19 +00002763 Thread* thr;
2764 Bar* bar;
2765
sewardj9f569b72008-11-13 13:33:09 +00002766 /* Deal with destroy events. The only purpose is to free storage
2767 associated with the barrier, so as to avoid any possible
2768 resource leaks. */
2769 if (SHOW_EVENTS >= 1)
2770 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2771 "(tid=%d, barrier=%p)\n",
2772 (Int)tid, (void*)barrier );
2773
sewardj553655c2008-11-14 19:41:19 +00002774 thr = map_threads_maybe_lookup( tid );
2775 tl_assert(thr); /* cannot fail - Thread* must already exist */
2776
2777 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2778 tl_assert(bar);
2779
2780 if (!bar->initted) {
2781 HG_(record_error_Misc)(
2782 thr, "pthread_barrier_destroy: barrier was never initialised"
2783 );
2784 }
2785
2786 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2787 HG_(record_error_Misc)(
2788 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2789 );
2790 }
2791
sewardj9f569b72008-11-13 13:33:09 +00002792 /* Maybe we shouldn't do this; just let it persist, so that when it
2793 is reinitialised we don't need to do any dynamic memory
2794 allocation? The downside is a potentially unlimited space leak,
2795 if the client creates (in turn) a large number of barriers all
2796 at different locations. Note that if we do later move to the
2797 don't-delete-it scheme, we need to mark the barrier as
2798 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002799 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002800 map_barrier_to_Bar_delete( barrier );
2801}
2802
2803
sewardj406bac82010-03-03 23:03:40 +00002804/* All the threads have arrived. Now do the Interesting Bit. Get a
2805 new synchronisation object and do a weak send to it from all the
2806   participating threads. This makes its vector clocks the join of
2807 all the individual threads' vector clocks. Then do a strong
2808 receive from it back to all threads, so that their VCs are a copy
2809 of it (hence are all equal to the join of their original VCs.) */
2810static void do_barrier_cross_sync_and_empty ( Bar* bar )
2811{
2812 /* XXX check bar->waiting has no duplicates */
2813 UWord i;
2814 SO* so = libhb_so_alloc();
2815
2816 tl_assert(bar->waiting);
2817 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2818
2819 /* compute the join ... */
2820 for (i = 0; i < bar->size; i++) {
2821 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2822 Thr* hbthr = t->hbthr;
2823 libhb_so_send( hbthr, so, False/*weak send*/ );
2824 }
2825 /* ... and distribute to all threads */
2826 for (i = 0; i < bar->size; i++) {
2827 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2828 Thr* hbthr = t->hbthr;
2829 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2830 }
2831
2832 /* finally, we must empty out the waiting vector */
2833 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2834
2835 /* and we don't need this any more. Perhaps a stack-allocated
2836 SO would be better? */
2837 libhb_so_dealloc(so);
2838}
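
/* A small worked example of the above (illustrative numbers).  Suppose
   three threads arrive with vector clocks T1=[2,0,0], T2=[0,5,0],
   T3=[0,0,1]:

      weak send T1 -> so      so = [2,0,0]
      weak send T2 -> so      so = [2,5,0]    (pointwise max)
      weak send T3 -> so      so = [2,5,1]    == join of all three
      strong recv so -> Ti    Ti = [2,5,1]    for i = 1,2,3

   Afterwards every participant carries the join of the original
   clocks, which is exactly the barrier semantics described above. */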
2839
2840
sewardj9f569b72008-11-13 13:33:09 +00002841static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2842 void* barrier )
2843{
sewardj1c466b72008-11-19 11:52:14 +00002844 /* This function gets called after a client thread calls
2845 pthread_barrier_wait but before it arrives at the real
2846 pthread_barrier_wait.
2847
2848 Why is the following correct? It's a bit subtle.
2849
2850 If this is not the last thread arriving at the barrier, we simply
2851 note its presence and return. Because valgrind (at least as of
2852 Nov 08) is single threaded, we are guaranteed safe from any race
2853 conditions when in this function -- no other client threads are
2854 running.
2855
2856 If this is the last thread, then we are again the only running
2857 thread. All the other threads will have either arrived at the
2858 real pthread_barrier_wait or are on their way to it, but in any
2859 case are guaranteed not to be able to move past it, because this
2860 thread is currently in this function and so has not yet arrived
2861 at the real pthread_barrier_wait. That means that:
2862
2863 1. While we are in this function, none of the other threads
2864 waiting at the barrier can move past it.
2865
2866 2. When this function returns (and simulated execution resumes),
2867 this thread and all other waiting threads will be able to move
2868 past the real barrier.
2869
2870 Because of this, it is now safe to update the vector clocks of
2871 all threads, to represent the fact that they all arrived at the
2872 barrier and have all moved on. There is no danger of any
2873 complications to do with some threads leaving the barrier and
2874 racing back round to the front, whilst others are still leaving
2875 (which is the primary source of complication in correct handling/
2876 implementation of barriers). That can't happen because we update
2877      our data structures here so as to indicate that the threads have
2878 passed the barrier, even though, as per (2) above, they are
2879 guaranteed not to pass the barrier until we return.
2880
2881 This relies crucially on Valgrind being single threaded. If that
2882 changes, this will need to be reconsidered.
2883 */
sewardj9f569b72008-11-13 13:33:09 +00002884 Thread* thr;
2885 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00002886 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00002887
2888 if (SHOW_EVENTS >= 1)
2889 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2890 "(tid=%d, barrier=%p)\n",
2891 (Int)tid, (void*)barrier );
2892
2893 thr = map_threads_maybe_lookup( tid );
2894 tl_assert(thr); /* cannot fail - Thread* must already exist */
2895
2896 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2897 tl_assert(bar);
2898
2899 if (!bar->initted) {
2900 HG_(record_error_Misc)(
2901 thr, "pthread_barrier_wait: barrier is uninitialised"
2902 );
2903 return; /* client is broken .. avoid assertions below */
2904 }
2905
2906 /* guaranteed by _INIT_PRE above */
2907 tl_assert(bar->size > 0);
2908 tl_assert(bar->waiting);
2909
2910 VG_(addToXA)( bar->waiting, &thr );
2911
2912 /* guaranteed by this function */
2913 present = VG_(sizeXA)(bar->waiting);
2914 tl_assert(present > 0 && present <= bar->size);
2915
2916 if (present < bar->size)
2917 return;
2918
sewardj406bac82010-03-03 23:03:40 +00002919 do_barrier_cross_sync_and_empty(bar);
2920}
sewardj9f569b72008-11-13 13:33:09 +00002921
sewardj9f569b72008-11-13 13:33:09 +00002922
sewardj406bac82010-03-03 23:03:40 +00002923static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
2924 void* barrier,
2925 UWord newcount )
2926{
2927 Thread* thr;
2928 Bar* bar;
2929 UWord present;
2930
2931 if (SHOW_EVENTS >= 1)
2932 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
2933 "(tid=%d, barrier=%p, newcount=%lu)\n",
2934 (Int)tid, (void*)barrier, newcount );
2935
2936 thr = map_threads_maybe_lookup( tid );
2937 tl_assert(thr); /* cannot fail - Thread* must already exist */
2938
2939 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2940 tl_assert(bar);
2941
2942 if (!bar->initted) {
2943 HG_(record_error_Misc)(
2944 thr, "pthread_barrier_resize: barrier is uninitialised"
2945 );
2946 return; /* client is broken .. avoid assertions below */
2947 }
2948
2949 if (!bar->resizable) {
2950 HG_(record_error_Misc)(
2951         thr, "pthread_barrier_resize: barrier may not be resized"
2952 );
2953 return; /* client is broken .. avoid assertions below */
2954 }
2955
2956 if (newcount == 0) {
2957 HG_(record_error_Misc)(
2958 thr, "pthread_barrier_resize: 'newcount' argument is zero"
2959 );
2960 return; /* client is broken .. avoid assertions below */
2961 }
2962
2963 /* guaranteed by _INIT_PRE above */
2964 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00002965 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00002966 /* Guaranteed by this fn */
2967 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00002968
sewardj406bac82010-03-03 23:03:40 +00002969 if (newcount >= bar->size) {
2970 /* Increasing the capacity. There's no possibility of threads
2971 moving on from the barrier in this situation, so just note
2972 the fact and do nothing more. */
2973 bar->size = newcount;
2974 } else {
2975      /* Decreasing the capacity. If we decrease it to be equal to or
2976 below the number of waiting threads, they will now move past
2977 the barrier, so need to mess with dep edges in the same way
2978 as if the barrier had filled up normally. */
2979 present = VG_(sizeXA)(bar->waiting);
2980 tl_assert(present >= 0 && present <= bar->size);
2981 if (newcount <= present) {
2982 bar->size = present; /* keep the cross_sync call happy */
2983 do_barrier_cross_sync_and_empty(bar);
2984 }
2985 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00002986 }
sewardj9f569b72008-11-13 13:33:09 +00002987}
2988
2989
sewardjed2e72e2009-08-14 11:08:24 +00002990/* ----------------------------------------------------- */
2991/* ----- events to do with user-specified HB edges ----- */
2992/* ----------------------------------------------------- */
2993
2994/* A mapping from arbitrary UWord tag to the SO associated with it.
2995 The UWord tags are meaningless to us, interpreted only by the
2996 user. */
2997
2998
2999
3000/* UWord -> SO* */
3001static WordFM* map_usertag_to_SO = NULL;
3002
3003static void map_usertag_to_SO_INIT ( void ) {
3004 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3005 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3006 "hg.mutS.1", HG_(free), NULL );
3007 tl_assert(map_usertag_to_SO != NULL);
3008 }
3009}
3010
3011static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3012 UWord key, val;
3013 map_usertag_to_SO_INIT();
3014 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3015 tl_assert(key == (UWord)usertag);
3016 return (SO*)val;
3017 } else {
3018 SO* so = libhb_so_alloc();
3019 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3020 return so;
3021 }
3022}
3023
sewardj6015d0e2011-03-11 19:10:48 +00003024static void map_usertag_to_SO_delete ( UWord usertag ) {
3025 UWord keyW, valW;
3026 map_usertag_to_SO_INIT();
3027 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3028 SO* so = (SO*)valW;
3029 tl_assert(keyW == usertag);
3030 tl_assert(so);
3031 libhb_so_dealloc(so);
3032 }
3033}
sewardjed2e72e2009-08-14 11:08:24 +00003034
3035
3036static
3037void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3038{
3039   /* TID is just about to notionally send a message on a notional
3040 abstract synchronisation object whose identity is given by
3041 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003042 bound, and do a 'weak send' on the SO. This joins the vector
3043 clocks from this thread into any vector clocks already present
3044 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003045 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003046 thereby acquiring a dependency on all the events that have
3047 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003048 Thread* thr;
3049 SO* so;
3050
3051 if (SHOW_EVENTS >= 1)
3052 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3053 (Int)tid, usertag );
3054
3055 thr = map_threads_maybe_lookup( tid );
3056 tl_assert(thr); /* cannot fail - Thread* must already exist */
3057
3058 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3059 tl_assert(so);
3060
sewardj8c50d3c2011-03-11 18:38:12 +00003061 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003062}
3063
3064static
3065void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3066{
3067 /* TID has just notionally received a message from a notional
3068 abstract synchronisation object whose identity is given by
3069 USERTAG. Bind USERTAG to a real SO if it is not already so
3070 bound. If the SO has at some point in the past been 'sent' on,
3071      do a 'strong receive' on it, thereby acquiring a dependency on
3072 the sender. */
3073 Thread* thr;
3074 SO* so;
3075
3076 if (SHOW_EVENTS >= 1)
3077 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3078 (Int)tid, usertag );
3079
3080 thr = map_threads_maybe_lookup( tid );
3081 tl_assert(thr); /* cannot fail - Thread* must already exist */
3082
3083 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3084 tl_assert(so);
3085
3086 /* Acquire a dependency on it. If the SO has never so far been
3087 sent on, then libhb_so_recv will do nothing. So we're safe
3088 regardless of SO's history. */
3089 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3090}
3091
sewardj6015d0e2011-03-11 19:10:48 +00003092static
3093void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3094{
3095 /* TID declares that any happens-before edges notionally stored in
3096 USERTAG can be deleted. If (as would normally be the case) a
3097      SO is associated with USERTAG, then the association is removed
3098 and all resources associated with SO are freed. Importantly,
3099 that frees up any VTSs stored in SO. */
3100 if (SHOW_EVENTS >= 1)
3101 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3102 (Int)tid, usertag );
3103
3104 map_usertag_to_SO_delete( usertag );
3105}
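
/* Client-side sketch of how these three requests are typically
   reached.  This assumes the ANNOTATE_* macros in this tree's
   helgrind.h map onto _VG_USERREQ__HG_USERSO_SEND_PRE, _RECV_POST and
   _FORGET_ALL respectively; treat the macro names as placeholders if
   they differ:

      // Producer:                      // Consumer (receives 'buf'
      fill_buffer(buf);                 //  via a race-free channel,
      ANNOTATE_HAPPENS_BEFORE(buf);     //  e.g. a queue):
      enqueue(q, buf);                  buf = dequeue(q);
                                        ANNOTATE_HAPPENS_AFTER(buf);
                                        use_buffer(buf);  // no race
      ...
      // when 'buf' is retired for good:
      ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(buf);  // frees the SO's VTSs

   Here the address of 'buf' serves as the UWord usertag. */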
3106
sewardjed2e72e2009-08-14 11:08:24 +00003107
sewardjb4112022007-11-09 22:49:28 +00003108/*--------------------------------------------------------------*/
3109/*--- Lock acquisition order monitoring ---*/
3110/*--------------------------------------------------------------*/
3111
3112/* FIXME: here are some optimisations still to do in
3113 laog__pre_thread_acquires_lock.
3114
3115 The graph is structured so that if L1 --*--> L2 then L1 must be
3116 acquired before L2.
3117
3118 The common case is that some thread T holds (eg) L1 L2 and L3 and
3119 is repeatedly acquiring and releasing Ln, and there is no ordering
3120   error in what it is doing. Hence it repeatedly:
3121
3122 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3123 produces the answer No (because there is no error).
3124
3125 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3126 (because they already got added the first time T acquired Ln).
3127
3128 Hence cache these two events:
3129
3130 (1) Cache result of the query from last time. Invalidate the cache
3131 any time any edges are added to or deleted from laog.
3132
3133 (2) Cache these add-edge requests and ignore them if said edges
3134 have already been added to laog. Invalidate the cache any time
3135 any edges are deleted from laog.
3136*/
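
/* One possible shape for the two caches proposed above -- a sketch
   only, not implemented here, and all names are hypothetical:

      static struct {
         Lock*     lk;       // the Ln of the last (1)-style query
         WordSetID lockset;  // the locksetA it was queried against
         Lock*     result;   // what laog__do_dfs_from_to returned
         UWord     stamp;    // value of laog__generation when cached
      } laog__dfs_cache;

      static UWord laog__generation;  // bump on every edge add/delete

   A query hits the cache iff lk, lockset and stamp all match; bumping
   laog__generation in laog__add_edge and laog__del_edge provides the
   invalidation needed for (1), and an analogous (src, dst, stamp)
   cache in laog__add_edge would cover (2). */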
3137
3138typedef
3139 struct {
3140 WordSetID inns; /* in univ_laog */
3141 WordSetID outs; /* in univ_laog */
3142 }
3143 LAOGLinks;
3144
3145/* lock order acquisition graph */
3146static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3147
3148/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3149 where that edge was created, so that we can show the user later if
3150 we need to. */
3151typedef
3152 struct {
3153 Addr src_ga; /* Lock guest addresses for */
3154 Addr dst_ga; /* src/dst of the edge */
3155 ExeContext* src_ec; /* And corresponding places where that */
3156 ExeContext* dst_ec; /* ordering was established */
3157 }
3158 LAOGLinkExposition;
3159
sewardj250ec2e2008-02-15 22:02:30 +00003160static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003161 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3162 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3163 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3164 if (llx1->src_ga < llx2->src_ga) return -1;
3165 if (llx1->src_ga > llx2->src_ga) return 1;
3166 if (llx1->dst_ga < llx2->dst_ga) return -1;
3167 if (llx1->dst_ga > llx2->dst_ga) return 1;
3168 return 0;
3169}
3170
3171static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3172/* end EXPOSITION ONLY */
3173
3174
sewardja65db102009-01-26 10:45:16 +00003175__attribute__((noinline))
3176static void laog__init ( void )
3177{
3178 tl_assert(!laog);
3179 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003180 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003181
3182 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3183 HG_(free), NULL/*unboxedcmp*/ );
3184
3185 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3186 cmp_LAOGLinkExposition );
3187 tl_assert(laog);
3188 tl_assert(laog_exposition);
3189}
3190
sewardjb4112022007-11-09 22:49:28 +00003191static void laog__show ( Char* who ) {
3192 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003193 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003194 Lock* me;
3195 LAOGLinks* links;
3196 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003197 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003198 me = NULL;
3199 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003200 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003201 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003202 tl_assert(me);
3203 tl_assert(links);
3204 VG_(printf)(" node %p:\n", me);
3205 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3206 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003207 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003208 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3209 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003210 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003211 me = NULL;
3212 links = NULL;
3213 }
sewardj896f6f92008-08-19 08:38:52 +00003214 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003215 VG_(printf)("}\n");
3216}
3217
3218__attribute__((noinline))
3219static void laog__add_edge ( Lock* src, Lock* dst ) {
3220 Word keyW;
3221 LAOGLinks* links;
3222 Bool presentF, presentR;
3223 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3224
3225 /* Take the opportunity to sanity check the graph. Record in
3226 presentF if there is already a src->dst mapping in this node's
3227 forwards links, and presentR if there is already a src->dst
3228 mapping in this node's backwards links. They should agree!
3229 Also, we need to know whether the edge was already present so as
3230 to decide whether or not to update the link details mapping. We
3231 can compute presentF and presentR essentially for free, so may
3232 as well do this always. */
3233 presentF = presentR = False;
3234
3235 /* Update the out edges for src */
3236 keyW = 0;
3237 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003238 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003239 WordSetID outs_new;
3240 tl_assert(links);
3241 tl_assert(keyW == (Word)src);
3242 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3243 presentF = outs_new == links->outs;
3244 links->outs = outs_new;
3245 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003246 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003247 links->inns = HG_(emptyWS)( univ_laog );
3248 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003249 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003250 }
3251 /* Update the in edges for dst */
3252 keyW = 0;
3253 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003254 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003255 WordSetID inns_new;
3256 tl_assert(links);
3257 tl_assert(keyW == (Word)dst);
3258 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3259 presentR = inns_new == links->inns;
3260 links->inns = inns_new;
3261 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003262 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003263 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3264 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003265 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003266 }
3267
3268 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3269
3270 if (!presentF && src->acquired_at && dst->acquired_at) {
3271 LAOGLinkExposition expo;
3272 /* If this edge is entering the graph, and we have acquired_at
3273 information for both src and dst, record those acquisition
3274 points. Hence, if there is later a violation of this
3275 ordering, we can show the user the two places in which the
3276 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003277 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003278 src->guestaddr, dst->guestaddr);
3279 expo.src_ga = src->guestaddr;
3280 expo.dst_ga = dst->guestaddr;
3281 expo.src_ec = NULL;
3282 expo.dst_ec = NULL;
3283 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003284 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003285 /* we already have it; do nothing */
3286 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003287 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3288 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003289 expo2->src_ga = src->guestaddr;
3290 expo2->dst_ga = dst->guestaddr;
3291 expo2->src_ec = src->acquired_at;
3292 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003293 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003294 }
3295 }
3296}
3297
3298__attribute__((noinline))
3299static void laog__del_edge ( Lock* src, Lock* dst ) {
3300 Word keyW;
3301 LAOGLinks* links;
3302 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3303 /* Update the out edges for src */
3304 keyW = 0;
3305 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003306 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003307 tl_assert(links);
3308 tl_assert(keyW == (Word)src);
3309 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3310 }
3311 /* Update the in edges for dst */
3312 keyW = 0;
3313 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003314 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003315 tl_assert(links);
3316 tl_assert(keyW == (Word)dst);
3317 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3318 }
3319}
3320
3321__attribute__((noinline))
3322static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3323 Word keyW;
3324 LAOGLinks* links;
3325 keyW = 0;
3326 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003327 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003328 tl_assert(links);
3329 tl_assert(keyW == (Word)lk);
3330 return links->outs;
3331 } else {
3332 return HG_(emptyWS)( univ_laog );
3333 }
3334}
3335
3336__attribute__((noinline))
3337static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3338 Word keyW;
3339 LAOGLinks* links;
3340 keyW = 0;
3341 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003342 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003343 tl_assert(links);
3344 tl_assert(keyW == (Word)lk);
3345 return links->inns;
3346 } else {
3347 return HG_(emptyWS)( univ_laog );
3348 }
3349}
3350
3351__attribute__((noinline))
3352static void laog__sanity_check ( Char* who ) {
3353 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003354 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003355 Lock* me;
3356 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003357 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003358 me = NULL;
3359 links = NULL;
3360 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003361 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003362 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003363 tl_assert(me);
3364 tl_assert(links);
3365 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3366 for (i = 0; i < ws_size; i++) {
3367 if ( ! HG_(elemWS)( univ_laog,
3368 laog__succs( (Lock*)ws_words[i] ),
3369 (Word)me ))
3370 goto bad;
3371 }
3372 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3373 for (i = 0; i < ws_size; i++) {
3374 if ( ! HG_(elemWS)( univ_laog,
3375 laog__preds( (Lock*)ws_words[i] ),
3376 (Word)me ))
3377 goto bad;
3378 }
3379 me = NULL;
3380 links = NULL;
3381 }
sewardj896f6f92008-08-19 08:38:52 +00003382 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003383 return;
3384
3385 bad:
3386 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3387 laog__show(who);
3388 tl_assert(0);
3389}
3390
3391/* If there is a path in laog from 'src' to any of the elements in
3392 'dst', return an arbitrarily chosen element of 'dst' reachable from
3393   'src'. If no path exists from 'src' to any element in 'dst', return
3394 NULL. */
3395__attribute__((noinline))
3396static
3397Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3398{
3399 Lock* ret;
3400 Word i, ssz;
3401 XArray* stack; /* of Lock* */
3402 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3403 Lock* here;
3404 WordSetID succs;
3405 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003406 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003407 //laog__sanity_check();
3408
3409 /* If the destination set is empty, we can never get there from
3410 'src' :-), so don't bother to try */
3411 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3412 return NULL;
3413
3414 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003415 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3416 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003417
3418 (void) VG_(addToXA)( stack, &src );
3419
3420 while (True) {
3421
3422 ssz = VG_(sizeXA)( stack );
3423
3424 if (ssz == 0) { ret = NULL; break; }
3425
3426 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3427 VG_(dropTailXA)( stack, 1 );
3428
3429 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3430
sewardj896f6f92008-08-19 08:38:52 +00003431 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003432 continue;
3433
sewardj896f6f92008-08-19 08:38:52 +00003434 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003435
3436 succs = laog__succs( here );
3437 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3438 for (i = 0; i < succs_size; i++)
3439 (void) VG_(addToXA)( stack, &succs_words[i] );
3440 }
3441
sewardj896f6f92008-08-19 08:38:52 +00003442 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003443 VG_(deleteXA)( stack );
3444 return ret;
3445}
3446
3447
3448/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3449 between 'lk' and the locks already held by 'thr' and issue a
3450 complaint if so. Also, update the ordering graph appropriately.
3451*/
3452__attribute__((noinline))
3453static void laog__pre_thread_acquires_lock (
3454 Thread* thr, /* NB: BEFORE lock is added */
3455 Lock* lk
3456 )
3457{
sewardj250ec2e2008-02-15 22:02:30 +00003458 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003459 Word ls_size, i;
3460 Lock* other;
3461
3462 /* It may be that 'thr' already holds 'lk' and is recursively
3463      relocking it. In this case we just ignore the call. */
3464 /* NB: univ_lsets really is correct here */
3465 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3466 return;
3467
sewardjb4112022007-11-09 22:49:28 +00003468 /* First, the check. Complain if there is any path in laog from lk
3469 to any of the locks already held by thr, since if any such path
3470 existed, it would mean that previously lk was acquired before
3471 (rather than after, as we are doing here) at least one of those
3472 locks.
3473 */
3474 other = laog__do_dfs_from_to(lk, thr->locksetA);
3475 if (other) {
3476 LAOGLinkExposition key, *found;
3477 /* So we managed to find a path lk --*--> other in the graph,
3478 which implies that 'lk' should have been acquired before
3479 'other' but is in fact being acquired afterwards. We present
3480 the lk/other arguments to record_error_LockOrder in the order
3481 in which they should have been acquired. */
3482 /* Go look in the laog_exposition mapping, to find the allocation
3483 points for this edge, so we can show the user. */
3484 key.src_ga = lk->guestaddr;
3485 key.dst_ga = other->guestaddr;
3486 key.src_ec = NULL;
3487 key.dst_ec = NULL;
3488 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003489 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003490 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003491 tl_assert(found != &key);
3492 tl_assert(found->src_ga == key.src_ga);
3493 tl_assert(found->dst_ga == key.dst_ga);
3494 tl_assert(found->src_ec);
3495 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003496 HG_(record_error_LockOrder)(
3497 thr, lk->guestaddr, other->guestaddr,
3498 found->src_ec, found->dst_ec );
sewardjb4112022007-11-09 22:49:28 +00003499 } else {
3500 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003501 HG_(record_error_LockOrder)(
3502 thr, lk->guestaddr, other->guestaddr,
3503 NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003504 }
3505 }
3506
3507 /* Second, add to laog the pairs
3508 (old, lk) | old <- locks already held by thr
3509 Since both old and lk are currently held by thr, their acquired_at
3510 fields must be non-NULL.
3511 */
3512 tl_assert(lk->acquired_at);
3513 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3514 for (i = 0; i < ls_size; i++) {
3515 Lock* old = (Lock*)ls_words[i];
3516 tl_assert(old->acquired_at);
3517 laog__add_edge( old, lk );
3518 }
3519
3520 /* Why "except_Locks" ? We're here because a lock is being
3521 acquired by a thread, and we're in an inconsistent state here.
3522 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3523 When called in this inconsistent state, locks__sanity_check duly
3524 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003525 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003526 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3527}
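
/* Client-side sketch (illustrative only) of the situation this
   function reports.  With two pthread mutexes L1 and L2:

      // Thread A                     // Thread B
      pthread_mutex_lock(&L1);        pthread_mutex_lock(&L2);
      pthread_mutex_lock(&L2);        pthread_mutex_lock(&L1); // reported

   A's second acquisition adds the edge L1 --> L2 to laog.  When B then
   acquires L1 while already holding L2, the DFS finds a path from L1
   to a member of B's lockset (namely L2), and record_error_LockOrder
   fires, citing the two acquisition points saved in laog_exposition. */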
3528
3529
3530/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3531
3532__attribute__((noinline))
3533static void laog__handle_one_lock_deletion ( Lock* lk )
3534{
3535 WordSetID preds, succs;
3536 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003537 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003538
3539 preds = laog__preds( lk );
3540 succs = laog__succs( lk );
3541
3542 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3543 for (i = 0; i < preds_size; i++)
3544 laog__del_edge( (Lock*)preds_words[i], lk );
3545
3546 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3547 for (j = 0; j < succs_size; j++)
3548 laog__del_edge( lk, (Lock*)succs_words[j] );
3549
3550 for (i = 0; i < preds_size; i++) {
3551 for (j = 0; j < succs_size; j++) {
3552 if (preds_words[i] != succs_words[j]) {
3553 /* This can pass unlocked locks to laog__add_edge, since
3554 we're deleting stuff. So their acquired_at fields may
3555 be NULL. */
3556 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3557 }
3558 }
3559 }
3560}
3561
sewardj1cbc12f2008-11-10 16:16:46 +00003562//__attribute__((noinline))
3563//static void laog__handle_lock_deletions (
3564// WordSetID /* in univ_laog */ locksToDelete
3565// )
3566//{
3567// Word i, ws_size;
3568// UWord* ws_words;
3569//
sewardj1cbc12f2008-11-10 16:16:46 +00003570//
3571// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3572// for (i = 0; i < ws_size; i++)
3573// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3574//
3575// if (HG_(clo_sanity_flags) & SCE_LAOG)
3576// all__sanity_check("laog__handle_lock_deletions-post");
3577//}
sewardjb4112022007-11-09 22:49:28 +00003578
3579
3580/*--------------------------------------------------------------*/
3581/*--- Malloc/free replacements ---*/
3582/*--------------------------------------------------------------*/
3583
3584typedef
3585 struct {
3586 void* next; /* required by m_hashtable */
3587 Addr payload; /* ptr to actual block */
3588 SizeT szB; /* size requested */
3589 ExeContext* where; /* where it was allocated */
3590 Thread* thr; /* allocating thread */
3591 }
3592 MallocMeta;
3593
3594/* A hash table of MallocMetas, used to track malloc'd blocks
3595 (obviously). */
3596static VgHashTable hg_mallocmeta_table = NULL;
3597
3598
3599static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003600 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003601 tl_assert(md);
3602 return md;
3603}
3604static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003605 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003606}
3607
3608
3609/* Allocate a client block and set up the metadata for it. */
3610
3611static
3612void* handle_alloc ( ThreadId tid,
3613 SizeT szB, SizeT alignB, Bool is_zeroed )
3614{
3615 Addr p;
3616 MallocMeta* md;
3617
3618 tl_assert( ((SSizeT)szB) >= 0 );
3619 p = (Addr)VG_(cli_malloc)(alignB, szB);
3620 if (!p) {
3621 return NULL;
3622 }
3623 if (is_zeroed)
3624 VG_(memset)((void*)p, 0, szB);
3625
3626 /* Note that map_threads_lookup must succeed (cannot assert), since
3627 memory can only be allocated by currently alive threads, hence
3628 they must have an entry in map_threads. */
3629 md = new_MallocMeta();
3630 md->payload = p;
3631 md->szB = szB;
3632 md->where = VG_(record_ExeContext)( tid, 0 );
3633 md->thr = map_threads_lookup( tid );
3634
3635 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3636
3637 /* Tell the lower level memory wranglers. */
3638 evh__new_mem_heap( p, szB, is_zeroed );
3639
3640 return (void*)p;
3641}
3642
3643/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3644 Cast to a signed type to catch any unexpectedly negative args.
3645 We're assuming here that the size asked for is not greater than
3646 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3647 platforms). */
3648static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3649 if (((SSizeT)n) < 0) return NULL;
3650 return handle_alloc ( tid, n, VG_(clo_alignment),
3651 /*is_zeroed*/False );
3652}
3653static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3654 if (((SSizeT)n) < 0) return NULL;
3655 return handle_alloc ( tid, n, VG_(clo_alignment),
3656 /*is_zeroed*/False );
3657}
3658static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3659 if (((SSizeT)n) < 0) return NULL;
3660 return handle_alloc ( tid, n, VG_(clo_alignment),
3661 /*is_zeroed*/False );
3662}
3663static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3664 if (((SSizeT)n) < 0) return NULL;
3665 return handle_alloc ( tid, n, align,
3666 /*is_zeroed*/False );
3667}
3668static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3669 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3670 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3671 /*is_zeroed*/True );
3672}
3673
3674
3675/* Free a client block, including getting rid of the relevant
3676 metadata. */
3677
3678static void handle_free ( ThreadId tid, void* p )
3679{
3680 MallocMeta *md, *old_md;
3681 SizeT szB;
3682
3683 /* First see if we can find the metadata for 'p'. */
3684 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3685 if (!md)
3686 return; /* apparently freeing a bogus address. Oh well. */
3687
3688 tl_assert(md->payload == (Addr)p);
3689 szB = md->szB;
3690
3691 /* Nuke the metadata block */
3692 old_md = (MallocMeta*)
3693 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3694 tl_assert(old_md); /* it must be present - we just found it */
3695 tl_assert(old_md == md);
3696 tl_assert(old_md->payload == (Addr)p);
3697
3698 VG_(cli_free)((void*)old_md->payload);
3699 delete_MallocMeta(old_md);
3700
3701 /* Tell the lower level memory wranglers. */
3702 evh__die_mem_heap( (Addr)p, szB );
3703}
3704
3705static void hg_cli__free ( ThreadId tid, void* p ) {
3706 handle_free(tid, p);
3707}
3708static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3709 handle_free(tid, p);
3710}
3711static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3712 handle_free(tid, p);
3713}
3714
3715
3716static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3717{
3718 MallocMeta *md, *md_new, *md_tmp;
3719 SizeT i;
3720
3721 Addr payload = (Addr)payloadV;
3722
3723 if (((SSizeT)new_size) < 0) return NULL;
3724
3725 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3726 if (!md)
3727 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3728
3729 tl_assert(md->payload == payload);
3730
3731 if (md->szB == new_size) {
3732 /* size unchanged */
3733 md->where = VG_(record_ExeContext)(tid, 0);
3734 return payloadV;
3735 }
3736
3737 if (md->szB > new_size) {
3738 /* new size is smaller */
3739 md->szB = new_size;
3740 md->where = VG_(record_ExeContext)(tid, 0);
3741 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3742 return payloadV;
3743 }
3744
3745 /* else */ {
3746 /* new size is bigger */
3747 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3748
3749 /* First half kept and copied, second half new */
3750 // FIXME: shouldn't we use a copier which implements the
3751 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003752 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003753 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003754 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003755 /* FIXME: can anything funny happen here? specifically, if the
3756 old range contained a lock, then die_mem_heap will complain.
3757 Is that the correct behaviour? Not sure. */
3758 evh__die_mem_heap( payload, md->szB );
3759
3760 /* Copy from old to new */
3761 for (i = 0; i < md->szB; i++)
3762 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3763
3764      /* Because the metadata hash table is indexed by payload address,
3765 we have to get rid of the old hash table entry and make a new
3766 one. We can't just modify the existing metadata in place,
3767 because then it would (almost certainly) be in the wrong hash
3768 chain. */
3769 md_new = new_MallocMeta();
3770 *md_new = *md;
3771
3772 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3773 tl_assert(md_tmp);
3774 tl_assert(md_tmp == md);
3775
3776 VG_(cli_free)((void*)md->payload);
3777 delete_MallocMeta(md);
3778
3779 /* Update fields */
3780 md_new->where = VG_(record_ExeContext)( tid, 0 );
3781 md_new->szB = new_size;
3782 md_new->payload = p_new;
3783 md_new->thr = map_threads_lookup( tid );
3784
3785 /* and add */
3786 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3787
3788 return (void*)p_new;
3789 }
3790}
3791
njn8b140de2009-02-17 04:31:18 +00003792static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3793{
3794 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3795
3796 // There may be slop, but pretend there isn't because only the asked-for
3797 // area will have been shadowed properly.
3798 return ( md ? md->szB : 0 );
3799}
3800
sewardjb4112022007-11-09 22:49:28 +00003801
sewardj095d61e2010-03-11 13:43:18 +00003802/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003803 Slow linear search. With a bit of hash table help if 'data_addr'
3804 is either the start of a block or up to 15 word-sized steps along
3805 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003806
3807static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3808{
sewardjc8028ad2010-05-05 09:34:42 +00003809 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3810 right at it. */
3811 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3812 return True;
3813 /* else normal interval rules apply */
3814 if (LIKELY(a < mm->payload)) return False;
3815 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3816 return True;
sewardj095d61e2010-03-11 13:43:18 +00003817}
3818
sewardjc8028ad2010-05-05 09:34:42 +00003819Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003820 /*OUT*/Addr* payload,
3821 /*OUT*/SizeT* szB,
3822 Addr data_addr )
3823{
3824 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003825 Int i;
3826 const Int n_fast_check_words = 16;
3827
3828 /* First, do a few fast searches on the basis that data_addr might
3829 be exactly the start of a block or up to 15 words inside. This
3830 can happen commonly via the creq
3831 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3832 for (i = 0; i < n_fast_check_words; i++) {
3833 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3834 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3835 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3836 goto found;
3837 }
3838
sewardj095d61e2010-03-11 13:43:18 +00003839 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003840 some such, it's hard to see how to do better. We have to check
3841 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003842 VG_(HT_ResetIter)(hg_mallocmeta_table);
3843 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003844 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3845 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003846 }
sewardjc8028ad2010-05-05 09:34:42 +00003847
3848 /* Not found. Bah. */
3849 return False;
3850 /*NOTREACHED*/
3851
3852 found:
3853 tl_assert(mm);
3854 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3855 if (where) *where = mm->where;
3856 if (payload) *payload = mm->payload;
3857 if (szB) *szB = mm->szB;
3858 return True;
sewardj095d61e2010-03-11 13:43:18 +00003859}
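
/* Worked example of the fast path above (hypothetical numbers): with
   8-byte words, a block whose payload starts at 0x5000, and
   data_addr == 0x5028, the i == 5 probe looks up
   0x5028 - 5*8 == 0x5000 in hg_mallocmeta_table, finds the block, and
   addr_is_in_MM_Chunk confirms containment -- no full scan needed.
   Addresses more than 15 words into a block fall through to the
   linear search. */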
3860
3861
sewardjb4112022007-11-09 22:49:28 +00003862/*--------------------------------------------------------------*/
3863/*--- Instrumentation ---*/
3864/*--------------------------------------------------------------*/
3865
3866static void instrument_mem_access ( IRSB* bbOut,
3867 IRExpr* addr,
3868 Int szB,
3869 Bool isStore,
3870 Int hWordTy_szB )
3871{
3872 IRType tyAddr = Ity_INVALID;
3873 HChar* hName = NULL;
3874 void* hAddr = NULL;
3875 Int regparms = 0;
3876 IRExpr** argv = NULL;
3877 IRDirty* di = NULL;
3878
3879 tl_assert(isIRAtom(addr));
3880 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3881
3882 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3883 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3884
3885 /* So the effective address is in 'addr' now. */
3886 regparms = 1; // unless stated otherwise
3887 if (isStore) {
3888 switch (szB) {
3889 case 1:
sewardj23f12002009-07-24 08:45:08 +00003890 hName = "evh__mem_help_cwrite_1";
3891 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00003892 argv = mkIRExprVec_1( addr );
3893 break;
3894 case 2:
sewardj23f12002009-07-24 08:45:08 +00003895 hName = "evh__mem_help_cwrite_2";
3896 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00003897 argv = mkIRExprVec_1( addr );
3898 break;
3899 case 4:
sewardj23f12002009-07-24 08:45:08 +00003900 hName = "evh__mem_help_cwrite_4";
3901 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00003902 argv = mkIRExprVec_1( addr );
3903 break;
3904 case 8:
sewardj23f12002009-07-24 08:45:08 +00003905 hName = "evh__mem_help_cwrite_8";
3906 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00003907 argv = mkIRExprVec_1( addr );
3908 break;
3909 default:
3910 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3911 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003912 hName = "evh__mem_help_cwrite_N";
3913 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00003914 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3915 break;
3916 }
3917 } else {
3918 switch (szB) {
3919 case 1:
sewardj23f12002009-07-24 08:45:08 +00003920 hName = "evh__mem_help_cread_1";
3921 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00003922 argv = mkIRExprVec_1( addr );
3923 break;
3924 case 2:
sewardj23f12002009-07-24 08:45:08 +00003925 hName = "evh__mem_help_cread_2";
3926 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00003927 argv = mkIRExprVec_1( addr );
3928 break;
3929 case 4:
sewardj23f12002009-07-24 08:45:08 +00003930 hName = "evh__mem_help_cread_4";
3931 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00003932 argv = mkIRExprVec_1( addr );
3933 break;
3934 case 8:
sewardj23f12002009-07-24 08:45:08 +00003935 hName = "evh__mem_help_cread_8";
3936 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00003937 argv = mkIRExprVec_1( addr );
3938 break;
3939 default:
3940 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3941 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003942 hName = "evh__mem_help_cread_N";
3943 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00003944 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3945 break;
3946 }
3947 }
3948
3949 /* Add the helper. */
3950 tl_assert(hName);
3951 tl_assert(hAddr);
3952 tl_assert(argv);
3953 di = unsafeIRDirty_0_N( regparms,
3954 hName, VG_(fnptr_to_fnentry)( hAddr ),
3955 argv );
3956 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
3957}
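
/* For example (illustrative; the concrete IR printing varies), a
   4-byte store

      STle(t1) = t2

   selects the szB == 4, isStore arm above, and a dirty call of the
   shape

      DIRTY ::: evh__mem_help_cwrite_4[rp=1](t1)

   is appended to bbOut, so the helper runs whenever the store does.
   Only the effective address reaches the helper; the value stored is
   irrelevant to race detection. */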
3958
3959
sewardja0eee322009-07-31 08:46:35 +00003960/* Figure out if GA is a guest code address in the dynamic linker, and
3961 if so return True. Otherwise (and in case of any doubt) return
3962   False. (one-sidedly safe: False is the safe value) */
3963static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
3964{
3965 DebugInfo* dinfo;
3966 const UChar* soname;
3967 if (0) return False;
3968
sewardje3f1e592009-07-31 09:41:29 +00003969 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00003970 if (!dinfo) return False;
3971
sewardje3f1e592009-07-31 09:41:29 +00003972 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00003973 tl_assert(soname);
3974 if (0) VG_(printf)("%s\n", soname);
3975
3976# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00003977 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00003978 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
3979 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
3980 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
3981 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
3982# elif defined(VGO_darwin)
3983 if (VG_STREQ(soname, VG_U_DYLD)) return True;
3984# else
3985# error "Unsupported OS"
3986# endif
3987 return False;
3988}
3989
sewardjb4112022007-11-09 22:49:28 +00003990static
3991IRSB* hg_instrument ( VgCallbackClosure* closure,
3992 IRSB* bbIn,
3993 VexGuestLayout* layout,
3994 VexGuestExtents* vge,
3995 IRType gWordTy, IRType hWordTy )
3996{
sewardj1c0ce7a2009-07-01 08:10:49 +00003997 Int i;
3998 IRSB* bbOut;
3999 Addr64 cia; /* address of current insn */
4000 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004001 Bool inLDSO = False;
4002 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004003
4004 if (gWordTy != hWordTy) {
4005 /* We don't currently support this case. */
4006 VG_(tool_panic)("host/guest word size mismatch");
4007 }
4008
sewardja0eee322009-07-31 08:46:35 +00004009 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4010 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4011 }
4012
sewardjb4112022007-11-09 22:49:28 +00004013 /* Set up BB */
4014 bbOut = emptyIRSB();
4015 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4016 bbOut->next = deepCopyIRExpr(bbIn->next);
4017 bbOut->jumpkind = bbIn->jumpkind;
4018
4019 // Copy verbatim any IR preamble preceding the first IMark
4020 i = 0;
4021 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4022 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4023 i++;
4024 }
4025
sewardj1c0ce7a2009-07-01 08:10:49 +00004026 // Get the first statement, and initial cia from it
4027 tl_assert(bbIn->stmts_used > 0);
4028 tl_assert(i < bbIn->stmts_used);
4029 st = bbIn->stmts[i];
4030 tl_assert(Ist_IMark == st->tag);
4031 cia = st->Ist.IMark.addr;
4032 st = NULL;
4033
sewardjb4112022007-11-09 22:49:28 +00004034 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004035 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004036 tl_assert(st);
4037 tl_assert(isFlatIRStmt(st));
4038 switch (st->tag) {
4039 case Ist_NoOp:
4040 case Ist_AbiHint:
4041 case Ist_Put:
4042 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004043 case Ist_Exit:
4044 /* None of these can contain any memory references. */
4045 break;
4046
sewardj1c0ce7a2009-07-01 08:10:49 +00004047 case Ist_IMark:
4048 /* no mem refs, but note the insn address. */
4049 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004050 /* Don't instrument the dynamic linker. It generates a
4051 lot of races which we just expensively suppress, so
4052 it's pointless.
4053
4054 Avoid flooding is_in_dynamic_linker_shared_object with
4055 requests by only checking at transitions between 4K
4056 pages. */
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }
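         /* Size example for the CAS case: a double CAS with 32-bit
            halves (cmpxchg8b-style) has a dataLo of type I32, so it
            is reported as a single 2 * 4 = 8 byte read at cas->addr. */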

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }
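         /* Ist_LLSC typically comes from ARM ldrex/strex or PowerPC
            lwarx/stwcx.  Ignoring the SC half seems a reasonable
            simplification: the LL already records the location, and a
            failed SC is normally just retried by the guest code. */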

         case Ist_Store:
            /* A plain store; instrument it as a write.  (As noted
               above, store-conditionals arrive as Ist_LLSC and are
               deliberately ignored.) */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* A plain load; instrument it as a read.  (Load-linked's
               arrive as Ist_LLSC and are handled above.) */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}
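/* Note on the map: passing NULL as the compare function to VG_(newFM)
   means keys are compared as plain unboxed words, which suits
   pthread_t values.  Entries are added in SET_MY_PTHREAD_T below and
   retrieved with VG_(lookupFM) when a join completes. */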


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
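      /* Client code reaches the case above via the
         VALGRIND_HG_CLEAN_MEMORY(addr, len) macro in helgrind.h.
         A typical (sketched) use, recycling a region owned by a
         custom pool allocator whose names are made up here:

            pool_free(p);                        // hypothetical pool
            VALGRIND_HG_CLEAN_MEMORY(p, p_szB);  // forget its history

         so that accesses by the block's next owner are not reported
         as conflicting with those of the previous owner. */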

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE( tid, (void*)args[1],
                                             args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;
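      /* The three USERSO requests above back the user-level
         happens-before annotations in helgrind.h (the
         ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER family, to
         the best of my reading), which let clients declare orderings
         Helgrind cannot infer.  Sketch, with made-up names:

            // producer                    // consumer
            fill(slot);                    while (!ready) { }
            ANNOTATE_HAPPENS_BEFORE(&q);   ANNOTATE_HAPPENS_AFTER(&q);
            ready = 1;                     use(slot);
      */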

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* Parse a 6-character string of '0'/'1' digits ("stuvwx") into
      the corresponding 6-bit mask in HG_(clo_sanity_flags). */
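   /* Example: --hg-sanity-flags=000010 enables only the checks at
      lock/unlock events; the full bit assignment is printed by
      hg_print_debug_usage below. */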
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
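/* Illustrative invocations exercising the options handled above
   (command lines are examples only):

      valgrind --tool=helgrind --history-level=approx ./app
      valgrind --tool=helgrind --conflict-cache-size=5000000 ./app
*/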

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog, "univ_laog" );
         }
      }

      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz             stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("            LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)(" LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
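/* libhb hands in a fixed-length frames[] buffer; the zero-fill above
   guarantees that unused trailing slots read as 0 rather than garbage,
   presumably so callers can treat 0 as an end-of-trace marker. */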

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/