
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
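
/* Illustrative sketch of the casting idiom in question (not part of
   the tool; 'example_find_lock' is hypothetical).  lookupFM traffics
   in Words, so callers cast pointer lvalues to Word*, which is what
   trips gcc's strict-aliasing analysis at -O2: */
#if 0
static Lock* example_find_lock ( WordFM* fm, Addr ga )
{
   Lock* lk = NULL;
   /* (Word*)&lk and (Word)ga are the aliasing-rule-breaking casts */
   if (VG_(lookupFM)( fm, NULL, (Word*)&lk, (Word)ga ))
      return lk;   /* found */
   return NULL;    /* not mapped */
}
#endif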

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle del_LockN. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
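
/* Illustrative usage sketch (hypothetical, not part of the tool):
   the lock-acquisition handlers further below bind a guest mutex
   address to its Lock record with this pattern. */
#if 0
static void example_on_mutex_acquired ( Thread* thr, Addr mutex_ga )
{
   Lock* lk = map_locks_lookup_or_create(
                 LK_nonRec, mutex_ga,
                 map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   /* ... then proceed as in evhH__post_thread_w_acquires_lock ... */
}
#endif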


/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/

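/* Illustrative sketch (not compiled): the pairwise-disjointness
   invariant on write-locksets above could be checked directly as
   below, albeit quadratically in the number of threads.
   HG_(intersectWS) and HG_(isEmptyWS) are assumed to be available
   from hg_wordset.h. */
#if 0
static void example_check_w_locksets_disjoint ( void )
{
   Thread *t1, *t2;
   for (t1 = admin_threads; t1; t1 = t1->admin) {
      for (t2 = t1->admin; t2; t2 = t2->admin) {
         WordSetID both = HG_(intersectWS)( univ_lsets,
                                            t1->locksetW, t2->locksetW );
         /* no lock may be w-held by two threads at once */
         tl_assert(HG_(isEmptyWS)( univ_lsets, both ));
      }
   }
}
#endif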

/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001399 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001400 all__sanity_check("evh__new_mem_w_perms-post");
1401}
1402
1403static
1404void evh__set_perms ( Addr a, SizeT len,
1405 Bool rr, Bool ww, Bool xx ) {
1406 if (SHOW_EVENTS >= 1)
1407 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1408 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1409 /* Hmm. What should we do here, that actually makes any sense?
1410 Let's say: if neither readable nor writable, then declare it
1411 NoAccess, else leave it alone. */
1412 if (!(rr || ww))
1413 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001414 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001415 all__sanity_check("evh__set_perms-post");
1416}
1417
1418static
1419void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001420 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001421 if (SHOW_EVENTS >= 2)
1422 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1423 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001424 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001425 all__sanity_check("evh__die_mem-post");
1426}
1427
1428static
sewardj406bac82010-03-03 23:03:40 +00001429void evh__untrack_mem ( Addr a, SizeT len ) {
1430 // whereas it doesn't ignore this
1431 if (SHOW_EVENTS >= 2)
1432 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1433 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1434 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1435 all__sanity_check("evh__untrack_mem-post");
1436}
1437
1438static
sewardj23f12002009-07-24 08:45:08 +00001439void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1440 if (SHOW_EVENTS >= 2)
1441 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1442 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1443 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1444 all__sanity_check("evh__copy_mem-post");
1445}
1446
1447static
sewardjb4112022007-11-09 22:49:28 +00001448void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1449{
1450 if (SHOW_EVENTS >= 1)
1451 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1452 (Int)parent, (Int)child );
1453
1454 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001455 Thread* thr_p;
1456 Thread* thr_c;
1457 Thr* hbthr_p;
1458 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001459
sewardjf98e1c02008-10-25 16:22:41 +00001460 tl_assert(HG_(is_sane_ThreadId)(parent));
1461 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001462 tl_assert(parent != child);
1463
1464 thr_p = map_threads_maybe_lookup( parent );
1465 thr_c = map_threads_maybe_lookup( child );
1466
1467 tl_assert(thr_p != NULL);
1468 tl_assert(thr_c == NULL);
1469
sewardjf98e1c02008-10-25 16:22:41 +00001470 hbthr_p = thr_p->hbthr;
1471 tl_assert(hbthr_p != NULL);
sewardj60626642011-03-10 15:14:37 +00001472 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001473
sewardjf98e1c02008-10-25 16:22:41 +00001474 hbthr_c = libhb_create ( hbthr_p );
1475
1476 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001477 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001478 thr_c = mk_Thread( hbthr_c );
sewardj60626642011-03-10 15:14:37 +00001479 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1480 libhb_set_Thr_hgthread(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001481
1482 /* and bind it in the thread-map table */
1483 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001484 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1485 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001486
1487 /* Record where the parent is so we can later refer to this in
1488 error messages.
1489
1490 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1491 The stack snapshot is taken immediately after the parent has
1492 returned from its sys_clone call. Unfortunately there is no
1493 unwind info for the insn following "syscall" - reading the
1494 glibc sources confirms this. So we ask for a snapshot to be
1495 taken as if RIP was 3 bytes earlier, in a place where there
1496 is unwind info. Sigh.
1497 */
1498 { Word first_ip_delta = 0;
1499# if defined(VGP_amd64_linux)
1500 first_ip_delta = -3;
1501# endif
1502 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1503 }
sewardjb4112022007-11-09 22:49:28 +00001504 }
1505
sewardjf98e1c02008-10-25 16:22:41 +00001506 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001507 all__sanity_check("evh__pre_thread_create-post");
1508}
1509
1510static
1511void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1512{
1513 Int nHeld;
1514 Thread* thr_q;
1515 if (SHOW_EVENTS >= 1)
1516 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1517 (Int)quit_tid );
1518
1519 /* quit_tid has disappeared without joining to any other thread.
1520 Therefore there is no synchronisation event associated with its
1521 exit and so we have to pretty much treat it as if it was still
1522 alive but mysteriously making no progress. That is because, if
1523 we don't know when it really exited, then we can never say there
1524 is a point in time when we're sure the thread really has
1525 finished, and so we need to consider the possibility that it
1526 lingers indefinitely and continues to interact with other
1527 threads. */
1528 /* However, it might have rendezvous'd with a thread that called
1529 pthread_join with this one as arg, prior to this point (that's
1530 how NPTL works). In which case there has already been a prior
1531 sync event. So in any case, just let the thread exit. On NPTL,
1532 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001533 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001534 thr_q = map_threads_maybe_lookup( quit_tid );
1535 tl_assert(thr_q != NULL);
1536
1537 /* Complain if this thread holds any locks. */
1538 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1539 tl_assert(nHeld >= 0);
1540 if (nHeld > 0) {
1541 HChar buf[80];
1542 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1543 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001544 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001545 }
1546
sewardj23f12002009-07-24 08:45:08 +00001547 /* Not much to do here:
1548 - tell libhb the thread is gone
1549 - clear the map_threads entry, in order that the Valgrind core
1550 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001551 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1552 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001553 tl_assert(thr_q->hbthr);
1554 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001555 tl_assert(thr_q->coretid == quit_tid);
1556 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001557 map_threads_delete( quit_tid );
1558
sewardjf98e1c02008-10-25 16:22:41 +00001559 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001560 all__sanity_check("evh__pre_thread_ll_exit-post");
1561}
1562
sewardj61bc2c52011-02-09 10:34:00 +00001563/* This is called immediately after fork, for the child only. 'tid'
1564 is the only surviving thread (as per POSIX rules on fork() in
1565 threaded programs), so we have to clean up map_threads to remove
1566 entries for any other threads. */
1567static
1568void evh__atfork_child ( ThreadId tid )
1569{
1570 UInt i;
1571 Thread* thr;
1572 /* Slot 0 should never be used. */
1573 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1574 tl_assert(!thr);
1575 /* Clean up all other slots except 'tid'. */
1576 for (i = 1; i < VG_N_THREADS; i++) {
1577 if (i == tid)
1578 continue;
1579 thr = map_threads_maybe_lookup(i);
1580 if (!thr)
1581 continue;
1582 /* Cleanup actions (next 5 lines) copied from end of
1583 evh__pre_thread_ll_exit; keep in sync. */
1584 tl_assert(thr->hbthr);
1585 libhb_async_exit(thr->hbthr);
1586 tl_assert(thr->coretid == i);
1587 thr->coretid = VG_INVALID_THREADID;
1588 map_threads_delete(i);
1589 }
1590}
1591
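/* Sketch (not compiled) of the situation this handles: POSIX says
   that after fork() in a multithreaded program only the calling
   thread exists in the child, so all other map_threads slots must
   be retired as if those threads had exited. */
#if 0
#include <unistd.h>
int in_child_after_fork ( void ) {
   pid_t pid = fork();
   if (pid == 0) {
      /* child: exactly one thread now; evh__atfork_child has
         cleaned every other slot out of map_threads */
      return 1;
   }
   return 0;
}
#endif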
sewardjf98e1c02008-10-25 16:22:41 +00001592
sewardjb4112022007-11-09 22:49:28 +00001593static
1594void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1595{
sewardjb4112022007-11-09 22:49:28 +00001596 Thread* thr_s;
1597 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001598 Thr* hbthr_s;
1599 Thr* hbthr_q;
1600 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001601
1602 if (SHOW_EVENTS >= 1)
1603 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1604 (Int)stay_tid, quit_thr );
1605
sewardjf98e1c02008-10-25 16:22:41 +00001606 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001607
1608 thr_s = map_threads_maybe_lookup( stay_tid );
1609 thr_q = quit_thr;
1610 tl_assert(thr_s != NULL);
1611 tl_assert(thr_q != NULL);
1612 tl_assert(thr_s != thr_q);
1613
sewardjf98e1c02008-10-25 16:22:41 +00001614 hbthr_s = thr_s->hbthr;
1615 hbthr_q = thr_q->hbthr;
1616 tl_assert(hbthr_s != hbthr_q);
sewardj60626642011-03-10 15:14:37 +00001617 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1618 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001619
sewardjf98e1c02008-10-25 16:22:41 +00001620 /* Allocate a temporary synchronisation object and use it to send
1621 an imaginary message from the quitter to the stayer, the purpose
1622 being to generate a dependence from the quitter to the
1623 stayer. */
1624 so = libhb_so_alloc();
1625 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001626 /* Send last arg of _so_send as False, since the sending thread
1627 doesn't actually exist any more, so we don't want _so_send to
1628 try taking stack snapshots of it. */
sewardjf98e1c02008-10-25 16:22:41 +00001629 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1630 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1631 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001632
sewardjf98e1c02008-10-25 16:22:41 +00001633 /* evh__pre_thread_ll_exit issues an error message if the exiting
1634 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001635
1636 /* This holds because, at least when using NPTL as the thread
1637 library, we should be notified the low level thread exit before
1638 we hear of any join event on it. The low level exit
1639 notification feeds through into evh__pre_thread_ll_exit,
1640 which should clear the map_threads entry for it. Hence we
1641 expect there to be no map_threads entry at this point. */
1642 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1643 == VG_INVALID_THREADID);
1644
sewardjf98e1c02008-10-25 16:22:41 +00001645 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001646 all__sanity_check("evh__post_thread_join-post");
1647}
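
/* The client pattern the above models (sketch, not compiled;
   'result' is invented).  The quitter's last write happens-before
   the stayer's read because the join does a strong send/recv
   through a temporary SO. */
#if 0
#include <pthread.h>

static int result;

static void* child_fn ( void* v ) {
   result = 42;             /* written by the quitter */
   return NULL;
}

int demo_join ( void ) {
   pthread_t t;
   pthread_create(&t, NULL, child_fn, NULL);
   pthread_join(t, NULL);   /* creates the quitter->stayer edge */
   return result;           /* no race reported: ordered by join */
}
#endif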
1648
1649static
1650void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1651 Addr a, SizeT size) {
1652 if (SHOW_EVENTS >= 2
1653 || (SHOW_EVENTS >= 1 && size != 1))
1654 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1655 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001656 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001657 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001658 all__sanity_check("evh__pre_mem_read-post");
1659}
1660
1661static
1662void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1663 Char* s, Addr a ) {
1664 Int len;
1665 if (SHOW_EVENTS >= 1)
1666 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1667 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001668 // Don't segfault if the string starts in an obviously stupid
1669 // place. Actually we should check the whole string, not just
1670 // the start address, but that's too much trouble. At least
1671 // checking the first byte is better than nothing. See #255009.
1672 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1673 return;
sewardjb4112022007-11-09 22:49:28 +00001674 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001675 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001676 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001677 all__sanity_check("evh__pre_mem_read_asciiz-post");
1678}
1679
1680static
1681void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1682 Addr a, SizeT size ) {
1683 if (SHOW_EVENTS >= 1)
1684 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1685 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001686 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001687 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001688 all__sanity_check("evh__pre_mem_write-post");
1689}
1690
1691static
1692void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1693 if (SHOW_EVENTS >= 1)
1694 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1695 (void*)a, len, (Int)is_inited );
1696 // FIXME: this is kinda stupid
1697 if (is_inited) {
1698 shadow_mem_make_New(get_current_Thread(), a, len);
1699 } else {
1700 shadow_mem_make_New(get_current_Thread(), a, len);
1701 }
sewardjf98e1c02008-10-25 16:22:41 +00001702 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001703 all__sanity_check("evh__pre_mem_read-post");
1704}
1705
1706static
1707void evh__die_mem_heap ( Addr a, SizeT len ) {
1708 if (SHOW_EVENTS >= 1)
1709 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1710 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001711 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001712 all__sanity_check("evh__pre_mem_read-post");
1713}
1714
sewardj23f12002009-07-24 08:45:08 +00001715/* --- Event handlers called from generated code --- */
1716
sewardjb4112022007-11-09 22:49:28 +00001717static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001718void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001719 Thread* thr = get_current_Thread_in_C_C();
1720 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001721 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001722}
sewardjf98e1c02008-10-25 16:22:41 +00001723
sewardjb4112022007-11-09 22:49:28 +00001724static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001725void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001726 Thread* thr = get_current_Thread_in_C_C();
1727 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001728 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001729}
sewardjf98e1c02008-10-25 16:22:41 +00001730
sewardjb4112022007-11-09 22:49:28 +00001731static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001732void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001733 Thread* thr = get_current_Thread_in_C_C();
1734 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001735 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001736}
sewardjf98e1c02008-10-25 16:22:41 +00001737
sewardjb4112022007-11-09 22:49:28 +00001738static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001739void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001740 Thread* thr = get_current_Thread_in_C_C();
1741 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001742 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001743}
sewardjf98e1c02008-10-25 16:22:41 +00001744
sewardjb4112022007-11-09 22:49:28 +00001745static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001746void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001747 Thread* thr = get_current_Thread_in_C_C();
1748 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001749 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001750}
1751
1752static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001753void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001754 Thread* thr = get_current_Thread_in_C_C();
1755 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001756 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001757}
sewardjf98e1c02008-10-25 16:22:41 +00001758
sewardjb4112022007-11-09 22:49:28 +00001759static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001760void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001761 Thread* thr = get_current_Thread_in_C_C();
1762 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001763 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001764}
sewardjf98e1c02008-10-25 16:22:41 +00001765
sewardjb4112022007-11-09 22:49:28 +00001766static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001767void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001768 Thread* thr = get_current_Thread_in_C_C();
1769 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001770 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001771}
sewardjf98e1c02008-10-25 16:22:41 +00001772
sewardjb4112022007-11-09 22:49:28 +00001773static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001774void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001775 Thread* thr = get_current_Thread_in_C_C();
1776 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001777 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001778}
sewardjf98e1c02008-10-25 16:22:41 +00001779
sewardjb4112022007-11-09 22:49:28 +00001780static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001781void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001782 Thread* thr = get_current_Thread_in_C_C();
1783 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001784 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001785}
1786
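/* Sketch of the size dispatch the instrumenter effectively performs
   when it plants calls to the helpers above.  'emit_helper_call' and
   'emit_helper_call2' are made-up stand-ins for the real IR-building
   machinery; only the case analysis is the point here. */
#if 0
static void plant_read_check ( Addr a, SizeT szB ) {
   switch (szB) {
      case 1: emit_helper_call( evh__mem_help_cread_1, a ); break;
      case 2: emit_helper_call( evh__mem_help_cread_2, a ); break;
      case 4: emit_helper_call( evh__mem_help_cread_4, a ); break;
      case 8: emit_helper_call( evh__mem_help_cread_8, a ); break;
      default: emit_helper_call2( evh__mem_help_cread_N, a, szB );
               break;
   }
}
#endif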
sewardjb4112022007-11-09 22:49:28 +00001787
sewardj9f569b72008-11-13 13:33:09 +00001788/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001789/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001790/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001791
1792/* EXPOSITION only: by intercepting lock init events we can show the
1793 user where the lock was initialised, rather than only being able to
1794 show where it was first locked. Intercepting lock initialisations
1795 is not necessary for the basic operation of the race checker. */
1796static
1797void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1798 void* mutex, Word mbRec )
1799{
1800 if (SHOW_EVENTS >= 1)
1801 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1802 (Int)tid, mbRec, (void*)mutex );
1803 tl_assert(mbRec == 0 || mbRec == 1);
1804 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1805 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001806 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001807 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1808}
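
/* Why recording the init site helps (sketch, not compiled; 'mu' is
   invented).  With the init event intercepted, later reports about
   'mu' can cite the pthread_mutex_init call site rather than only
   the first pthread_mutex_lock. */
#if 0
#include <pthread.h>

static pthread_mutex_t mu;

void setup ( void ) {
   pthread_mutex_init(&mu, NULL);   /* this site appears in reports */
}

void worker ( void ) {
   pthread_mutex_lock(&mu);         /* otherwise only this would */
   pthread_mutex_unlock(&mu);
}
#endif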
1809
1810static
1811void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1812{
1813 Thread* thr;
1814 Lock* lk;
1815 if (SHOW_EVENTS >= 1)
1816 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1817 (Int)tid, (void*)mutex );
1818
1819 thr = map_threads_maybe_lookup( tid );
1820 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001821 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001822
1823 lk = map_locks_maybe_lookup( (Addr)mutex );
1824
1825 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001826 HG_(record_error_Misc)(
1827 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001828 }
1829
1830 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001831 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001832 tl_assert( lk->guestaddr == (Addr)mutex );
1833 if (lk->heldBy) {
1834 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001835 HG_(record_error_Misc)(
1836 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001837 /* remove lock from locksets of all owning threads */
1838 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001839 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001840 lk->heldBy = NULL;
1841 lk->heldW = False;
1842 lk->acquired_at = NULL;
1843 }
1844 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001845 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001846
1847 if (HG_(clo_track_lockorders))
1848 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001849 map_locks_delete( lk->guestaddr );
1850 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001851 }
1852
sewardjf98e1c02008-10-25 16:22:41 +00001853 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001854 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1855}
1856
1857static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1858 void* mutex, Word isTryLock )
1859{
1860 /* Just check the mutex is sane; nothing else to do. */
1861 // 'mutex' may be invalid - not checked by wrapper
1862 Thread* thr;
1863 Lock* lk;
1864 if (SHOW_EVENTS >= 1)
1865 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1866 (Int)tid, (void*)mutex );
1867
1868 tl_assert(isTryLock == 0 || isTryLock == 1);
1869 thr = map_threads_maybe_lookup( tid );
1870 tl_assert(thr); /* cannot fail - Thread* must already exist */
1871
1872 lk = map_locks_maybe_lookup( (Addr)mutex );
1873
1874 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001875 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1876 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001877 }
1878
1879 if ( lk
1880 && isTryLock == 0
1881 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1882 && lk->heldBy
1883 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001884 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001885 /* uh, it's a non-recursive lock and we already w-hold it, and
1886 this is a real lock operation (not a speculative "tryLock"
1887 kind of thing). Duh. Deadlock coming up; but at least
1888 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00001889 HChar* errstr = "Attempt to re-lock a "
1890 "non-recursive lock I already hold";
1891 HChar* auxstr = "Lock was previously acquired";
1892 if (lk->acquired_at) {
1893 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
1894 } else {
1895 HG_(record_error_Misc)( thr, errstr );
1896 }
sewardjb4112022007-11-09 22:49:28 +00001897 }
1898}
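
/* A client fragment (sketch, not compiled) that trips the check
   above: w-re-locking a non-recursive mutex we already hold. */
#if 0
#include <pthread.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;

void oops ( void ) {
   pthread_mutex_lock(&mu);
   pthread_mutex_lock(&mu);   /* reported: "Attempt to re-lock a
                                 non-recursive lock I already hold";
                                 then the thread self-deadlocks */
}
#endif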
1899
1900static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1901{
1902 // only called if the real library call succeeded - so mutex is sane
1903 Thread* thr;
1904 if (SHOW_EVENTS >= 1)
1905 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1906 (Int)tid, (void*)mutex );
1907
1908 thr = map_threads_maybe_lookup( tid );
1909 tl_assert(thr); /* cannot fail - Thread* must already exist */
1910
1911 evhH__post_thread_w_acquires_lock(
1912 thr,
1913 LK_mbRec, /* if not known, create new lock with this LockKind */
1914 (Addr)mutex
1915 );
1916}
1917
1918static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1919{
1920 // 'mutex' may be invalid - not checked by wrapper
1921 Thread* thr;
1922 if (SHOW_EVENTS >= 1)
1923 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1924 (Int)tid, (void*)mutex );
1925
1926 thr = map_threads_maybe_lookup( tid );
1927 tl_assert(thr); /* cannot fail - Thread* must already exist */
1928
1929 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1930}
1931
1932static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1933{
1934 // only called if the real library call succeeded - so mutex is sane
1935 Thread* thr;
1936 if (SHOW_EVENTS >= 1)
1937 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1938 (Int)tid, (void*)mutex );
1939 thr = map_threads_maybe_lookup( tid );
1940 tl_assert(thr); /* cannot fail - Thread* must already exist */
1941
1942 // anything we should do here?
1943}
1944
1945
sewardj5a644da2009-08-11 10:35:58 +00001946/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00001947/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00001948/* ------------------------------------------------------- */
1949
1950/* All a bit of a kludge. Pretend we're really dealing with ordinary
1951 pthread_mutex_t's instead, for the most part. */
1952
1953static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
1954 void* slock )
1955{
1956 Thread* thr;
1957 Lock* lk;
1958 /* In glibc's kludgey world, we're either initialising or unlocking
1959 it. Since this is the pre-routine, if it is locked, unlock it
1960 and take a dependence edge. Otherwise, do nothing. */
1961
1962 if (SHOW_EVENTS >= 1)
1963 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
1964 "(ctid=%d, slock=%p)\n",
1965 (Int)tid, (void*)slock );
1966
1967 thr = map_threads_maybe_lookup( tid );
 1968   /* cannot fail - Thread* must already exist */
1969 tl_assert( HG_(is_sane_Thread)(thr) );
1970
1971 lk = map_locks_maybe_lookup( (Addr)slock );
1972 if (lk && lk->heldBy) {
1973 /* it's held. So do the normal pre-unlock actions, as copied
1974 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
1975 duplicates the map_locks_maybe_lookup. */
1976 evhH__pre_thread_releases_lock( thr, (Addr)slock,
1977 False/*!isRDWR*/ );
1978 }
1979}
1980
1981static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
1982 void* slock )
1983{
1984 Lock* lk;
1985 /* More kludgery. If the lock has never been seen before, do
1986 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
1987 nothing. */
1988
1989 if (SHOW_EVENTS >= 1)
1990 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
1991 "(ctid=%d, slock=%p)\n",
1992 (Int)tid, (void*)slock );
1993
1994 lk = map_locks_maybe_lookup( (Addr)slock );
1995 if (!lk) {
1996 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
1997 }
1998}
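
/* Why one wrapper has to cover both init and unlock (sketch): in
   glibc, both operations amount to storing the 'released' value
   into the lock word, so they are indistinguishable at intercept
   time.  Illustrative only -- real glibc differs in detail
   (barriers, atomics). */
#if 0
int spin_init_like ( volatile int* lock, int pshared ) {
   *lock = 0;    /* the same store as ... */
   return 0;
}
int spin_unlock_like ( volatile int* lock ) {
   *lock = 0;    /* ... this one */
   return 0;
}
#endif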
1999
2000static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2001 void* slock, Word isTryLock )
2002{
2003 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2004}
2005
2006static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2007 void* slock )
2008{
2009 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2010}
2011
2012static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2013 void* slock )
2014{
2015 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2016}
2017
2018
sewardj9f569b72008-11-13 13:33:09 +00002019/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002020/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002021/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002022
sewardj02114542009-07-28 20:52:36 +00002023/* A mapping from CV to (the SO associated with it, plus some
2024 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002025 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2026 wait on it completes, we do a 'recv' from the SO. This is believed
2027 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002028 signallings/broadcasts.
2029*/
2030
sewardj02114542009-07-28 20:52:36 +00002031/* .so is the SO for this CV.
2032 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002033
sewardj02114542009-07-28 20:52:36 +00002034 POSIX says effectively that the first pthread_cond_{timed}wait call
2035 causes a dynamic binding between the CV and the mutex, and that
2036 lasts until such time as the waiter count falls to zero. Hence
2037 need to keep track of the number of waiters in order to do
2038 consistency tracking. */
2039typedef
2040 struct {
2041 SO* so; /* libhb-allocated SO */
2042 void* mx_ga; /* addr of associated mutex, if any */
2043 UWord nWaiters; /* # threads waiting on the CV */
2044 }
2045 CVInfo;
2046
2047
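/* The client pattern the CV machinery tracks (sketch, not compiled;
   'ready', 'mu' and 'flag' are invented).  The signal does a 'send'
   on the CV's SO and the completed wait does a 'recv', so the write
   of 'flag' happens-before the waiter's read of it. */
#if 0
#include <pthread.h>

static pthread_mutex_t mu    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ready = PTHREAD_COND_INITIALIZER;
static int flag = 0;

static void* producer ( void* v ) {
   pthread_mutex_lock(&mu);
   flag = 1;
   pthread_cond_signal(&ready);       /* 'send' on the CV's SO */
   pthread_mutex_unlock(&mu);
   return NULL;
}

static void* consumer ( void* v ) {
   pthread_mutex_lock(&mu);
   while (!flag)
      pthread_cond_wait(&ready, &mu); /* 'recv' when it completes */
   pthread_mutex_unlock(&mu);
   return NULL;
}
#endif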
2048/* pthread_cond_t* -> CVInfo* */
2049static WordFM* map_cond_to_CVInfo = NULL;
2050
2051static void map_cond_to_CVInfo_INIT ( void ) {
2052 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2053 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2054 "hg.mctCI.1", HG_(free), NULL );
2055 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002056 }
2057}
2058
sewardj02114542009-07-28 20:52:36 +00002059static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002060 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002061 map_cond_to_CVInfo_INIT();
2062 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002063 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002064 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002065 } else {
sewardj02114542009-07-28 20:52:36 +00002066 SO* so = libhb_so_alloc();
2067 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2068 cvi->so = so;
2069 cvi->mx_ga = 0;
2070 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2071 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002072 }
2073}
2074
sewardj02114542009-07-28 20:52:36 +00002075static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002076 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002077 map_cond_to_CVInfo_INIT();
2078 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2079 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002080 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002081 tl_assert(cvi);
2082 tl_assert(cvi->so);
2083 libhb_so_dealloc(cvi->so);
2084 cvi->mx_ga = 0;
2085 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002086 }
2087}
2088
2089static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2090{
sewardjf98e1c02008-10-25 16:22:41 +00002091 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2092 cond to a SO if it is not already so bound, and 'send' on the
2093 SO. This is later used by other thread(s) which successfully
2094 exit from a pthread_cond_wait on the same cv; then they 'recv'
2095 from the SO, thereby acquiring a dependency on this signalling
2096 event. */
sewardjb4112022007-11-09 22:49:28 +00002097 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002098 CVInfo* cvi;
2099 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002100
2101 if (SHOW_EVENTS >= 1)
2102 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2103 (Int)tid, (void*)cond );
2104
sewardjb4112022007-11-09 22:49:28 +00002105 thr = map_threads_maybe_lookup( tid );
2106 tl_assert(thr); /* cannot fail - Thread* must already exist */
2107
sewardj02114542009-07-28 20:52:36 +00002108 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2109 tl_assert(cvi);
2110 tl_assert(cvi->so);
2111
sewardjb4112022007-11-09 22:49:28 +00002112 // error-if: mutex is bogus
2113 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002114 // Hmm. POSIX doesn't actually say that it's an error to call
2115 // pthread_cond_signal with the associated mutex being unlocked.
2116 // Although it does say that it should be "if consistent scheduling
2117 // is desired."
2118 //
2119 // For the moment, disable these checks.
2120 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2121 //if (lk == NULL || cvi->mx_ga == 0) {
2122 // HG_(record_error_Misc)( thr,
2123 // "pthread_cond_{signal,broadcast}: "
2124 // "no or invalid mutex associated with cond");
2125 //}
2126 ///* note: lk could be NULL. Be careful. */
2127 //if (lk) {
2128 // if (lk->kind == LK_rdwr) {
2129 // HG_(record_error_Misc)(thr,
2130 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2131 // }
2132 // if (lk->heldBy == NULL) {
2133 // HG_(record_error_Misc)(thr,
2134 // "pthread_cond_{signal,broadcast}: "
2135 // "associated lock is not held by any thread");
2136 // }
2137 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2138 // HG_(record_error_Misc)(thr,
2139 // "pthread_cond_{signal,broadcast}: "
2140 // "associated lock is not held by calling thread");
2141 // }
2142 //}
sewardjb4112022007-11-09 22:49:28 +00002143
sewardj02114542009-07-28 20:52:36 +00002144 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002145}
2146
2147/* returns True if it reckons 'mutex' is valid and held by this
2148 thread, else False */
2149static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2150 void* cond, void* mutex )
2151{
2152 Thread* thr;
2153 Lock* lk;
2154 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002155 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002156
2157 if (SHOW_EVENTS >= 1)
2158 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2159 "(ctid=%d, cond=%p, mutex=%p)\n",
2160 (Int)tid, (void*)cond, (void*)mutex );
2161
sewardjb4112022007-11-09 22:49:28 +00002162 thr = map_threads_maybe_lookup( tid );
2163 tl_assert(thr); /* cannot fail - Thread* must already exist */
2164
2165 lk = map_locks_maybe_lookup( (Addr)mutex );
2166
2167 /* Check for stupid mutex arguments. There are various ways to be
2168 a bozo. Only complain once, though, even if more than one thing
2169 is wrong. */
2170 if (lk == NULL) {
2171 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002172 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002173 thr,
2174 "pthread_cond_{timed}wait called with invalid mutex" );
2175 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002176 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002177 if (lk->kind == LK_rdwr) {
2178 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002179 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002180 thr, "pthread_cond_{timed}wait called with mutex "
2181 "of type pthread_rwlock_t*" );
2182 } else
2183 if (lk->heldBy == NULL) {
2184 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002185 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002186 thr, "pthread_cond_{timed}wait called with un-held mutex");
2187 } else
2188 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002189 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002190 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002191 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002192 thr, "pthread_cond_{timed}wait called with mutex "
2193 "held by a different thread" );
2194 }
2195 }
2196
2197 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002198 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2199 tl_assert(cvi);
2200 tl_assert(cvi->so);
2201 if (cvi->nWaiters == 0) {
2202 /* form initial (CV,MX) binding */
2203 cvi->mx_ga = mutex;
2204 }
2205 else /* check existing (CV,MX) binding */
2206 if (cvi->mx_ga != mutex) {
2207 HG_(record_error_Misc)(
2208 thr, "pthread_cond_{timed}wait: cond is associated "
2209 "with a different mutex");
2210 }
2211 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002212
2213 return lk_valid;
2214}
2215
2216static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2217 void* cond, void* mutex )
2218{
sewardjf98e1c02008-10-25 16:22:41 +00002219 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2220 the SO for this cond, and 'recv' from it so as to acquire a
2221 dependency edge back to the signaller/broadcaster. */
2222 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002223 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002224
2225 if (SHOW_EVENTS >= 1)
2226 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2227 "(ctid=%d, cond=%p, mutex=%p)\n",
2228 (Int)tid, (void*)cond, (void*)mutex );
2229
sewardjb4112022007-11-09 22:49:28 +00002230 thr = map_threads_maybe_lookup( tid );
2231 tl_assert(thr); /* cannot fail - Thread* must already exist */
2232
2233 // error-if: cond is also associated with a different mutex
2234
sewardj02114542009-07-28 20:52:36 +00002235 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2236 tl_assert(cvi);
2237 tl_assert(cvi->so);
2238 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002239
sewardj02114542009-07-28 20:52:36 +00002240 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002241 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2242 it? If this happened it would surely be a bug in the threads
2243 library. Or one of those fabled "spurious wakeups". */
2244 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2245 "succeeded on"
2246 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002247 }
sewardjf98e1c02008-10-25 16:22:41 +00002248
2249 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002250 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2251
2252 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002253}
2254
2255static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2256 void* cond )
2257{
2258 /* Deal with destroy events. The only purpose is to free storage
2259 associated with the CV, so as to avoid any possible resource
2260 leaks. */
2261 if (SHOW_EVENTS >= 1)
2262 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2263 "(ctid=%d, cond=%p)\n",
2264 (Int)tid, (void*)cond );
2265
sewardj02114542009-07-28 20:52:36 +00002266 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002267}
2268
2269
sewardj9f569b72008-11-13 13:33:09 +00002270/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002271/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002272/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002273
2274/* EXPOSITION only */
2275static
2276void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2277{
2278 if (SHOW_EVENTS >= 1)
2279 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2280 (Int)tid, (void*)rwl );
2281 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002282 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002283 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2284}
2285
2286static
2287void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2288{
2289 Thread* thr;
2290 Lock* lk;
2291 if (SHOW_EVENTS >= 1)
2292 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2293 (Int)tid, (void*)rwl );
2294
2295 thr = map_threads_maybe_lookup( tid );
2296 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002297 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002298
2299 lk = map_locks_maybe_lookup( (Addr)rwl );
2300
2301 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002302 HG_(record_error_Misc)(
2303 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002304 }
2305
2306 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002307 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002308 tl_assert( lk->guestaddr == (Addr)rwl );
2309 if (lk->heldBy) {
2310 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002311 HG_(record_error_Misc)(
2312 thr, "pthread_rwlock_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00002313 /* remove lock from locksets of all owning threads */
2314 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002315 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002316 lk->heldBy = NULL;
2317 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002318 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002319 }
2320 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002321 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002322
2323 if (HG_(clo_track_lockorders))
2324 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002325 map_locks_delete( lk->guestaddr );
2326 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002327 }
2328
sewardjf98e1c02008-10-25 16:22:41 +00002329 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002330 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2331}
2332
2333static
sewardj789c3c52008-02-25 12:10:07 +00002334void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2335 void* rwl,
2336 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002337{
2338 /* Just check the rwl is sane; nothing else to do. */
2339 // 'rwl' may be invalid - not checked by wrapper
2340 Thread* thr;
2341 Lock* lk;
2342 if (SHOW_EVENTS >= 1)
2343 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2344 (Int)tid, (Int)isW, (void*)rwl );
2345
2346 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002347 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002348 thr = map_threads_maybe_lookup( tid );
2349 tl_assert(thr); /* cannot fail - Thread* must already exist */
2350
2351 lk = map_locks_maybe_lookup( (Addr)rwl );
2352 if ( lk
2353 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2354 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002355 HG_(record_error_Misc)(
2356 thr, "pthread_rwlock_{rd,rw}lock with a "
2357 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002358 }
2359}
2360
2361static
2362void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2363{
2364 // only called if the real library call succeeded - so mutex is sane
2365 Thread* thr;
2366 if (SHOW_EVENTS >= 1)
2367 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2368 (Int)tid, (Int)isW, (void*)rwl );
2369
2370 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2371 thr = map_threads_maybe_lookup( tid );
2372 tl_assert(thr); /* cannot fail - Thread* must already exist */
2373
2374 (isW ? evhH__post_thread_w_acquires_lock
2375 : evhH__post_thread_r_acquires_lock)(
2376 thr,
2377 LK_rdwr, /* if not known, create new lock with this LockKind */
2378 (Addr)rwl
2379 );
2380}
2381
2382static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2383{
2384 // 'rwl' may be invalid - not checked by wrapper
2385 Thread* thr;
2386 if (SHOW_EVENTS >= 1)
2387 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2388 (Int)tid, (void*)rwl );
2389
2390 thr = map_threads_maybe_lookup( tid );
2391 tl_assert(thr); /* cannot fail - Thread* must already exist */
2392
2393 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2394}
2395
2396static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2397{
2398 // only called if the real library call succeeded - so mutex is sane
2399 Thread* thr;
2400 if (SHOW_EVENTS >= 1)
2401 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2402 (Int)tid, (void*)rwl );
2403 thr = map_threads_maybe_lookup( tid );
2404 tl_assert(thr); /* cannot fail - Thread* must already exist */
2405
2406 // anything we should do here?
2407}
2408
2409
sewardj9f569b72008-11-13 13:33:09 +00002410/* ---------------------------------------------------------- */
2411/* -------------- events to do with semaphores -------------- */
2412/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002413
sewardj11e352f2007-11-30 11:11:02 +00002414/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002415 variables. */
2416
sewardjf98e1c02008-10-25 16:22:41 +00002417/* For each semaphore, we maintain a stack of SOs. When a 'post'
2418 operation is done on a semaphore (unlocking, essentially), a new SO
2419 is created for the posting thread, the posting thread does a strong
2420 send to it (which merely installs the posting thread's VC in the
2421 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002422
2423 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002424 semaphore, we pop a SO off the semaphore's stack (which should be
2425 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002426 dependencies between posters and waiters of the semaphore.
2427
sewardjf98e1c02008-10-25 16:22:41 +00002428 It may not be necessary to use a stack - perhaps a bag of SOs would
 2429   do. But we do need to keep track of how many not-yet-consumed posts have
2430 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002431
sewardjf98e1c02008-10-25 16:22:41 +00002432 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002433 twice on S. T3 cannot complete its waits without both T1 and T2
2434 posting. The above mechanism will ensure that T3 acquires
2435 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002436
sewardjf98e1c02008-10-25 16:22:41 +00002437 When a semaphore is initialised with value N, we do as if we'd
2438 posted N times on the semaphore: basically create N SOs and do a
 2439   strong send to all of them. This allows up to N waits on the
2440 semaphore to acquire a dependency on the initialisation point,
2441 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002442
2443 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2444 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002445*/
2446
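/* The T1/T2/T3 scenario above, concretized (sketch, not compiled;
   all names invented, and S is assumed initialised with
   sem_init(&S, 0, 0) before the threads start).  Each sem_wait pops
   one poster's SO and does a strong recv from it, so after both
   waits T3 is ordered after both posters. */
#if 0
#include <semaphore.h>

static sem_t S;
static int x1, x2;

static void* T1 ( void* v ) { x1 = 1; sem_post(&S); return NULL; }
static void* T2 ( void* v ) { x2 = 1; sem_post(&S); return NULL; }

static void* T3 ( void* v ) {
   sem_wait(&S);
   sem_wait(&S);
   return (void*)(long)(x1 + x2);   /* no race: both posts precede us */
}
#endif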
sewardjf98e1c02008-10-25 16:22:41 +00002447/* sem_t* -> XArray* SO* */
2448static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002449
sewardjf98e1c02008-10-25 16:22:41 +00002450static void map_sem_to_SO_stack_INIT ( void ) {
2451 if (map_sem_to_SO_stack == NULL) {
2452 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2453 HG_(free), NULL );
2454 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002455 }
2456}
2457
sewardjf98e1c02008-10-25 16:22:41 +00002458static void push_SO_for_sem ( void* sem, SO* so ) {
2459 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002460 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002461 tl_assert(so);
2462 map_sem_to_SO_stack_INIT();
2463 if (VG_(lookupFM)( map_sem_to_SO_stack,
2464 &keyW, (UWord*)&xa, (UWord)sem )) {
2465 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002466 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002467 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002468 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002469 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2470 VG_(addToXA)( xa, &so );
2471 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002472 }
2473}
2474
sewardjf98e1c02008-10-25 16:22:41 +00002475static SO* mb_pop_SO_for_sem ( void* sem ) {
2476 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002477 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002478 SO* so;
2479 map_sem_to_SO_stack_INIT();
2480 if (VG_(lookupFM)( map_sem_to_SO_stack,
2481 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002482 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002483 Word sz;
2484 tl_assert(keyW == (UWord)sem);
2485 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002486 tl_assert(sz >= 0);
2487 if (sz == 0)
2488 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002489 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2490 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002491 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002492 return so;
sewardjb4112022007-11-09 22:49:28 +00002493 } else {
2494 /* hmm, that's odd. No stack for this semaphore. */
2495 return NULL;
2496 }
2497}
2498
sewardj11e352f2007-11-30 11:11:02 +00002499static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002500{
sewardjf98e1c02008-10-25 16:22:41 +00002501 UWord keyW, valW;
2502 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002503
sewardjb4112022007-11-09 22:49:28 +00002504 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002505 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002506 (Int)tid, (void*)sem );
2507
sewardjf98e1c02008-10-25 16:22:41 +00002508 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002509
sewardjf98e1c02008-10-25 16:22:41 +00002510 /* Empty out the semaphore's SO stack. This way of doing it is
2511 stupid, but at least it's easy. */
2512 while (1) {
2513 so = mb_pop_SO_for_sem( sem );
2514 if (!so) break;
2515 libhb_so_dealloc(so);
2516 }
2517
2518 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2519 XArray* xa = (XArray*)valW;
2520 tl_assert(keyW == (UWord)sem);
2521 tl_assert(xa);
2522 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2523 VG_(deleteXA)(xa);
2524 }
sewardjb4112022007-11-09 22:49:28 +00002525}
2526
sewardj11e352f2007-11-30 11:11:02 +00002527static
2528void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2529{
sewardjf98e1c02008-10-25 16:22:41 +00002530 SO* so;
2531 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002532
2533 if (SHOW_EVENTS >= 1)
2534 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2535 (Int)tid, (void*)sem, value );
2536
sewardjf98e1c02008-10-25 16:22:41 +00002537 thr = map_threads_maybe_lookup( tid );
2538 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002539
sewardjf98e1c02008-10-25 16:22:41 +00002540 /* Empty out the semaphore's SO stack. This way of doing it is
2541 stupid, but at least it's easy. */
2542 while (1) {
2543 so = mb_pop_SO_for_sem( sem );
2544 if (!so) break;
2545 libhb_so_dealloc(so);
2546 }
sewardj11e352f2007-11-30 11:11:02 +00002547
sewardjf98e1c02008-10-25 16:22:41 +00002548 /* If we don't do this check, the following while loop runs us out
2549 of memory for stupid initial values of 'value'. */
2550 if (value > 10000) {
2551 HG_(record_error_Misc)(
2552 thr, "sem_init: initial value exceeds 10000; using 10000" );
2553 value = 10000;
2554 }
sewardj11e352f2007-11-30 11:11:02 +00002555
sewardjf98e1c02008-10-25 16:22:41 +00002556   /* Now create 'value' new SOs for the thread, do a strong send to
2557 each of them, and push them all on the stack. */
2558 for (; value > 0; value--) {
2559 Thr* hbthr = thr->hbthr;
2560 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002561
sewardjf98e1c02008-10-25 16:22:41 +00002562 so = libhb_so_alloc();
2563 libhb_so_send( hbthr, so, True/*strong send*/ );
2564 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002565 }
2566}
2567
2568static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002569{
sewardjf98e1c02008-10-25 16:22:41 +00002570 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2571 it (iow, write our VC into it, then tick ours), and push the SO
2572 on on a stack of SOs associated with 'sem'. This is later used
2573 by other thread(s) which successfully exit from a sem_wait on
2574 the same sem; by doing a strong recv from SOs popped of the
2575 stack, they acquire dependencies on the posting thread
2576 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002577
sewardjf98e1c02008-10-25 16:22:41 +00002578 Thread* thr;
2579 SO* so;
2580 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002581
2582 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002583 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002584 (Int)tid, (void*)sem );
2585
2586 thr = map_threads_maybe_lookup( tid );
2587 tl_assert(thr); /* cannot fail - Thread* must already exist */
2588
2589 // error-if: sem is bogus
2590
sewardjf98e1c02008-10-25 16:22:41 +00002591 hbthr = thr->hbthr;
2592 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002593
sewardjf98e1c02008-10-25 16:22:41 +00002594 so = libhb_so_alloc();
2595 libhb_so_send( hbthr, so, True/*strong send*/ );
2596 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002597}
2598
sewardj11e352f2007-11-30 11:11:02 +00002599static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002600{
sewardjf98e1c02008-10-25 16:22:41 +00002601 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2602 the 'sem' from this semaphore's SO-stack, and do a strong recv
2603 from it. This creates a dependency back to one of the post-ers
2604 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002605
sewardjf98e1c02008-10-25 16:22:41 +00002606 Thread* thr;
2607 SO* so;
2608 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002609
2610 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002611 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002612 (Int)tid, (void*)sem );
2613
2614 thr = map_threads_maybe_lookup( tid );
2615 tl_assert(thr); /* cannot fail - Thread* must already exist */
2616
2617 // error-if: sem is bogus
2618
sewardjf98e1c02008-10-25 16:22:41 +00002619 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002620
sewardjf98e1c02008-10-25 16:22:41 +00002621 if (so) {
2622 hbthr = thr->hbthr;
2623 tl_assert(hbthr);
2624
2625 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2626 libhb_so_dealloc(so);
2627 } else {
2628 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2629 If this happened it would surely be a bug in the threads
2630 library. */
2631 HG_(record_error_Misc)(
2632 thr, "Bug in libpthread: sem_wait succeeded on"
2633 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002634 }
2635}
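
/* Client-side sketch of the protocol the two hooks above model
   (illustrative only, not compiled into Helgrind; plain POSIX calls).
   The write to 'payload' is race-free for the consumer because
   sem_post does a strong send and sem_wait a strong recv through the
   semaphore's SO stack: */
#if 0
#include <pthread.h>
#include <semaphore.h>

static sem_t sem;
static int   payload;

static void* producer ( void* v ) {
   payload = 42;              /* plain write, no lock held */
   sem_post(&sem);            /* -> evh__HG_POSIX_SEM_POST_PRE */
   return NULL;
}

static void* consumer ( void* v ) {
   sem_wait(&sem);            /* -> evh__HG_POSIX_SEM_WAIT_POST */
   return (void*)(long)payload;   /* race-free read */
}

int main ( void ) {
   pthread_t p, c;
   sem_init(&sem, 0, 0);
   pthread_create(&c, NULL, consumer, NULL);
   pthread_create(&p, NULL, producer, NULL);
   pthread_join(p, NULL);
   pthread_join(c, NULL);
   return 0;
}
#endif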
2636
2637
sewardj9f569b72008-11-13 13:33:09 +00002638/* -------------------------------------------------------- */
2639/* -------------- events to do with barriers -------------- */
2640/* -------------------------------------------------------- */
2641
2642typedef
2643 struct {
2644 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002645 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002646 UWord size; /* declared size */
2647 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2648 }
2649 Bar;
2650
2651static Bar* new_Bar ( void ) {
2652 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2653 tl_assert(bar);
2654 /* all fields are zero */
2655 tl_assert(bar->initted == False);
2656 return bar;
2657}
2658
2659static void delete_Bar ( Bar* bar ) {
2660 tl_assert(bar);
2661 if (bar->waiting)
2662 VG_(deleteXA)(bar->waiting);
2663 HG_(free)(bar);
2664}
2665
2666/* A mapping which stores auxiliary data for barriers. */
2667
2668/* pthread_barrier_t* -> Bar* */
2669static WordFM* map_barrier_to_Bar = NULL;
2670
2671static void map_barrier_to_Bar_INIT ( void ) {
2672 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2673 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2674 "hg.mbtBI.1", HG_(free), NULL );
2675 tl_assert(map_barrier_to_Bar != NULL);
2676 }
2677}
2678
2679static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2680 UWord key, val;
2681 map_barrier_to_Bar_INIT();
2682 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2683 tl_assert(key == (UWord)barrier);
2684 return (Bar*)val;
2685 } else {
2686 Bar* bar = new_Bar();
2687 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2688 return bar;
2689 }
2690}
2691
2692static void map_barrier_to_Bar_delete ( void* barrier ) {
2693 UWord keyW, valW;
2694 map_barrier_to_Bar_INIT();
2695 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2696 Bar* bar = (Bar*)valW;
2697 tl_assert(keyW == (UWord)barrier);
2698 delete_Bar(bar);
2699 }
2700}
2701
2702
2703static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2704 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002705 UWord count,
2706 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002707{
2708 Thread* thr;
2709 Bar* bar;
2710
2711 if (SHOW_EVENTS >= 1)
2712 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002713 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2714 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002715
2716 thr = map_threads_maybe_lookup( tid );
2717 tl_assert(thr); /* cannot fail - Thread* must already exist */
2718
2719 if (count == 0) {
2720 HG_(record_error_Misc)(
2721 thr, "pthread_barrier_init: 'count' argument is zero"
2722 );
2723 }
2724
sewardj406bac82010-03-03 23:03:40 +00002725 if (resizable != 0 && resizable != 1) {
2726 HG_(record_error_Misc)(
2727 thr, "pthread_barrier_init: invalid 'resizable' argument"
2728 );
2729 }
2730
sewardj9f569b72008-11-13 13:33:09 +00002731 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2732 tl_assert(bar);
2733
2734 if (bar->initted) {
2735 HG_(record_error_Misc)(
2736 thr, "pthread_barrier_init: barrier is already initialised"
2737 );
2738 }
2739
2740 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2741 tl_assert(bar->initted);
2742 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002743 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002744 );
2745 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2746 }
2747 if (!bar->waiting) {
2748 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2749 sizeof(Thread*) );
2750 }
2751
2752 tl_assert(bar->waiting);
2753 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002754 bar->initted = True;
2755 bar->resizable = resizable == 1 ? True : False;
2756 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002757}
2758
2759
2760static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2761 void* barrier )
2762{
sewardj553655c2008-11-14 19:41:19 +00002763 Thread* thr;
2764 Bar* bar;
2765
sewardj9f569b72008-11-13 13:33:09 +00002766 /* Deal with destroy events. The only purpose is to free storage
2767 associated with the barrier, so as to avoid any possible
2768 resource leaks. */
2769 if (SHOW_EVENTS >= 1)
2770 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2771 "(tid=%d, barrier=%p)\n",
2772 (Int)tid, (void*)barrier );
2773
sewardj553655c2008-11-14 19:41:19 +00002774 thr = map_threads_maybe_lookup( tid );
2775 tl_assert(thr); /* cannot fail - Thread* must already exist */
2776
2777 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2778 tl_assert(bar);
2779
2780 if (!bar->initted) {
2781 HG_(record_error_Misc)(
2782 thr, "pthread_barrier_destroy: barrier was never initialised"
2783 );
2784 }
2785
2786 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2787 HG_(record_error_Misc)(
2788 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2789 );
2790 }
2791
sewardj9f569b72008-11-13 13:33:09 +00002792 /* Maybe we shouldn't do this; just let it persist, so that when it
2793 is reinitialised we don't need to do any dynamic memory
2794 allocation? The downside is a potentially unlimited space leak,
2795 if the client creates (in turn) a large number of barriers all
2796 at different locations. Note that if we do later move to the
2797 don't-delete-it scheme, we need to mark the barrier as
2798 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002799 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002800 map_barrier_to_Bar_delete( barrier );
2801}
2802
2803
sewardj406bac82010-03-03 23:03:40 +00002804/* All the threads have arrived. Now do the Interesting Bit. Get a
2805 new synchronisation object and do a weak send to it from all the
 2806   participating threads.  This makes its vector clock be the join of
2807 all the individual threads' vector clocks. Then do a strong
2808 receive from it back to all threads, so that their VCs are a copy
2809 of it (hence are all equal to the join of their original VCs.) */
2810static void do_barrier_cross_sync_and_empty ( Bar* bar )
2811{
2812 /* XXX check bar->waiting has no duplicates */
2813 UWord i;
2814 SO* so = libhb_so_alloc();
2815
2816 tl_assert(bar->waiting);
2817 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2818
2819 /* compute the join ... */
2820 for (i = 0; i < bar->size; i++) {
2821 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2822 Thr* hbthr = t->hbthr;
2823 libhb_so_send( hbthr, so, False/*weak send*/ );
2824 }
2825 /* ... and distribute to all threads */
2826 for (i = 0; i < bar->size; i++) {
2827 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2828 Thr* hbthr = t->hbthr;
2829 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2830 }
2831
2832 /* finally, we must empty out the waiting vector */
2833 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2834
2835 /* and we don't need this any more. Perhaps a stack-allocated
2836 SO would be better? */
2837 libhb_so_dealloc(so);
2838}
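
/* A toy model (sketch with made-up types; libhb's real representation
   differs) of what the weak-send/strong-recv pair above computes on
   vector clocks: the weak sends fold each waiter's clock into the SO
   (elementwise max), and, per the comment above, the strong recvs then
   make each waiter's clock a copy of that join: */
#if 0
#define NTHR 4
typedef struct { unsigned long ts[NTHR]; } VC;

static void vc_weak_send ( VC* so, const VC* thr ) {
   int i;                       /* join: elementwise maximum */
   for (i = 0; i < NTHR; i++)
      if (thr->ts[i] > so->ts[i]) so->ts[i] = thr->ts[i];
}

static void vc_strong_recv ( VC* thr, const VC* so ) {
   int i;                       /* copy the join back */
   for (i = 0; i < NTHR; i++)
      thr->ts[i] = so->ts[i];
}
#endif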
2839
2840
sewardj9f569b72008-11-13 13:33:09 +00002841static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2842 void* barrier )
2843{
sewardj1c466b72008-11-19 11:52:14 +00002844 /* This function gets called after a client thread calls
2845 pthread_barrier_wait but before it arrives at the real
2846 pthread_barrier_wait.
2847
2848 Why is the following correct? It's a bit subtle.
2849
2850 If this is not the last thread arriving at the barrier, we simply
2851 note its presence and return. Because valgrind (at least as of
2852 Nov 08) is single threaded, we are guaranteed safe from any race
2853 conditions when in this function -- no other client threads are
2854 running.
2855
2856 If this is the last thread, then we are again the only running
2857 thread. All the other threads will have either arrived at the
2858 real pthread_barrier_wait or are on their way to it, but in any
2859 case are guaranteed not to be able to move past it, because this
2860 thread is currently in this function and so has not yet arrived
2861 at the real pthread_barrier_wait. That means that:
2862
2863 1. While we are in this function, none of the other threads
2864 waiting at the barrier can move past it.
2865
2866 2. When this function returns (and simulated execution resumes),
2867 this thread and all other waiting threads will be able to move
2868 past the real barrier.
2869
2870 Because of this, it is now safe to update the vector clocks of
2871 all threads, to represent the fact that they all arrived at the
2872 barrier and have all moved on. There is no danger of any
2873 complications to do with some threads leaving the barrier and
2874 racing back round to the front, whilst others are still leaving
2875 (which is the primary source of complication in correct handling/
2876 implementation of barriers). That can't happen because we update
2877 here our data structures so as to indicate that the threads have
2878 passed the barrier, even though, as per (2) above, they are
2879 guaranteed not to pass the barrier until we return.
2880
2881 This relies crucially on Valgrind being single threaded. If that
2882 changes, this will need to be reconsidered.
2883 */
sewardj9f569b72008-11-13 13:33:09 +00002884 Thread* thr;
2885 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00002886 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00002887
2888 if (SHOW_EVENTS >= 1)
2889 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2890 "(tid=%d, barrier=%p)\n",
2891 (Int)tid, (void*)barrier );
2892
2893 thr = map_threads_maybe_lookup( tid );
2894 tl_assert(thr); /* cannot fail - Thread* must already exist */
2895
2896 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2897 tl_assert(bar);
2898
2899 if (!bar->initted) {
2900 HG_(record_error_Misc)(
2901 thr, "pthread_barrier_wait: barrier is uninitialised"
2902 );
2903 return; /* client is broken .. avoid assertions below */
2904 }
2905
2906 /* guaranteed by _INIT_PRE above */
2907 tl_assert(bar->size > 0);
2908 tl_assert(bar->waiting);
2909
2910 VG_(addToXA)( bar->waiting, &thr );
2911
2912 /* guaranteed by this function */
2913 present = VG_(sizeXA)(bar->waiting);
2914 tl_assert(present > 0 && present <= bar->size);
2915
2916 if (present < bar->size)
2917 return;
2918
sewardj406bac82010-03-03 23:03:40 +00002919 do_barrier_cross_sync_and_empty(bar);
2920}
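
/* The client-side shape this handler models (sketch, plain pthreads;
   NTHREADS is illustrative).  Each thread's writes before the wait
   become visible, race-free, to every thread after the wait, because
   all clocks are joined and redistributed above: */
#if 0
#include <pthread.h>
#define NTHREADS 4

static pthread_barrier_t bar;
static int slot[NTHREADS];

static void* worker ( void* vi ) {
   long i = (long)vi;
   slot[i] = (int)(i * i);       /* per-thread write before the wait */
   pthread_barrier_wait(&bar);   /* all-to-all happens-before edge */
   /* reading any slot[j] here is race-free */
   return NULL;
}

int main ( void ) {
   long i;
   pthread_t tids[NTHREADS];
   pthread_barrier_init(&bar, NULL, NTHREADS);
   for (i = 0; i < NTHREADS; i++)
      pthread_create(&tids[i], NULL, worker, (void*)i);
   for (i = 0; i < NTHREADS; i++)
      pthread_join(tids[i], NULL);
   return 0;
}
#endif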
sewardj9f569b72008-11-13 13:33:09 +00002921
sewardj9f569b72008-11-13 13:33:09 +00002922
sewardj406bac82010-03-03 23:03:40 +00002923static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
2924 void* barrier,
2925 UWord newcount )
2926{
2927 Thread* thr;
2928 Bar* bar;
2929 UWord present;
2930
2931 if (SHOW_EVENTS >= 1)
2932 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
2933 "(tid=%d, barrier=%p, newcount=%lu)\n",
2934 (Int)tid, (void*)barrier, newcount );
2935
2936 thr = map_threads_maybe_lookup( tid );
2937 tl_assert(thr); /* cannot fail - Thread* must already exist */
2938
2939 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2940 tl_assert(bar);
2941
2942 if (!bar->initted) {
2943 HG_(record_error_Misc)(
2944 thr, "pthread_barrier_resize: barrier is uninitialised"
2945 );
2946 return; /* client is broken .. avoid assertions below */
2947 }
2948
2949 if (!bar->resizable) {
2950 HG_(record_error_Misc)(
 2951         thr, "pthread_barrier_resize: barrier may not be resized"
2952 );
2953 return; /* client is broken .. avoid assertions below */
2954 }
2955
2956 if (newcount == 0) {
2957 HG_(record_error_Misc)(
2958 thr, "pthread_barrier_resize: 'newcount' argument is zero"
2959 );
2960 return; /* client is broken .. avoid assertions below */
2961 }
2962
2963 /* guaranteed by _INIT_PRE above */
2964 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00002965 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00002966 /* Guaranteed by this fn */
2967 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00002968
sewardj406bac82010-03-03 23:03:40 +00002969 if (newcount >= bar->size) {
2970 /* Increasing the capacity. There's no possibility of threads
2971 moving on from the barrier in this situation, so just note
2972 the fact and do nothing more. */
2973 bar->size = newcount;
2974 } else {
 2975      /* Decreasing the capacity.  If we decrease it to be equal to or
2976 below the number of waiting threads, they will now move past
2977 the barrier, so need to mess with dep edges in the same way
2978 as if the barrier had filled up normally. */
2979 present = VG_(sizeXA)(bar->waiting);
 2980      tl_assert(present <= bar->size); /* 'present' is unsigned, so >= 0 is vacuous */
2981 if (newcount <= present) {
2982 bar->size = present; /* keep the cross_sync call happy */
2983 do_barrier_cross_sync_and_empty(bar);
2984 }
2985 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00002986 }
sewardj9f569b72008-11-13 13:33:09 +00002987}
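
/* Worked example of the decrease path above: with bar->size == 4 and
   three threads already waiting, a resize to newcount == 2 satisfies
   newcount <= present, so bar->size is temporarily set to 3 (matching
   the waiter count, which keeps the assertion in
   do_barrier_cross_sync_and_empty happy), the three waiters are
   released with the usual join/distribute, and bar->size finally
   becomes 2. */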
2988
2989
sewardjed2e72e2009-08-14 11:08:24 +00002990/* ----------------------------------------------------- */
2991/* ----- events to do with user-specified HB edges ----- */
2992/* ----------------------------------------------------- */
2993
2994/* A mapping from arbitrary UWord tag to the SO associated with it.
2995 The UWord tags are meaningless to us, interpreted only by the
2996 user. */
2997
2998
2999
3000/* UWord -> SO* */
3001static WordFM* map_usertag_to_SO = NULL;
3002
3003static void map_usertag_to_SO_INIT ( void ) {
3004 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3005 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3006 "hg.mutS.1", HG_(free), NULL );
3007 tl_assert(map_usertag_to_SO != NULL);
3008 }
3009}
3010
3011static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3012 UWord key, val;
3013 map_usertag_to_SO_INIT();
3014 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3015 tl_assert(key == (UWord)usertag);
3016 return (SO*)val;
3017 } else {
3018 SO* so = libhb_so_alloc();
3019 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3020 return so;
3021 }
3022}
3023
3024// If it's ever needed (XXX check before use)
3025//static void map_usertag_to_SO_delete ( UWord usertag ) {
3026// UWord keyW, valW;
3027// map_usertag_to_SO_INIT();
3028// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3029// SO* so = (SO*)valW;
3030// tl_assert(keyW == usertag);
3031// tl_assert(so);
3032// libhb_so_dealloc(so);
3033// }
3034//}
3035
3036
3037static
3038void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3039{
 3040   /* TID is just about to notionally send a message on a notional
3041 abstract synchronisation object whose identity is given by
3042 USERTAG. Bind USERTAG to a real SO if it is not already so
3043 bound, and do a 'strong send' on the SO. This is later used by
3044 other thread(s) which successfully 'receive' from the SO,
3045 thereby acquiring a dependency on this signalling event. */
3046 Thread* thr;
3047 SO* so;
3048
3049 if (SHOW_EVENTS >= 1)
3050 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3051 (Int)tid, usertag );
3052
3053 thr = map_threads_maybe_lookup( tid );
3054 tl_assert(thr); /* cannot fail - Thread* must already exist */
3055
3056 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3057 tl_assert(so);
3058
3059 libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
3060}
3061
3062static
3063void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3064{
3065 /* TID has just notionally received a message from a notional
3066 abstract synchronisation object whose identity is given by
3067 USERTAG. Bind USERTAG to a real SO if it is not already so
3068 bound. If the SO has at some point in the past been 'sent' on,
 3069      do a 'strong receive' on it, thereby acquiring a dependency on
3070 the sender. */
3071 Thread* thr;
3072 SO* so;
3073
3074 if (SHOW_EVENTS >= 1)
3075 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3076 (Int)tid, usertag );
3077
3078 thr = map_threads_maybe_lookup( tid );
3079 tl_assert(thr); /* cannot fail - Thread* must already exist */
3080
3081 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3082 tl_assert(so);
3083
3084 /* Acquire a dependency on it. If the SO has never so far been
3085 sent on, then libhb_so_recv will do nothing. So we're safe
3086 regardless of SO's history. */
3087 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3088}
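
/* Client-side sketch (assuming the ANNOTATE_HAPPENS_BEFORE/AFTER
   macros from helgrind.h, which funnel into the two handlers above;
   the address of the flag serves as the usertag): */
#if 0
#include "helgrind.h"

static int data;
static volatile int ready;   /* NB: the flag itself is still racy by
                                Helgrind's rules; real code would mark
                                or synchronise it too */

static void publisher ( void ) {
   data = 42;
   ANNOTATE_HAPPENS_BEFORE(&ready); /* -> evh__HG_USERSO_SEND_PRE */
   ready = 1;
}

static void consumer ( void ) {
   while (!ready) { }               /* spin until published */
   ANNOTATE_HAPPENS_AFTER(&ready);  /* -> evh__HG_USERSO_RECV_POST */
   (void)data;  /* ordered after the publisher's write to 'data' */
}
#endif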
3089
3090
sewardjb4112022007-11-09 22:49:28 +00003091/*--------------------------------------------------------------*/
3092/*--- Lock acquisition order monitoring ---*/
3093/*--------------------------------------------------------------*/
3094
3095/* FIXME: here are some optimisations still to do in
3096 laog__pre_thread_acquires_lock.
3097
3098 The graph is structured so that if L1 --*--> L2 then L1 must be
3099 acquired before L2.
3100
3101 The common case is that some thread T holds (eg) L1 L2 and L3 and
3102 is repeatedly acquiring and releasing Ln, and there is no ordering
 3103   error in what it is doing.  Hence it repeatedly:
3104
3105 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3106 produces the answer No (because there is no error).
3107
3108 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3109 (because they already got added the first time T acquired Ln).
3110
3111 Hence cache these two events:
3112
3113 (1) Cache result of the query from last time. Invalidate the cache
3114 any time any edges are added to or deleted from laog.
3115
3116 (2) Cache these add-edge requests and ignore them if said edges
3117 have already been added to laog. Invalidate the cache any time
3118 any edges are deleted from laog.
3119*/
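
/* A sketch of the caching idea in the FIXME above (illustrative only;
   none of these names exist in this file).  A generation counter,
   bumped on every laog edge addition or deletion, invalidates both
   caches cheaply: */
#if 0
static UWord laog_generation = 0;   /* ++ in laog__add_edge/del_edge */

typedef
   struct {
      UWord     stamp;    /* laog_generation this entry is valid for */
      Lock*     lk;       /* (1): args/result of the last dfs query */
      WordSetID lockset;
      Lock*     result;
   }
   LaogDfsCacheEnt;

static LaogDfsCacheEnt laog_dfs_cache;  /* consult before laog__do_dfs_from_to */
#endif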
3120
3121typedef
3122 struct {
3123 WordSetID inns; /* in univ_laog */
3124 WordSetID outs; /* in univ_laog */
3125 }
3126 LAOGLinks;
3127
3128/* lock order acquisition graph */
3129static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3130
3131/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3132 where that edge was created, so that we can show the user later if
3133 we need to. */
3134typedef
3135 struct {
3136 Addr src_ga; /* Lock guest addresses for */
3137 Addr dst_ga; /* src/dst of the edge */
3138 ExeContext* src_ec; /* And corresponding places where that */
3139 ExeContext* dst_ec; /* ordering was established */
3140 }
3141 LAOGLinkExposition;
3142
sewardj250ec2e2008-02-15 22:02:30 +00003143static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003144 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3145 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3146 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3147 if (llx1->src_ga < llx2->src_ga) return -1;
3148 if (llx1->src_ga > llx2->src_ga) return 1;
3149 if (llx1->dst_ga < llx2->dst_ga) return -1;
3150 if (llx1->dst_ga > llx2->dst_ga) return 1;
3151 return 0;
3152}
3153
3154static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3155/* end EXPOSITION ONLY */
3156
3157
sewardja65db102009-01-26 10:45:16 +00003158__attribute__((noinline))
3159static void laog__init ( void )
3160{
3161 tl_assert(!laog);
3162 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003163 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003164
3165 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3166 HG_(free), NULL/*unboxedcmp*/ );
3167
3168 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3169 cmp_LAOGLinkExposition );
3170 tl_assert(laog);
3171 tl_assert(laog_exposition);
3172}
3173
sewardjb4112022007-11-09 22:49:28 +00003174static void laog__show ( Char* who ) {
3175 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003176 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003177 Lock* me;
3178 LAOGLinks* links;
3179 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003180 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003181 me = NULL;
3182 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003183 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003184 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003185 tl_assert(me);
3186 tl_assert(links);
3187 VG_(printf)(" node %p:\n", me);
3188 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3189 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003190 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003191 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3192 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003193 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003194 me = NULL;
3195 links = NULL;
3196 }
sewardj896f6f92008-08-19 08:38:52 +00003197 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003198 VG_(printf)("}\n");
3199}
3200
3201__attribute__((noinline))
3202static void laog__add_edge ( Lock* src, Lock* dst ) {
3203 Word keyW;
3204 LAOGLinks* links;
3205 Bool presentF, presentR;
3206 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3207
3208 /* Take the opportunity to sanity check the graph. Record in
3209 presentF if there is already a src->dst mapping in this node's
3210 forwards links, and presentR if there is already a src->dst
3211 mapping in this node's backwards links. They should agree!
3212 Also, we need to know whether the edge was already present so as
3213 to decide whether or not to update the link details mapping. We
3214 can compute presentF and presentR essentially for free, so may
3215 as well do this always. */
3216 presentF = presentR = False;
3217
3218 /* Update the out edges for src */
3219 keyW = 0;
3220 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003221 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003222 WordSetID outs_new;
3223 tl_assert(links);
3224 tl_assert(keyW == (Word)src);
3225 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3226 presentF = outs_new == links->outs;
3227 links->outs = outs_new;
3228 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003229 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003230 links->inns = HG_(emptyWS)( univ_laog );
3231 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003232 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003233 }
3234 /* Update the in edges for dst */
3235 keyW = 0;
3236 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003237 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003238 WordSetID inns_new;
3239 tl_assert(links);
3240 tl_assert(keyW == (Word)dst);
3241 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3242 presentR = inns_new == links->inns;
3243 links->inns = inns_new;
3244 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003245 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003246 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3247 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003248 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003249 }
3250
3251 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3252
3253 if (!presentF && src->acquired_at && dst->acquired_at) {
3254 LAOGLinkExposition expo;
3255 /* If this edge is entering the graph, and we have acquired_at
3256 information for both src and dst, record those acquisition
3257 points. Hence, if there is later a violation of this
3258 ordering, we can show the user the two places in which the
3259 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003260 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003261 src->guestaddr, dst->guestaddr);
3262 expo.src_ga = src->guestaddr;
3263 expo.dst_ga = dst->guestaddr;
3264 expo.src_ec = NULL;
3265 expo.dst_ec = NULL;
3266 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003267 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003268 /* we already have it; do nothing */
3269 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003270 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3271 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003272 expo2->src_ga = src->guestaddr;
3273 expo2->dst_ga = dst->guestaddr;
3274 expo2->src_ec = src->acquired_at;
3275 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003276 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003277 }
3278 }
3279}
3280
3281__attribute__((noinline))
3282static void laog__del_edge ( Lock* src, Lock* dst ) {
3283 Word keyW;
3284 LAOGLinks* links;
3285 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3286 /* Update the out edges for src */
3287 keyW = 0;
3288 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003289 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003290 tl_assert(links);
3291 tl_assert(keyW == (Word)src);
3292 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3293 }
3294 /* Update the in edges for dst */
3295 keyW = 0;
3296 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003297 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003298 tl_assert(links);
3299 tl_assert(keyW == (Word)dst);
3300 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3301 }
3302}
3303
3304__attribute__((noinline))
3305static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3306 Word keyW;
3307 LAOGLinks* links;
3308 keyW = 0;
3309 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003310 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003311 tl_assert(links);
3312 tl_assert(keyW == (Word)lk);
3313 return links->outs;
3314 } else {
3315 return HG_(emptyWS)( univ_laog );
3316 }
3317}
3318
3319__attribute__((noinline))
3320static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3321 Word keyW;
3322 LAOGLinks* links;
3323 keyW = 0;
3324 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003325 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003326 tl_assert(links);
3327 tl_assert(keyW == (Word)lk);
3328 return links->inns;
3329 } else {
3330 return HG_(emptyWS)( univ_laog );
3331 }
3332}
3333
3334__attribute__((noinline))
3335static void laog__sanity_check ( Char* who ) {
3336 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003337 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003338 Lock* me;
3339 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003340 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003341 me = NULL;
3342 links = NULL;
3343 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003344 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003345 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003346 tl_assert(me);
3347 tl_assert(links);
3348 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3349 for (i = 0; i < ws_size; i++) {
3350 if ( ! HG_(elemWS)( univ_laog,
3351 laog__succs( (Lock*)ws_words[i] ),
3352 (Word)me ))
3353 goto bad;
3354 }
3355 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3356 for (i = 0; i < ws_size; i++) {
3357 if ( ! HG_(elemWS)( univ_laog,
3358 laog__preds( (Lock*)ws_words[i] ),
3359 (Word)me ))
3360 goto bad;
3361 }
3362 me = NULL;
3363 links = NULL;
3364 }
sewardj896f6f92008-08-19 08:38:52 +00003365 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003366 return;
3367
3368 bad:
3369 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3370 laog__show(who);
3371 tl_assert(0);
3372}
3373
3374/* If there is a path in laog from 'src' to any of the elements in
3375 'dst', return an arbitrarily chosen element of 'dst' reachable from
 3376   'src'.  If no path exists from 'src' to any element in 'dst', return
3377 NULL. */
3378__attribute__((noinline))
3379static
3380Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3381{
3382 Lock* ret;
3383 Word i, ssz;
3384 XArray* stack; /* of Lock* */
3385 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3386 Lock* here;
3387 WordSetID succs;
3388 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003389 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003390 //laog__sanity_check();
3391
3392 /* If the destination set is empty, we can never get there from
3393 'src' :-), so don't bother to try */
3394 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3395 return NULL;
3396
3397 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003398 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3399 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003400
3401 (void) VG_(addToXA)( stack, &src );
3402
3403 while (True) {
3404
3405 ssz = VG_(sizeXA)( stack );
3406
3407 if (ssz == 0) { ret = NULL; break; }
3408
3409 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3410 VG_(dropTailXA)( stack, 1 );
3411
3412 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3413
sewardj896f6f92008-08-19 08:38:52 +00003414 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003415 continue;
3416
sewardj896f6f92008-08-19 08:38:52 +00003417 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003418
3419 succs = laog__succs( here );
3420 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3421 for (i = 0; i < succs_size; i++)
3422 (void) VG_(addToXA)( stack, &succs_words[i] );
3423 }
3424
sewardj896f6f92008-08-19 08:38:52 +00003425 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003426 VG_(deleteXA)( stack );
3427 return ret;
3428}
3429
3430
3431/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3432 between 'lk' and the locks already held by 'thr' and issue a
3433 complaint if so. Also, update the ordering graph appropriately.
3434*/
3435__attribute__((noinline))
3436static void laog__pre_thread_acquires_lock (
3437 Thread* thr, /* NB: BEFORE lock is added */
3438 Lock* lk
3439 )
3440{
sewardj250ec2e2008-02-15 22:02:30 +00003441 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003442 Word ls_size, i;
3443 Lock* other;
3444
3445 /* It may be that 'thr' already holds 'lk' and is recursively
3446 relocking in. In this case we just ignore the call. */
3447 /* NB: univ_lsets really is correct here */
3448 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3449 return;
3450
sewardjb4112022007-11-09 22:49:28 +00003451 /* First, the check. Complain if there is any path in laog from lk
3452 to any of the locks already held by thr, since if any such path
3453 existed, it would mean that previously lk was acquired before
3454 (rather than after, as we are doing here) at least one of those
3455 locks.
3456 */
3457 other = laog__do_dfs_from_to(lk, thr->locksetA);
3458 if (other) {
3459 LAOGLinkExposition key, *found;
3460 /* So we managed to find a path lk --*--> other in the graph,
3461 which implies that 'lk' should have been acquired before
3462 'other' but is in fact being acquired afterwards. We present
3463 the lk/other arguments to record_error_LockOrder in the order
3464 in which they should have been acquired. */
3465 /* Go look in the laog_exposition mapping, to find the allocation
3466 points for this edge, so we can show the user. */
3467 key.src_ga = lk->guestaddr;
3468 key.dst_ga = other->guestaddr;
3469 key.src_ec = NULL;
3470 key.dst_ec = NULL;
3471 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003472 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003473 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003474 tl_assert(found != &key);
3475 tl_assert(found->src_ga == key.src_ga);
3476 tl_assert(found->dst_ga == key.dst_ga);
3477 tl_assert(found->src_ec);
3478 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003479 HG_(record_error_LockOrder)(
3480 thr, lk->guestaddr, other->guestaddr,
3481 found->src_ec, found->dst_ec );
sewardjb4112022007-11-09 22:49:28 +00003482 } else {
3483 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003484 HG_(record_error_LockOrder)(
3485 thr, lk->guestaddr, other->guestaddr,
3486 NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003487 }
3488 }
3489
3490 /* Second, add to laog the pairs
3491 (old, lk) | old <- locks already held by thr
3492 Since both old and lk are currently held by thr, their acquired_at
3493 fields must be non-NULL.
3494 */
3495 tl_assert(lk->acquired_at);
3496 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3497 for (i = 0; i < ls_size; i++) {
3498 Lock* old = (Lock*)ls_words[i];
3499 tl_assert(old->acquired_at);
3500 laog__add_edge( old, lk );
3501 }
3502
3503 /* Why "except_Locks" ? We're here because a lock is being
3504 acquired by a thread, and we're in an inconsistent state here.
3505 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3506 When called in this inconsistent state, locks__sanity_check duly
3507 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003508 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003509 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3510}
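
/* For reference, a minimal client pattern this check flags (sketch,
   plain pthreads).  t1 establishes the edge A --> B in laog; when t2
   then acquires A while holding B, the dfs from A reaches B and a
   lock-order error is reported: */
#if 0
#include <pthread.h>

static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;

static void* t1 ( void* v ) {
   pthread_mutex_lock(&A);
   pthread_mutex_lock(&B);     /* adds laog edge A --> B */
   pthread_mutex_unlock(&B);
   pthread_mutex_unlock(&A);
   return NULL;
}

static void* t2 ( void* v ) {
   pthread_mutex_lock(&B);
   pthread_mutex_lock(&A);     /* dfs(A, {B}) finds A --*--> B: error */
   pthread_mutex_unlock(&A);
   pthread_mutex_unlock(&B);
   return NULL;
}

int main ( void ) {
   pthread_t x, y;
   pthread_create(&x, NULL, t1, NULL);
   pthread_join(x, NULL);      /* run serially so both orders occur */
   pthread_create(&y, NULL, t2, NULL);
   pthread_join(y, NULL);
   return 0;
}
#endif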
3511
3512
3513/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3514
3515__attribute__((noinline))
3516static void laog__handle_one_lock_deletion ( Lock* lk )
3517{
3518 WordSetID preds, succs;
3519 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003520 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003521
3522 preds = laog__preds( lk );
3523 succs = laog__succs( lk );
3524
3525 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3526 for (i = 0; i < preds_size; i++)
3527 laog__del_edge( (Lock*)preds_words[i], lk );
3528
3529 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3530 for (j = 0; j < succs_size; j++)
3531 laog__del_edge( lk, (Lock*)succs_words[j] );
3532
3533 for (i = 0; i < preds_size; i++) {
3534 for (j = 0; j < succs_size; j++) {
3535 if (preds_words[i] != succs_words[j]) {
3536 /* This can pass unlocked locks to laog__add_edge, since
3537 we're deleting stuff. So their acquired_at fields may
3538 be NULL. */
3539 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3540 }
3541 }
3542 }
3543}
3544
sewardj1cbc12f2008-11-10 16:16:46 +00003545//__attribute__((noinline))
3546//static void laog__handle_lock_deletions (
3547// WordSetID /* in univ_laog */ locksToDelete
3548// )
3549//{
3550// Word i, ws_size;
3551// UWord* ws_words;
3552//
sewardj1cbc12f2008-11-10 16:16:46 +00003553//
3554// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3555// for (i = 0; i < ws_size; i++)
3556// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3557//
3558// if (HG_(clo_sanity_flags) & SCE_LAOG)
3559// all__sanity_check("laog__handle_lock_deletions-post");
3560//}
sewardjb4112022007-11-09 22:49:28 +00003561
3562
3563/*--------------------------------------------------------------*/
3564/*--- Malloc/free replacements ---*/
3565/*--------------------------------------------------------------*/
3566
3567typedef
3568 struct {
3569 void* next; /* required by m_hashtable */
3570 Addr payload; /* ptr to actual block */
3571 SizeT szB; /* size requested */
3572 ExeContext* where; /* where it was allocated */
3573 Thread* thr; /* allocating thread */
3574 }
3575 MallocMeta;
3576
3577/* A hash table of MallocMetas, used to track malloc'd blocks
3578 (obviously). */
3579static VgHashTable hg_mallocmeta_table = NULL;
3580
3581
3582static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003583 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003584 tl_assert(md);
3585 return md;
3586}
3587static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003588 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003589}
3590
3591
3592/* Allocate a client block and set up the metadata for it. */
3593
3594static
3595void* handle_alloc ( ThreadId tid,
3596 SizeT szB, SizeT alignB, Bool is_zeroed )
3597{
3598 Addr p;
3599 MallocMeta* md;
3600
3601 tl_assert( ((SSizeT)szB) >= 0 );
3602 p = (Addr)VG_(cli_malloc)(alignB, szB);
3603 if (!p) {
3604 return NULL;
3605 }
3606 if (is_zeroed)
3607 VG_(memset)((void*)p, 0, szB);
3608
3609 /* Note that map_threads_lookup must succeed (cannot assert), since
3610 memory can only be allocated by currently alive threads, hence
3611 they must have an entry in map_threads. */
3612 md = new_MallocMeta();
3613 md->payload = p;
3614 md->szB = szB;
3615 md->where = VG_(record_ExeContext)( tid, 0 );
3616 md->thr = map_threads_lookup( tid );
3617
3618 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3619
3620 /* Tell the lower level memory wranglers. */
3621 evh__new_mem_heap( p, szB, is_zeroed );
3622
3623 return (void*)p;
3624}
3625
3626/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3627 Cast to a signed type to catch any unexpectedly negative args.
3628 We're assuming here that the size asked for is not greater than
3629 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3630 platforms). */
3631static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3632 if (((SSizeT)n) < 0) return NULL;
3633 return handle_alloc ( tid, n, VG_(clo_alignment),
3634 /*is_zeroed*/False );
3635}
3636static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3637 if (((SSizeT)n) < 0) return NULL;
3638 return handle_alloc ( tid, n, VG_(clo_alignment),
3639 /*is_zeroed*/False );
3640}
3641static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3642 if (((SSizeT)n) < 0) return NULL;
3643 return handle_alloc ( tid, n, VG_(clo_alignment),
3644 /*is_zeroed*/False );
3645}
3646static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3647 if (((SSizeT)n) < 0) return NULL;
3648 return handle_alloc ( tid, n, align,
3649 /*is_zeroed*/False );
3650}
3651static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3652 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3653 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3654 /*is_zeroed*/True );
3655}
3656
3657
3658/* Free a client block, including getting rid of the relevant
3659 metadata. */
3660
3661static void handle_free ( ThreadId tid, void* p )
3662{
3663 MallocMeta *md, *old_md;
3664 SizeT szB;
3665
3666 /* First see if we can find the metadata for 'p'. */
3667 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3668 if (!md)
3669 return; /* apparently freeing a bogus address. Oh well. */
3670
3671 tl_assert(md->payload == (Addr)p);
3672 szB = md->szB;
3673
3674 /* Nuke the metadata block */
3675 old_md = (MallocMeta*)
3676 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3677 tl_assert(old_md); /* it must be present - we just found it */
3678 tl_assert(old_md == md);
3679 tl_assert(old_md->payload == (Addr)p);
3680
3681 VG_(cli_free)((void*)old_md->payload);
3682 delete_MallocMeta(old_md);
3683
3684 /* Tell the lower level memory wranglers. */
3685 evh__die_mem_heap( (Addr)p, szB );
3686}
3687
3688static void hg_cli__free ( ThreadId tid, void* p ) {
3689 handle_free(tid, p);
3690}
3691static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3692 handle_free(tid, p);
3693}
3694static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3695 handle_free(tid, p);
3696}
3697
3698
3699static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3700{
3701 MallocMeta *md, *md_new, *md_tmp;
3702 SizeT i;
3703
3704 Addr payload = (Addr)payloadV;
3705
3706 if (((SSizeT)new_size) < 0) return NULL;
3707
3708 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3709 if (!md)
3710 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3711
3712 tl_assert(md->payload == payload);
3713
3714 if (md->szB == new_size) {
3715 /* size unchanged */
3716 md->where = VG_(record_ExeContext)(tid, 0);
3717 return payloadV;
3718 }
3719
3720 if (md->szB > new_size) {
3721 /* new size is smaller */
3722 md->szB = new_size;
3723 md->where = VG_(record_ExeContext)(tid, 0);
3724 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3725 return payloadV;
3726 }
3727
3728 /* else */ {
3729 /* new size is bigger */
3730 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3731
3732 /* First half kept and copied, second half new */
3733 // FIXME: shouldn't we use a copier which implements the
3734 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003735 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003736 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003737 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003738 /* FIXME: can anything funny happen here? specifically, if the
3739 old range contained a lock, then die_mem_heap will complain.
3740 Is that the correct behaviour? Not sure. */
3741 evh__die_mem_heap( payload, md->szB );
3742
3743 /* Copy from old to new */
3744 for (i = 0; i < md->szB; i++)
3745 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3746
 3747      /* Because the metadata hash table is indexed by payload address,
3748 we have to get rid of the old hash table entry and make a new
3749 one. We can't just modify the existing metadata in place,
3750 because then it would (almost certainly) be in the wrong hash
3751 chain. */
3752 md_new = new_MallocMeta();
3753 *md_new = *md;
3754
3755 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3756 tl_assert(md_tmp);
3757 tl_assert(md_tmp == md);
3758
3759 VG_(cli_free)((void*)md->payload);
3760 delete_MallocMeta(md);
3761
3762 /* Update fields */
3763 md_new->where = VG_(record_ExeContext)( tid, 0 );
3764 md_new->szB = new_size;
3765 md_new->payload = p_new;
3766 md_new->thr = map_threads_lookup( tid );
3767
3768 /* and add */
3769 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3770
3771 return (void*)p_new;
3772 }
3773}
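
/* Worked example of the grow path above: realloc(p, 100) on a 40-byte
   block allocates p_new, then issues evh__copy_mem(p, p_new, 40),
   evh__new_mem_heap(p_new+40, 60, uninited) and
   evh__die_mem_heap(p, 40), copies the 40 payload bytes, and finally
   re-hashes the metadata under p_new. */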
3774
njn8b140de2009-02-17 04:31:18 +00003775static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3776{
3777 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3778
3779 // There may be slop, but pretend there isn't because only the asked-for
3780 // area will have been shadowed properly.
3781 return ( md ? md->szB : 0 );
3782}
3783
sewardjb4112022007-11-09 22:49:28 +00003784
sewardj095d61e2010-03-11 13:43:18 +00003785/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003786 Slow linear search. With a bit of hash table help if 'data_addr'
3787 is either the start of a block or up to 15 word-sized steps along
3788 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003789
3790static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3791{
sewardjc8028ad2010-05-05 09:34:42 +00003792 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3793 right at it. */
3794 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3795 return True;
3796 /* else normal interval rules apply */
3797 if (LIKELY(a < mm->payload)) return False;
3798 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3799 return True;
sewardj095d61e2010-03-11 13:43:18 +00003800}
3801
sewardjc8028ad2010-05-05 09:34:42 +00003802Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003803 /*OUT*/Addr* payload,
3804 /*OUT*/SizeT* szB,
3805 Addr data_addr )
3806{
3807 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003808 Int i;
3809 const Int n_fast_check_words = 16;
3810
3811 /* First, do a few fast searches on the basis that data_addr might
3812 be exactly the start of a block or up to 15 words inside. This
3813 can happen commonly via the creq
3814 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3815 for (i = 0; i < n_fast_check_words; i++) {
3816 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3817 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3818 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3819 goto found;
3820 }
3821
sewardj095d61e2010-03-11 13:43:18 +00003822 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003823 some such, it's hard to see how to do better. We have to check
3824 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003825 VG_(HT_ResetIter)(hg_mallocmeta_table);
3826 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003827 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3828 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003829 }
sewardjc8028ad2010-05-05 09:34:42 +00003830
3831 /* Not found. Bah. */
3832 return False;
3833 /*NOTREACHED*/
3834
3835 found:
3836 tl_assert(mm);
3837 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3838 if (where) *where = mm->where;
3839 if (payload) *payload = mm->payload;
3840 if (szB) *szB = mm->szB;
3841 return True;
sewardj095d61e2010-03-11 13:43:18 +00003842}
3843
3844
sewardjb4112022007-11-09 22:49:28 +00003845/*--------------------------------------------------------------*/
3846/*--- Instrumentation ---*/
3847/*--------------------------------------------------------------*/
3848
3849static void instrument_mem_access ( IRSB* bbOut,
3850 IRExpr* addr,
3851 Int szB,
3852 Bool isStore,
3853 Int hWordTy_szB )
3854{
3855 IRType tyAddr = Ity_INVALID;
3856 HChar* hName = NULL;
3857 void* hAddr = NULL;
3858 Int regparms = 0;
3859 IRExpr** argv = NULL;
3860 IRDirty* di = NULL;
3861
3862 tl_assert(isIRAtom(addr));
3863 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3864
3865 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3866 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3867
3868 /* So the effective address is in 'addr' now. */
3869 regparms = 1; // unless stated otherwise
3870 if (isStore) {
3871 switch (szB) {
3872 case 1:
sewardj23f12002009-07-24 08:45:08 +00003873 hName = "evh__mem_help_cwrite_1";
3874 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00003875 argv = mkIRExprVec_1( addr );
3876 break;
3877 case 2:
sewardj23f12002009-07-24 08:45:08 +00003878 hName = "evh__mem_help_cwrite_2";
3879 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00003880 argv = mkIRExprVec_1( addr );
3881 break;
3882 case 4:
sewardj23f12002009-07-24 08:45:08 +00003883 hName = "evh__mem_help_cwrite_4";
3884 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00003885 argv = mkIRExprVec_1( addr );
3886 break;
3887 case 8:
sewardj23f12002009-07-24 08:45:08 +00003888 hName = "evh__mem_help_cwrite_8";
3889 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00003890 argv = mkIRExprVec_1( addr );
3891 break;
3892 default:
3893 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3894 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003895 hName = "evh__mem_help_cwrite_N";
3896 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00003897 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3898 break;
3899 }
3900 } else {
3901 switch (szB) {
3902 case 1:
sewardj23f12002009-07-24 08:45:08 +00003903 hName = "evh__mem_help_cread_1";
3904 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00003905 argv = mkIRExprVec_1( addr );
3906 break;
3907 case 2:
sewardj23f12002009-07-24 08:45:08 +00003908 hName = "evh__mem_help_cread_2";
3909 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00003910 argv = mkIRExprVec_1( addr );
3911 break;
3912 case 4:
sewardj23f12002009-07-24 08:45:08 +00003913 hName = "evh__mem_help_cread_4";
3914 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00003915 argv = mkIRExprVec_1( addr );
3916 break;
3917 case 8:
sewardj23f12002009-07-24 08:45:08 +00003918 hName = "evh__mem_help_cread_8";
3919 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00003920 argv = mkIRExprVec_1( addr );
3921 break;
3922 default:
3923 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3924 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003925 hName = "evh__mem_help_cread_N";
3926 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00003927 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3928 break;
3929 }
3930 }
3931
3932 /* Add the helper. */
3933 tl_assert(hName);
3934 tl_assert(hAddr);
3935 tl_assert(argv);
3936 di = unsafeIRDirty_0_N( regparms,
3937 hName, VG_(fnptr_to_fnentry)( hAddr ),
3938 argv );
3939 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
3940}
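
/* For example (illustrative; exact IR printing varies by VEX
   version), instrumenting a 4-byte guest store  STle(t2) = t3
   prepends a dirty helper call, so the output superblock contains
   roughly:

      DIRTY 1 ::: evh__mem_help_cwrite_4{...}(t2)
      STle(t2) = t3
*/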
3941
3942
sewardja0eee322009-07-31 08:46:35 +00003943/* Figure out if GA is a guest code address in the dynamic linker, and
3944 if so return True. Otherwise (and in case of any doubt) return
 3945   False.  (Errs on the safe side: False is the safe value.) */
3946static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
3947{
3948 DebugInfo* dinfo;
3949 const UChar* soname;
3950 if (0) return False;
3951
sewardje3f1e592009-07-31 09:41:29 +00003952 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00003953 if (!dinfo) return False;
3954
sewardje3f1e592009-07-31 09:41:29 +00003955 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00003956 tl_assert(soname);
3957 if (0) VG_(printf)("%s\n", soname);
3958
3959# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00003960 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00003961 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
3962 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
3963 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
3964 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
3965# elif defined(VGO_darwin)
3966 if (VG_STREQ(soname, VG_U_DYLD)) return True;
3967# else
3968# error "Unsupported OS"
3969# endif
3970 return False;
3971}
3972
sewardjb4112022007-11-09 22:49:28 +00003973static
3974IRSB* hg_instrument ( VgCallbackClosure* closure,
3975 IRSB* bbIn,
3976 VexGuestLayout* layout,
3977 VexGuestExtents* vge,
3978 IRType gWordTy, IRType hWordTy )
3979{
sewardj1c0ce7a2009-07-01 08:10:49 +00003980 Int i;
3981 IRSB* bbOut;
3982 Addr64 cia; /* address of current insn */
3983 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00003984 Bool inLDSO = False;
3985 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00003986
3987 if (gWordTy != hWordTy) {
3988 /* We don't currently support this case. */
3989 VG_(tool_panic)("host/guest word size mismatch");
3990 }
3991
sewardja0eee322009-07-31 08:46:35 +00003992 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
3993 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
3994 }
3995
sewardjb4112022007-11-09 22:49:28 +00003996 /* Set up BB */
3997 bbOut = emptyIRSB();
3998 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
3999 bbOut->next = deepCopyIRExpr(bbIn->next);
4000 bbOut->jumpkind = bbIn->jumpkind;
4001
4002 // Copy verbatim any IR preamble preceding the first IMark
4003 i = 0;
4004 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4005 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4006 i++;
4007 }
4008
sewardj1c0ce7a2009-07-01 08:10:49 +00004009 // Get the first statement, and initial cia from it
4010 tl_assert(bbIn->stmts_used > 0);
4011 tl_assert(i < bbIn->stmts_used);
4012 st = bbIn->stmts[i];
4013 tl_assert(Ist_IMark == st->tag);
4014 cia = st->Ist.IMark.addr;
4015 st = NULL;
4016
sewardjb4112022007-11-09 22:49:28 +00004017 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004018 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004019 tl_assert(st);
4020 tl_assert(isFlatIRStmt(st));
4021 switch (st->tag) {
4022 case Ist_NoOp:
4023 case Ist_AbiHint:
4024 case Ist_Put:
4025 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004026 case Ist_Exit:
4027 /* None of these can contain any memory references. */
4028 break;
4029
sewardj1c0ce7a2009-07-01 08:10:49 +00004030 case Ist_IMark:
4031 /* no mem refs, but note the insn address. */
4032 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004033 /* Don't instrument the dynamic linker. It generates a
4034 lot of races which we just expensively suppress, so
4035 it's pointless.
4036
4037 Avoid flooding is_in_dynamic_linker_shared_object with
4038 requests by only checking at transitions between 4K
4039 pages. */
4040 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4041 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4042 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4043 inLDSO = is_in_dynamic_linker_shared_object(cia);
4044 } else {
4045 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4046 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004047 break;
4048
sewardjb4112022007-11-09 22:49:28 +00004049 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004050 switch (st->Ist.MBE.event) {
4051 case Imbe_Fence:
4052 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004053 default:
4054 goto unhandled;
4055 }
sewardjb4112022007-11-09 22:49:28 +00004056 break;
4057
sewardj1c0ce7a2009-07-01 08:10:49 +00004058 case Ist_CAS: {
4059 /* Atomic read-modify-write cycle. Just pretend it's a
4060 read. */
4061 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004062 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4063 if (isDCAS) {
4064 tl_assert(cas->expdHi);
4065 tl_assert(cas->dataHi);
4066 } else {
4067 tl_assert(!cas->expdHi);
4068 tl_assert(!cas->dataHi);
4069 }
4070 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004071 if (!inLDSO) {
4072 instrument_mem_access(
4073 bbOut,
4074 cas->addr,
4075 (isDCAS ? 2 : 1)
4076 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4077 False/*!isStore*/,
4078 sizeofIRType(hWordTy)
4079 );
4080 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004081 break;
4082 }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked operations are treated the
               same as normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }
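
         /* (Rationale sketch, not stated in the source: an SC only
            succeeds if the watched location is intact since the
            matching LL, and that LL was already instrumented as a
            read, so ignoring the SC's store costs little.) */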

         case Ist_Store:
            /* An ordinary store.  (Store-conditionals arrive as
               Ist_LLSC and are ignored above, so they never reach
               here.) */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* A vanilla load.  (Load-linked is handled by Ist_LLSC
               above; both kinds are instrumented as plain reads.) */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
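               /* Note: for Ifx_Modify, both branches below fire, so
                  the helper's range is instrumented as a read
                  followed by a write. */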
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
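
      /* (Client-side sketch, assuming the VALGRIND_HG_CLEAN_MEMORY
         macro from helgrind.h: a program recycling a buffer would do

            VALGRIND_HG_CLEAN_MEMORY(buf, buf_size);

         which arrives here with args[1] == buf and args[2] ==
         buf_size, discarding Helgrind's access history for that
         range.) */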

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB    = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened before we got this client request for it.  That is
            because this request is sent from client-world by the
            'thread_wrapper' function, which only runs once the thread
            has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1], yet that
            thread never existed, or at least never lodged an entry
            in the mapping via SET_MY_PTHREAD_T.  Which sounds like a
            bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;
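
      /* (Pairing sketch, assuming the ANNOTATE_HAPPENS_BEFORE/_AFTER
         macros from helgrind.h: a producer does
         ANNOTATE_HAPPENS_BEFORE(&q) after filling queue q, and the
         consumer does ANNOTATE_HAPPENS_AFTER(&q) after draining it;
         both sides pass &q, which arrives here as the arbitrary SO
         tag in args[1].) */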

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);
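
   /* (Example invocation, illustrative only:
      "valgrind --tool=helgrind --history-level=approx ./a.out"
      sets HG_(clo_history_level) to 1 via the middle case above.) */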

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* A 6-character string of '0'/'1' digits, e.g. "010001",
      converted to a 6-bit mask. */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
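
   /* (Worked example, not from the source: --hg-sanity-flags=000010
      has its only '1' at j == 4, so the loop ORs in
      (1 << (6-1-4)) == 2, enabling sanity checks at lock/unlock
      events; see hg_print_debug_usage below for the bit meanings.) */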

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N    size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog, "univ_laog" );
         }
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("            LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)(" LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
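   /* zero-fill any unused tail so the caller always sees nRequest
      well-defined entries */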
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/